Merge 4.5-rc6 into staging-next

We want the staging fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/CREDITS b/CREDITS
index a3887b5..4312cd0 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3054,6 +3054,7 @@
 D: PCA9634 driver
 D: Option GTM671WFS
 D: Fintek F81216A
+D: AD5761 iio driver
 D: Various kernel hacks
 S: Qtechnology A/S
 S: Valby Langgade 142
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 636e938..5d0125f7 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -27,3 +27,17 @@
 		Virtual Processors.
 		Format: <channel's child_relid:the bound cpu's number>
 Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/device
+Date:		Dec. 2015
+KernelVersion:	4.5
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The 16 bit device ID of the device
+Users:		tools/hv/lsvmbus and user level RDMA libraries
+
+What:		/sys/bus/vmbus/devices/vmbus_*/vendor
+Date:		Dec. 2015
+KernelVersion:	4.5
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The 16 bit vendor ID of the device
+Users:		tools/hv/lsvmbus and user level RDMA libraries
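
For illustration only (not part of the diff), a minimal user-space sketch of consuming the two new attributes; tools/hv/lsvmbus is the canonical consumer, and the concrete "vmbus_10" directory name below is an assumption:

	#include <stdio.h>

	int main(void)
	{
		const char *attrs[] = { "device", "vendor" };
		char path[256], buf[64];
		int i;

		for (i = 0; i < 2; i++) {
			/* "vmbus_10" is an assumed instance name */
			snprintf(path, sizeof(path),
				 "/sys/bus/vmbus/devices/vmbus_10/%s", attrs[i]);
			FILE *f = fopen(path, "r");

			if (f && fgets(buf, sizeof(buf), f))
				printf("%s: %s", attrs[i], buf);
			if (f)
				fclose(f);
		}
		return 0;
	}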
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 0439c2a..3c66248 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -496,8 +496,11 @@
 		1kohm_to_gnd: connected to ground via an 1kOhm resistor,
 		6kohm_to_gnd: connected to ground via a 6kOhm resistor,
 		20kohm_to_gnd: connected to ground via a 20kOhm resistor,
+		90kohm_to_gnd: connected to ground via a 90kOhm resistor,
 		100kohm_to_gnd: connected to ground via an 100kOhm resistor,
+		125kohm_to_gnd: connected to ground via an 125kOhm resistor,
 		500kohm_to_gnd: connected to ground via a 500kOhm resistor,
+		640kohm_to_gnd: connected to ground via a 640kOhm resistor,
 		three_state: left floating.
 		For a list of available output power down options read
 		outX_powerdown_mode_available. If Y is not present the
@@ -1491,3 +1494,10 @@
 		This ABI is especially applicable for humidity sensors
 		to heatup the device and get rid of any condensation
 		in some humidity environment
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_ph_raw
+KernelVersion:	4.5
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Raw (unscaled no offset etc.) pH reading of a substance as a negative
+		base-10 logarithm of hydronium ions in a litre of water.
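
For illustration only (not part of the diff), a minimal user-space sketch of turning the raw reading into a pH value. It assumes the driver also exposes in_ph_scale and in_ph_offset per the usual IIO raw/offset/scale convention, and that the sensor enumerates as iio:device0:

	#include <stdio.h>

	static double read_attr(const char *name)
	{
		char path[128];
		double val = 0.0;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/bus/iio/devices/iio:device0/%s", name);
		f = fopen(path, "r");
		if (f) {
			if (fscanf(f, "%lf", &val) != 1)
				val = 0.0;
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		/* IIO convention: value = (raw + offset) * scale */
		double ph = (read_attr("in_ph_raw") + read_attr("in_ph_offset")) *
			    read_attr("in_ph_scale");

		printf("pH: %f\n", ph);
		return 0;
	}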
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x b/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x
new file mode 100644
index 0000000..3740f25
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x
@@ -0,0 +1,54 @@
+What:		/sys/bus/iio/devices/iio:deviceX/tia_resistanceY
+		/sys/bus/iio/devices/iio:deviceX/tia_capacitanceY
+Date:		December 2015
+KernelVersion:
+Contact:	Andrew F. Davis <afd@ti.com>
+Description:
+		Get and set the resistance and the capacitance settings for the
+		Transimpedance Amplifier. Y is 1 for Rf1 and Cf1, and 2 for
+		Rf2 and Cf2.
+
+What:		/sys/bus/iio/devices/iio:deviceX/tia_separate_en
+Date:		December 2015
+KernelVersion:
+Contact:	Andrew F. Davis <afd@ti.com>
+Description:
+		Enable or disable separate settings for the Transimpedance
+		Amplifier above; when disabled, both values are set by the
+		first channel.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_intensity_ledY_raw
+		/sys/bus/iio/devices/iio:deviceX/in_intensity_ledY_ambient_raw
+Date:		December 2015
+KernelVersion:
+Contact:	Andrew F. Davis <afd@ti.com>
+Description:
+		Get measured values from the ADC for these stages. Y is the
+		specific LED number. The values are expressed in 24-bit twos
+		complement.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_intensity_ledY-ledY_ambient_raw
+Date:		December 2015
+KernelVersion:
+Contact:	Andrew F. Davis <afd@ti.com>
+Description:
+		Get differential values from the ADC for these stages. Y is the
+		specific LED number. The values are expressed in 24-bit twos
+		complement for the specified LEDs.
+
+What:		/sys/bus/iio/devices/iio:deviceX/out_current_ledY_offset
+		/sys/bus/iio/devices/iio:deviceX/out_current_ledY_ambient_offset
+Date:		December 2015
+KernelVersion:
+Contact:	Andrew F. Davis <afd@ti.com>
+Description:
+		Get and set the offset cancellation DAC setting for these
+		stages. The values are expressed in 5-bit sign-magnitude.
+
+What:		/sys/bus/iio/devices/iio:deviceX/out_current_ledY_raw
+Date:		December 2015
+KernelVersion:
+Contact:	Andrew F. Davis <afd@ti.com>
+Description:
+		Get and set the LED current for the specified LED. Y is the
+		specific LED number.
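
For illustration only (not part of the diff), a minimal user-space sketch of exercising two of the attributes documented above; the iio:device0 instance and LED index 1 are assumptions:

	#include <stdio.h>

	#define DEV "/sys/bus/iio/devices/iio:device0/"

	int main(void)
	{
		FILE *f;
		long raw = 0;

		/* measured ADC value, 24-bit two's complement */
		f = fopen(DEV "in_intensity_led1_raw", "r");
		if (f) {
			if (fscanf(f, "%ld", &raw) != 1)
				raw = 0;
			fclose(f);
		}
		printf("led1 raw: %ld\n", raw);

		/* LED drive current setting for LED 1 */
		f = fopen(DEV "out_current_led1_raw", "w");
		if (f) {
			fprintf(f, "10\n");
			fclose(f);
		}
		return 0;
	}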
diff --git a/Documentation/devicetree/bindings/goldfish/pipe.txt b/Documentation/devicetree/bindings/goldfish/pipe.txt
new file mode 100644
index 0000000..e417a31
--- /dev/null
+++ b/Documentation/devicetree/bindings/goldfish/pipe.txt
@@ -0,0 +1,17 @@
+Android Goldfish QEMU Pipe
+
+Android pipe virtual device generated by the Android emulator.
+
+Required properties:
+
+- compatible : should contain "google,android-pipe" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+	android_pipe@a010000 {
+		compatible = "google,android-pipe";
+		reg = <0xff018000 0x2000>;
+		interrupts = <0x12>;
+	};
diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
index 3c10e85..165937e 100644
--- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt
+++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
@@ -1,8 +1,10 @@
-Freescale MMA8452Q, MMA8453Q, MMA8652FC or MMA8653FC triaxial accelerometer
+Freescale MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC or MMA8653FC
+triaxial accelerometer
 
 Required properties:
 
   - compatible: should contain one of
+    * "fsl,mma8451"
     * "fsl,mma8452"
     * "fsl,mma8453"
     * "fsl,mma8652"
diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
new file mode 100644
index 0000000..3223684
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
@@ -0,0 +1,28 @@
+* AT91 SAMA5D2 Analog to Digital Converter (ADC)
+
+Required properties:
+  - compatible: Should be "atmel,sama5d2-adc".
+  - reg: Should contain ADC registers location and length.
+  - interrupts: Should contain the IRQ line for the ADC.
+  - clocks: phandle to device clock.
+  - clock-names: Must be "adc_clk".
+  - vref-supply: Supply used as reference for conversions.
+  - vddana-supply: Supply for the adc device.
+  - atmel,min-sample-rate-hz: Minimum sampling rate, it depends on SoC.
+  - atmel,max-sample-rate-hz: Maximum sampling rate, it depends on SoC.
+  - atmel,startup-time-ms: Startup time expressed in ms, it depends on SoC.
+
+Example:
+
+adc: adc@fc030000 {
+	compatible = "atmel,sama5d2-adc";
+	reg = <0xfc030000 0x100>;
+	interrupts = <40 IRQ_TYPE_LEVEL_HIGH 7>;
+	clocks = <&adc_clk>;
+	clock-names = "adc_clk";
+	atmel,min-sample-rate-hz = <200000>;
+	atmel,max-sample-rate-hz = <20000000>;
+	atmel,startup-time-ms = <4>;
+	vddana-supply = <&vdd_3v3_lp_reg>;
+	vref-supply = <&vdd_3v3_lp_reg>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/mcp3422.txt b/Documentation/devicetree/bindings/iio/adc/mcp3422.txt
index dcae4cc..82bcce0 100644
--- a/Documentation/devicetree/bindings/iio/adc/mcp3422.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mcp3422.txt
@@ -6,6 +6,7 @@
 	"microchip,mcp3422" or
 	"microchip,mcp3423" or
 	"microchip,mcp3424" or
+	"microchip,mcp3425" or
 	"microchip,mcp3426" or
 	"microchip,mcp3427" or
 	"microchip,mcp3428"
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc0832.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc0832.txt
new file mode 100644
index 0000000..d911305
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc0832.txt
@@ -0,0 +1,19 @@
+* Texas Instruments' ADC0831/ADC0832/ADC0834/ADC0838
+
+Required properties:
+ - compatible: Should be one of
+	* "ti,adc0831"
+	* "ti,adc0832"
+	* "ti,adc0834"
+	* "ti,adc0838"
+ - reg: spi chip select number for the device
+ - vref-supply: The regulator supply for ADC reference voltage
+ - spi-max-frequency: Max SPI frequency to use (< 400000)
+
+Example:
+adc@0 {
+	compatible = "ti,adc0832";
+	reg = <0>;
+	vref-supply = <&vdd_supply>;
+	spi-max-frequency = <200000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
new file mode 100644
index 0000000..cffa190
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
@@ -0,0 +1,22 @@
+* Atlas Scientific pH-SM OEM sensor
+
+http://www.atlas-scientific.com/_files/_datasheets/_oem/pH_oem_datasheet.pdf
+
+Required properties:
+
+  - compatible: must be "atlas,ph-sm"
+  - reg: the I2C address of the sensor
+  - interrupt-parent: should be the phandle for the interrupt controller
+  - interrupts: the sole interrupt generated by the device
+
+  Refer to interrupt-controller/interrupts.txt for generic interrupt client
+  node bindings.
+
+Example:
+
+atlas@65 {
+	compatible = "atlas,ph-sm";
+	reg = <0x65>;
+	interrupt-parent = <&gpio1>;
+	interrupts = <16 2>;
+};
diff --git a/Documentation/devicetree/bindings/iio/health/afe4403.txt b/Documentation/devicetree/bindings/iio/health/afe4403.txt
new file mode 100644
index 0000000..2fffd70
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/health/afe4403.txt
@@ -0,0 +1,34 @@
+Texas Instruments AFE4403 Heart rate and Pulse Oximeter
+
+Required properties:
+ - compatible		: Should be "ti,afe4403".
+ - reg			: SPI chip select address of device.
+ - tx-supply		: Regulator supply to transmitting LEDs.
+ - interrupt-parent	: Phandle to the parent interrupt controller.
+ - interrupts		: The interrupt line the device ADC_RDY pin is
+			  connected to. For details refer to
+			  ../../interrupt-controller/interrupts.txt.
+
+Optional properties:
+ - reset-gpios		: GPIO used to reset the device.
+			  For details refer to ../../gpio/gpio.txt.
+
+For other required and optional properties of SPI slave nodes
+please refer to ../../spi/spi-bus.txt.
+
+Example:
+
+&spi0 {
+	heart_mon@0 {
+		compatible = "ti,afe4403";
+		reg = <0>;
+		spi-max-frequency = <10000000>;
+
+		tx-supply = <&vbat>;
+
+		interrupt-parent = <&gpio1>;
+		interrupts = <28 IRQ_TYPE_EDGE_RISING>;
+
+		reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/iio/health/afe4404.txt b/Documentation/devicetree/bindings/iio/health/afe4404.txt
new file mode 100644
index 0000000..de69f20
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/health/afe4404.txt
@@ -0,0 +1,30 @@
+Texas Instruments AFE4404 Heart rate and Pulse Oximeter
+
+Required properties:
+ - compatible		: Should be "ti,afe4404".
+ - reg			: I2C address of the device.
+ - tx-supply		: Regulator supply to transmitting LEDs.
+ - interrupt-parent	: Phandle to the parent interrupt controller.
+ - interrupts		: The interrupt line the device ADC_RDY pin is
+			  connected to. For details refer to
+			  ../interrupt-controller/interrupts.txt.
+
+Optional properties:
+ - reset-gpios		: GPIO used to reset the device.
+			  For details refer to ../gpio/gpio.txt.
+
+Example:
+
+&i2c2 {
+	heart_mon@58 {
+		compatible = "ti,afe4404";
+		reg = <0x58>;
+
+		tx-supply = <&vbat>;
+
+		interrupt-parent = <&gpio1>;
+		interrupts = <28 IRQ_TYPE_EDGE_RISING>;
+
+		reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/iio/health/max30100.txt b/Documentation/devicetree/bindings/iio/health/max30100.txt
index f6fbac6..295a9ed 100644
--- a/Documentation/devicetree/bindings/iio/health/max30100.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30100.txt
@@ -11,11 +11,19 @@
   Refer to interrupt-controller/interrupts.txt for generic
   interrupt client node bindings.
 
+Optional properties:
+  - maxim,led-current-microamp: configuration for LED current in microamperes
+    while the engine is running. The first value is the configuration for
+    the RED LED, and the second is for the IR LED.
+
+    Refer to the datasheet for the allowed current values.
+
 Example:
 
 max30100@057 {
 	compatible = "maxim,max30100";
 	reg = <57>;
+	maxim,led-current-microamp = <24000 50000>;
 	interrupt-parent = <&gpio1>;
 	interrupts = <16 2>;
 };
diff --git a/Documentation/devicetree/bindings/iio/light/opt3001.txt b/Documentation/devicetree/bindings/iio/light/opt3001.txt
new file mode 100644
index 0000000..eac30d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/opt3001.txt
@@ -0,0 +1,26 @@
+* Texas Instruments OPT3001 Ambient Light Sensor
+
+The driver supports interrupt-driven and interrupt-less operation, depending
+on whether an interrupt property has been populated into the DT. Note that
+the optional generation of IIO events on rising/falling light threshold changes
+requires the use of interrupts. Without interrupts, only the simple reading
+of the current light value is supported through the IIO API.
+
+http://www.ti.com/product/opt3001
+
+Required properties:
+  - compatible: should be "ti,opt3001"
+  - reg: the I2C address of the sensor
+
+Optional properties:
+  - interrupt-parent: should be the phandle for the interrupt controller
+  - interrupts: interrupt mapping for GPIO IRQ (configure for falling edge)
+
+Example:
+
+opt3001@44 {
+	compatible = "ti,opt3001";
+	reg = <0x44>;
+	interrupt-parent = <&gpio1>;
+	interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
new file mode 100644
index 0000000..a8ebb46
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
@@ -0,0 +1,25 @@
+EEPROMs (SPI) compatible with Microchip Technology 93xx46 family.
+
+Required properties:
+- compatible : shall be one of:
+    "atmel,at93c46d"
+    "eeprom-93xx46"
+- data-size : number of data bits per word (either 8 or 16)
+
+Optional properties:
+- read-only : parameter-less property which disables writes to the EEPROM
+- select-gpios : if present, specifies the GPIO that will be asserted prior to
+  each access to the EEPROM (e.g. for SPI bus multiplexing)
+
+Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
+apply.  In particular, "reg" and "spi-max-frequency" properties must be given.
+
+Example:
+	eeprom@0 {
+		compatible = "eeprom-93xx46";
+		reg = <0>;
+		spi-max-frequency = <1000000>;
+		spi-cs-high;
+		data-size = <8>;
+		select-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>;
+	};
diff --git a/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt b/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt
new file mode 100644
index 0000000..809df68
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt
@@ -0,0 +1,28 @@
+* NXP LPC18xx EEPROM memory NVMEM driver
+
+Required properties:
+  - compatible: Should be "nxp,lpc1857-eeprom"
+  - reg: Must contain an entry with the physical base address and length
+    for each entry in reg-names.
+  - reg-names: Must include the following entries.
+    - reg: EEPROM registers.
+    - mem: EEPROM address space.
+  - clocks: Must contain an entry for each entry in clock-names.
+  - clock-names: Must include the following entries.
+    - eeprom: EEPROM operating clock.
+  - resets: Should contain a reference to the reset controller asserting
+    the EEPROM in reset.
+  - interrupts: Should contain EEPROM interrupt.
+
+Example:
+
+  eeprom: eeprom@4000e000 {
+    compatible = "nxp,lpc1857-eeprom";
+    reg = <0x4000e000 0x1000>,
+          <0x20040000 0x4000>;
+    reg-names = "reg", "mem";
+    clocks = <&ccu1 CLK_CPU_EEPROM>;
+    clock-names = "eeprom";
+    resets = <&rgu 27>;
+    interrupts = <4>;
+  };
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
new file mode 100644
index 0000000..74cf529
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
@@ -0,0 +1,36 @@
+= Mediatek MTK-EFUSE device tree bindings =
+
+This binding is intended to represent MTK-EFUSE, which is found in most MediaTek SoCs.
+
+Required properties:
+- compatible: should be "mediatek,mt8173-efuse" or "mediatek,efuse"
+- reg: Should contain registers location and length
+
+= Data cells =
+Are child nodes of MTK-EFUSE, bindings of which are described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+	efuse: efuse@10206000 {
+		compatible = "mediatek,mt8173-efuse";
+		reg	   = <0 0x10206000 0 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* Data cells */
+		thermal_calibration: calib@528 {
+			reg = <0x528 0xc>;
+		};
+	};
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+For example:
+
+	thermal {
+		...
+		nvmem-cells = <&thermal_calibration>;
+		nvmem-cell-names = "calibration";
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 72e2c5a..44ddc98 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -28,6 +28,7 @@
 armadeus	ARMadeus Systems SARL
 artesyn	Artesyn Embedded Technologies Inc.
 asahi-kasei	Asahi Kasei Corp.
+atlas	Atlas Scientific LLC
 atmel	Atmel Corporation
 auo	AU Optronics Corporation
 avago	Avago Technologies
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt
index 73f44fc..074adbd 100644
--- a/Documentation/mic/mic_overview.txt
+++ b/Documentation/mic/mic_overview.txt
@@ -12,10 +12,19 @@
 
 Since it is a PCIe card, it does not have the ability to host hardware
 devices for networking, storage and console. We provide these devices
-on X100 coprocessors thus enabling a self-bootable equivalent environment
-for applications. A key benefit of our solution is that it leverages
-the standard virtio framework for network, disk and console devices,
-though in our case the virtio framework is used across a PCIe bus.
+on X100 coprocessors thus enabling a self-bootable equivalent
+environment for applications. A key benefit of our solution is that it
+leverages the standard virtio framework for network, disk and console
+devices, though in our case the virtio framework is used across a PCIe
+bus. A Virtio Over PCIe (VOP) driver allows creating user space
+backends or devices on the host which are used to probe virtio drivers
+for these devices on the MIC card. The existing VRINGH infrastructure
+in the kernel is used to access virtio rings from the host. The card
+VOP driver allows card virtio drivers to communicate with their user
+space backends on the host via a device page. Ring 3 apps on the host
+can add, remove and configure virtio devices. A thin MIC specific
+virtio_config_ops is implemented which is borrowed heavily from
+previous similar implementations in lguest and s390.
 
 MIC PCIe card has a dma controller with 8 channels. These channels are
 shared between the host s/w and the card s/w. 0 to 3 are used by host
@@ -38,7 +47,6 @@
 the host to initiate DMA's to/from the card using the MIC DMA engine and
 the fact that the virtio block storage backend can only be on the host.
 
-                                      |
                +----------+           |             +----------+
                | Card OS  |           |             | Host OS  |
                +----------+           |             +----------+
@@ -47,27 +55,25 @@
         | Virtio| |Virtio  | |Virtio| | |Virtio   |  |Virtio  | |Virtio  |
         | Net   | |Console | |Block | | |Net      |  |Console | |Block   |
         | Driver| |Driver  | |Driver| | |backend  |  |backend | |backend |
-        +-------+ +--------+ +------+ | +---------+  +--------+ +--------+
+        +---+---+ +---+----+ +--+---+ | +---------+  +----+---+ +--------+
             |         |         |     |      |            |         |
             |         |         |     |User  |            |         |
-            |         |         |     |------|------------|---------|-------
-            +-------------------+     |Kernel +--------------------------+
-                      |               |       | Virtio over PCIe IOCTLs  |
-                      |               |       +--------------------------+
-+-----------+         |               |                   |  +-----------+
-| MIC DMA   |         |      +------+ | +------+ +------+ |  | MIC DMA   |
-| Driver    |         |      | SCIF | | | SCIF | | COSM | |  | Driver    |
-+-----------+         |      +------+ | +------+ +--+---+ |  +-----------+
-      |               |         |     |    |        |     |        |
-+---------------+     |      +------+ | +--+---+ +--+---+ | +----------------+
-|MIC virtual Bus|     |      |SCIF  | | |SCIF  | | COSM | | |MIC virtual Bus |
-+---------------+     |      |HW Bus| | |HW Bus| | Bus  | | +----------------+
-      |               |      +------+ | +--+---+ +------+ |              |
-      |               |         |     |       |     |     |              |
-      |   +-----------+---+     |     |       |    +---------------+     |
-      |   |Intel MIC      |     |     |       |    |Intel MIC      |     |
-      +---|Card Driver    |     |     |       |    |Host Driver    |     |
-          +------------+--------+     |       +----+---------------+-----+
+            |         |         |     |------|------------|--+------|-------
+            +---------+---------+     |Kernel                |
+                      |               |                      |
+  +---------+     +---+----+ +------+ | +------+ +------+ +--+---+  +-------+
+  |MIC DMA  |     |  VOP   | | SCIF | | | SCIF | | COSM | | VOP  |  |MIC DMA|
+  +---+-----+     +---+----+ +--+---+ | +--+---+ +--+---+ +------+  +----+--+
+      |               |         |     |    |        |                    |
+  +---+-----+     +---+----+ +--+---+ | +--+---+ +--+---+ +------+  +----+--+
+  |MIC      |     |  VOP   | |SCIF  | | |SCIF  | | COSM | | VOP  |  | MIC   |
+  |HW Bus   |     |  HW Bus| |HW Bus| | |HW Bus| | Bus  | |HW Bus|  |HW Bus |
+  +---------+     +--------+ +--+---+ | +--+---+ +------+ +------+  +-------+
+      |               |         |     |       |     |                    |
+      |   +-----------+--+      |     |       |    +---------------+     |
+      |   |Intel MIC     |      |     |       |    |Intel MIC      |     |
+      |   |Card Driver   |      |     |       |    |Host Driver    |     |
+      +---+--------------+------+     |       +----+---------------+-----+
                  |                    |                   |
              +-------------------------------------------------------------+
              |                                                             |
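
For illustration only (not part of the diff), a minimal host-side sketch of the ring 3 entry point described above: management of the card's virtio devices goes through the per-card VOP character device, whose node name matches the mpssd change below. The full descriptor setup and MIC_VIRTIO_* ioctl sequence are in the mpssd sample (Documentation/mic/mpssd/mpssd.c); only the open of the device is sketched here:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* one VOP device per card; card 0 assumed here */
		int fd = open("/dev/vop_virtio0", O_RDWR);

		if (fd < 0) {
			perror("open /dev/vop_virtio0");
			return 1;
		}
		/*
		 * ioctl(fd, MIC_VIRTIO_ADD_DEVICE, desc) and friends would
		 * follow, with desc built as in the mpssd sample.
		 */
		close(fd);
		return 0;
	}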
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss
index 09ea9093..5fcf9fa 100755
--- a/Documentation/mic/mpssd/mpss
+++ b/Documentation/mic/mpssd/mpss
@@ -35,7 +35,7 @@
 
 exec=/usr/sbin/mpssd
 sysfs="/sys/class/mic"
-mic_modules="mic_host mic_x100_dma scif"
+mic_modules="mic_host mic_x100_dma scif vop"
 
 start()
 {
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
index aaeafa1..518dece 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/Documentation/mic/mpssd/mpssd.c
@@ -926,7 +926,7 @@
 	char path[PATH_MAX];
 	int fd, err;
 
-	snprintf(path, PATH_MAX, "/dev/mic%d", mic->id);
+	snprintf(path, PATH_MAX, "/dev/vop_virtio%d", mic->id);
 	fd = open(path, O_RDWR);
 	if (fd < 0) {
 		mpsslog("Could not open %s %s\n", path, strerror(errno));
diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/Documentation/misc-devices/lcd-panel-cgram.txt
similarity index 100%
rename from drivers/staging/panel/lcd-panel-cgram.txt
rename to Documentation/misc-devices/lcd-panel-cgram.txt
diff --git a/Documentation/misc-devices/mei/mei.txt b/Documentation/misc-devices/mei/mei.txt
index 91c1fa3..2b80a0c 100644
--- a/Documentation/misc-devices/mei/mei.txt
+++ b/Documentation/misc-devices/mei/mei.txt
@@ -231,15 +231,15 @@
 The Intel AMT Watchdog is composed of two parts:
 	1) Firmware feature - receives the heartbeats
 	   and sends an event when the heartbeats stop.
-	2) Intel MEI driver - connects to the watchdog feature, configures the
-	   watchdog and sends the heartbeats.
+	2) Intel MEI iAMT watchdog driver - connects to the watchdog feature,
+	   configures the watchdog and sends the heartbeats.
 
-The Intel MEI driver uses the kernel watchdog API to configure the Intel AMT
-Watchdog and to send heartbeats to it. The default timeout of the
+The Intel MEI iAMT watchdog driver uses the kernel watchdog API to configure
+the Intel AMT Watchdog and to send heartbeats to it. The default timeout of the
 watchdog is 120 seconds.
 
-If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
-the Intel MEI driver will disable the sending of heartbeats.
+If Intel AMT is not enabled in the firmware, the watchdog client won't
+enumerate on the ME client bus and the watchdog device won't be exposed.
 
 
 Supported Chipsets
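
For illustration only (not part of the diff), a minimal user-space sketch of the heartbeat side, using only the generic Linux watchdog chardev API; nothing here is specific to the MEI driver, and /dev/watchdog is the usual default node, which may differ on systems with several watchdogs:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);
		int timeout = 0, i;

		if (fd < 0) {
			perror("open /dev/watchdog");
			return 1;
		}
		ioctl(fd, WDIOC_GETTIMEOUT, &timeout);
		printf("timeout: %d seconds\n", timeout);	/* 120 by default here */

		for (i = 0; i < 5; i++) {
			ioctl(fd, WDIOC_KEEPALIVE, 0);	/* send a heartbeat */
			sleep(timeout / 2 ? timeout / 2 : 1);
		}
		write(fd, "V", 1);	/* magic close, unless nowayout is set */
		close(fd);
		return 0;
	}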
diff --git a/MAINTAINERS b/MAINTAINERS
index da3e4d8..dd9ee367d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -769,6 +769,12 @@
 S:	Maintained
 F:	sound/aoa/
 
+APEX EMBEDDED SYSTEMS STX104 DAC DRIVER
+M:	William Breathitt Gray <vilhelm.gray@gmail.com>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	drivers/iio/dac/stx104.c
+
 APM DRIVER
 M:	Jiri Kosina <jikos@kernel.org>
 S:	Odd fixes
@@ -1956,6 +1962,12 @@
 S:	Supported
 F:	drivers/tty/serial/atmel_serial.c
 
+ATMEL SAMA5D2 ADC DRIVER
+M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+L:	linux-iio@vger.kernel.org
+S:	Supported
+F:	drivers/iio/adc/at91-sama5d2_adc.c
+
 ATMEL Audio ALSA driver
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -3541,13 +3553,6 @@
 S:	Maintained
 F:	drivers/staging/dgnc/
 
-DIGI EPCA PCI PRODUCTS
-M:	Lidza Louina <lidza.louina@gmail.com>
-M:	Daeseok Youn <daeseok.youn@gmail.com>
-L:	driverdev-devel@linuxdriverproject.org
-S:	Maintained
-F:	drivers/staging/dgap/
-
 DIOLAN U2C-12 I2C DRIVER
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
@@ -5747,6 +5752,7 @@
 F:	include/uapi/linux/mei.h
 F:	include/linux/mei_cl_bus.h
 F:	drivers/misc/mei/*
+F:	drivers/watchdog/mei_wdt.c
 F:	Documentation/misc-devices/mei/*
 
 INTEL MIC DRIVERS (mic)
@@ -8157,6 +8163,13 @@
 F:	Documentation/mn10300/
 F:	arch/mn10300/
 
+PARALLEL LCD/KEYPAD PANEL DRIVER
+M:      Willy Tarreau <willy@haproxy.com>
+M:      Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
+S:      Odd Fixes
+F:      Documentation/misc-devices/lcd-panel-cgram.txt
+F:      drivers/misc/panel.c
+
 PARALLEL PORT SUBSYSTEM
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
 M:	Sudip Mukherjee <sudip@vectorindia.org>
@@ -10391,19 +10404,6 @@
 S:	Maintained
 F:	drivers/staging/nvec/
 
-STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
-M:	Jens Frederich <jfrederich@gmail.com>
-M:	Daniel Drake <dsd@laptop.org>
-M:	Jon Nettleton <jon.nettleton@gmail.com>
-W:	http://wiki.laptop.org/go/DCON
-S:	Maintained
-F:	drivers/staging/olpc_dcon/
-
-STAGING - PARALLEL LCD/KEYPAD PANEL DRIVER
-M:	Willy Tarreau <willy@meta-x.org>
-S:	Odd Fixes
-F:	drivers/staging/panel/
-
 STAGING - REALTEK RTL8712U DRIVERS
 M:	Larry Finger <Larry.Finger@lwfinger.net>
 M:	Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7d00b7a..16288e7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1321,6 +1321,7 @@
 	struct binder_transaction *t;
 	struct binder_work *tcomplete;
 	binder_size_t *offp, *off_end;
+	binder_size_t off_min;
 	struct binder_proc *target_proc;
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
@@ -1522,18 +1523,24 @@
 		goto err_bad_offset;
 	}
 	off_end = (void *)offp + tr->offsets_size;
+	off_min = 0;
 	for (; offp < off_end; offp++) {
 		struct flat_binder_object *fp;
 
 		if (*offp > t->buffer->data_size - sizeof(*fp) ||
+		    *offp < off_min ||
 		    t->buffer->data_size < sizeof(*fp) ||
 		    !IS_ALIGNED(*offp, sizeof(u32))) {
-			binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
-					  proc->pid, thread->pid, (u64)*offp);
+			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+					  proc->pid, thread->pid, (u64)*offp,
+					  (u64)off_min,
+					  (u64)(t->buffer->data_size -
+					  sizeof(*fp)));
 			return_error = BR_FAILED_REPLY;
 			goto err_bad_offset;
 		}
 		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+		off_min = *offp + sizeof(struct flat_binder_object);
 		switch (fp->type) {
 		case BINDER_TYPE_BINDER:
 		case BINDER_TYPE_WEAK_BINDER: {
@@ -2737,6 +2744,10 @@
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
 			proc->pid, current->pid, cmd, arg);*/
 
+	if (unlikely(current->mm != proc->vma_vm_mm)) {
+		pr_err("current mm mismatch proc mm\n");
+		return -EINVAL;
+	}
 	trace_binder_ioctl(cmd, arg);
 
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -2951,6 +2962,7 @@
 		return -ENOMEM;
 	get_task_struct(current);
 	proc->tsk = current;
+	proc->vma_vm_mm = current->mm;
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
@@ -3593,13 +3605,24 @@
 
 static int binder_proc_show(struct seq_file *m, void *unused)
 {
+	struct binder_proc *itr;
 	struct binder_proc *proc = m->private;
 	int do_lock = !binder_debug_no_lock;
+	bool valid_proc = false;
 
 	if (do_lock)
 		binder_lock(__func__);
-	seq_puts(m, "binder proc state:\n");
-	print_binder_proc(m, proc, 1);
+
+	hlist_for_each_entry(itr, &binder_procs, proc_node) {
+		if (itr == proc) {
+			valid_proc = true;
+			break;
+		}
+	}
+	if (valid_proc) {
+		seq_puts(m, "binder proc state:\n");
+		print_binder_proc(m, proc, 1);
+	}
 	if (do_lock)
 		binder_unlock(__func__);
 	return 0;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9250e5..a7f4aa3 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -257,7 +257,7 @@
 		vunmap(buf->data);
 		for (i = 0; i < buf->nr_pages; i++)
 			__free_page(buf->pages[i]);
-		kfree(buf->pages);
+		vfree(buf->pages);
 	} else
 #endif
 		vfree(buf->data);
@@ -353,15 +353,15 @@
 		rc = fw_read_file_contents(file, buf);
 		fput(file);
 		if (rc)
-			dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
-				path, rc);
+			dev_warn(device, "loading %s failed with error %d\n",
+				 path, rc);
 		else
 			break;
 	}
 	__putname(path);
 
 	if (!rc) {
-		dev_dbg(device, "firmware: direct-loading firmware %s\n",
+		dev_dbg(device, "direct-loading %s\n",
 			buf->fw_id);
 		mutex_lock(&fw_lock);
 		set_bit(FW_STATUS_DONE, &buf->status);
@@ -660,7 +660,7 @@
 		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
 			for (i = 0; i < fw_buf->nr_pages; i++)
 				__free_page(fw_buf->pages[i]);
-			kfree(fw_buf->pages);
+			vfree(fw_buf->pages);
 			fw_buf->pages = NULL;
 			fw_buf->page_array_size = 0;
 			fw_buf->nr_pages = 0;
@@ -770,8 +770,7 @@
 					 buf->page_array_size * 2);
 		struct page **new_pages;
 
-		new_pages = kmalloc(new_array_size * sizeof(void *),
-				    GFP_KERNEL);
+		new_pages = vmalloc(new_array_size * sizeof(void *));
 		if (!new_pages) {
 			fw_load_abort(fw_priv);
 			return -ENOMEM;
@@ -780,7 +779,7 @@
 		       buf->page_array_size * sizeof(void *));
 		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
 		       (new_array_size - buf->page_array_size));
-		kfree(buf->pages);
+		vfree(buf->pages);
 		buf->pages = new_pages;
 		buf->page_array_size = new_array_size;
 	}
@@ -1051,7 +1050,7 @@
 	}
 
 	if (fw_get_builtin_firmware(firmware, name)) {
-		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+		dev_dbg(device, "using built-in %s\n", name);
 		return 0; /* assigned */
 	}
 
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 0129232..678fa97 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -496,12 +496,12 @@
 
 #ifdef CONFIG_PROC_FS
 
-static char *floppy_types[] = {
+static const char * const floppy_types[] = {
 	"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
 	"3.5'' 2.88M", "3.5'' 2.88M"
 };
 
-static char *gfx_types[] = {
+static const char * const gfx_types[] = {
 	"EGA, VGA, ... (with BIOS)",
 	"CGA (40 cols)",
 	"CGA (80 cols)",
@@ -602,7 +602,7 @@
 
 static struct {
 	unsigned char val;
-	char *name;
+	const char *name;
 } boot_prefs[] = {
 	{ 0x80, "TOS" },
 	{ 0x40, "ASV" },
@@ -611,7 +611,7 @@
 	{ 0x00, "unspecified" }
 };
 
-static char *languages[] = {
+static const char * const languages[] = {
 	"English (US)",
 	"German",
 	"French",
@@ -623,7 +623,7 @@
 	"Swiss (German)"
 };
 
-static char *dateformat[] = {
+static const char * const dateformat[] = {
 	"MM%cDD%cYY",
 	"DD%cMM%cYY",
 	"YY%cMM%cDD",
@@ -634,7 +634,7 @@
 	"7 (undefined)"
 };
 
-static char *colors[] = {
+static const char * const colors[] = {
 	"2", "4", "16", "256", "65536", "??", "??", "??"
 };
 
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 76c490f..0e18442 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -129,10 +129,9 @@
 
 static void button_sequence_finished (unsigned long parameters)
 {
-#ifdef CONFIG_NWBUTTON_REBOOT		/* Reboot using button is enabled */
-	if (button_press_count == reboot_count)
+	if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+	    button_press_count == reboot_count)
 		kill_cad_pid(SIGINT, 1);	/* Ask init to reboot us */
-#endif /* CONFIG_NWBUTTON_REBOOT */
 	button_consume_callbacks (button_press_count);
 	bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
 	button_press_count = 0;		/* Reset the button press counter */
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index ae0b42b..d233688 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -69,12 +69,13 @@
 #include <linux/ppdev.h>
 #include <linux/mutex.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #define PP_VERSION "ppdev: user-space parallel port driver"
 #define CHRDEV "ppdev"
 
 struct pp_struct {
-	struct pardevice * pdev;
+	struct pardevice *pdev;
 	wait_queue_head_t irq_wait;
 	atomic_t irqc;
 	unsigned int flags;
@@ -98,18 +99,26 @@
 #define ROUND_UP(x,y) (((x)+(y)-1)/(y))
 
 static DEFINE_MUTEX(pp_do_mutex);
-static inline void pp_enable_irq (struct pp_struct *pp)
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32	_IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32	_IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64	_IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64	_IOW(PP_IOCTL, 0x96, s64[2])
+
+static inline void pp_enable_irq(struct pp_struct *pp)
 {
 	struct parport *port = pp->pdev->port;
-	port->ops->enable_irq (port);
+
+	port->ops->enable_irq(port);
 }
 
-static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
-			loff_t * ppos)
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+		       loff_t *ppos)
 {
 	unsigned int minor = iminor(file_inode(file));
 	struct pp_struct *pp = file->private_data;
-	char * kbuffer;
+	char *kbuffer;
 	ssize_t bytes_read = 0;
 	struct parport *pport;
 	int mode;
@@ -125,16 +134,15 @@
 		return 0;
 
 	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
-	if (!kbuffer) {
+	if (!kbuffer)
 		return -ENOMEM;
-	}
 	pport = pp->pdev->port;
 	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
 
-	parport_set_timeout (pp->pdev,
-			     (file->f_flags & O_NONBLOCK) ?
-			     PARPORT_INACTIVITY_O_NONBLOCK :
-			     pp->default_inactivity);
+	parport_set_timeout(pp->pdev,
+			    (file->f_flags & O_NONBLOCK) ?
+			    PARPORT_INACTIVITY_O_NONBLOCK :
+			    pp->default_inactivity);
 
 	while (bytes_read == 0) {
 		ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
@@ -144,20 +152,17 @@
 			int flags = 0;
 			size_t (*fn)(struct parport *, void *, size_t, int);
 
-			if (pp->flags & PP_W91284PIC) {
+			if (pp->flags & PP_W91284PIC)
 				flags |= PARPORT_W91284PIC;
-			}
-			if (pp->flags & PP_FASTREAD) {
+			if (pp->flags & PP_FASTREAD)
 				flags |= PARPORT_EPP_FAST;
-			}
-			if (pport->ieee1284.mode & IEEE1284_ADDR) {
+			if (pport->ieee1284.mode & IEEE1284_ADDR)
 				fn = pport->ops->epp_read_addr;
-			} else {
+			else
 				fn = pport->ops->epp_read_data;
-			}
 			bytes_read = (*fn)(pport, kbuffer, need, flags);
 		} else {
-			bytes_read = parport_read (pport, kbuffer, need);
+			bytes_read = parport_read(pport, kbuffer, need);
 		}
 
 		if (bytes_read != 0)
@@ -168,7 +173,7 @@
 			break;
 		}
 
-		if (signal_pending (current)) {
+		if (signal_pending(current)) {
 			bytes_read = -ERESTARTSYS;
 			break;
 		}
@@ -176,22 +181,22 @@
 		cond_resched();
 	}
 
-	parport_set_timeout (pp->pdev, pp->default_inactivity);
+	parport_set_timeout(pp->pdev, pp->default_inactivity);
 
-	if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
+	if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
 		bytes_read = -EFAULT;
 
-	kfree (kbuffer);
-	pp_enable_irq (pp);
+	kfree(kbuffer);
+	pp_enable_irq(pp);
 	return bytes_read;
 }
 
-static ssize_t pp_write (struct file * file, const char __user * buf,
-			 size_t count, loff_t * ppos)
+static ssize_t pp_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
 {
 	unsigned int minor = iminor(file_inode(file));
 	struct pp_struct *pp = file->private_data;
-	char * kbuffer;
+	char *kbuffer;
 	ssize_t bytes_written = 0;
 	ssize_t wrote;
 	int mode;
@@ -204,21 +209,21 @@
 	}
 
 	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
-	if (!kbuffer) {
+	if (!kbuffer)
 		return -ENOMEM;
-	}
+
 	pport = pp->pdev->port;
 	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
 
-	parport_set_timeout (pp->pdev,
-			     (file->f_flags & O_NONBLOCK) ?
-			     PARPORT_INACTIVITY_O_NONBLOCK :
-			     pp->default_inactivity);
+	parport_set_timeout(pp->pdev,
+			    (file->f_flags & O_NONBLOCK) ?
+			    PARPORT_INACTIVITY_O_NONBLOCK :
+			    pp->default_inactivity);
 
 	while (bytes_written < count) {
 		ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
 
-		if (copy_from_user (kbuffer, buf + bytes_written, n)) {
+		if (copy_from_user(kbuffer, buf + bytes_written, n)) {
 			bytes_written = -EFAULT;
 			break;
 		}
@@ -226,20 +231,19 @@
 		if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
 			/* do a fast EPP write */
 			if (pport->ieee1284.mode & IEEE1284_ADDR) {
-				wrote = pport->ops->epp_write_addr (pport,
+				wrote = pport->ops->epp_write_addr(pport,
 					kbuffer, n, PARPORT_EPP_FAST);
 			} else {
-				wrote = pport->ops->epp_write_data (pport,
+				wrote = pport->ops->epp_write_data(pport,
 					kbuffer, n, PARPORT_EPP_FAST);
 			}
 		} else {
-			wrote = parport_write (pp->pdev->port, kbuffer, n);
+			wrote = parport_write(pp->pdev->port, kbuffer, n);
 		}
 
 		if (wrote <= 0) {
-			if (!bytes_written) {
+			if (!bytes_written)
 				bytes_written = wrote;
-			}
 			break;
 		}
 
@@ -251,67 +255,69 @@
 			break;
 		}
 
-		if (signal_pending (current))
+		if (signal_pending(current))
 			break;
 
 		cond_resched();
 	}
 
-	parport_set_timeout (pp->pdev, pp->default_inactivity);
+	parport_set_timeout(pp->pdev, pp->default_inactivity);
 
-	kfree (kbuffer);
-	pp_enable_irq (pp);
+	kfree(kbuffer);
+	pp_enable_irq(pp);
 	return bytes_written;
 }
 
-static void pp_irq (void *private)
+static void pp_irq(void *private)
 {
 	struct pp_struct *pp = private;
 
 	if (pp->irqresponse) {
-		parport_write_control (pp->pdev->port, pp->irqctl);
+		parport_write_control(pp->pdev->port, pp->irqctl);
 		pp->irqresponse = 0;
 	}
 
-	atomic_inc (&pp->irqc);
-	wake_up_interruptible (&pp->irq_wait);
+	atomic_inc(&pp->irqc);
+	wake_up_interruptible(&pp->irq_wait);
 }
 
-static int register_device (int minor, struct pp_struct *pp)
+static int register_device(int minor, struct pp_struct *pp)
 {
 	struct parport *port;
-	struct pardevice * pdev = NULL;
+	struct pardevice *pdev = NULL;
 	char *name;
-	int fl;
+	struct pardev_cb ppdev_cb;
 
 	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
 		return -ENOMEM;
 
-	port = parport_find_number (minor);
+	port = parport_find_number(minor);
 	if (!port) {
-		printk (KERN_WARNING "%s: no associated port!\n", name);
-		kfree (name);
+		printk(KERN_WARNING "%s: no associated port!\n", name);
+		kfree(name);
 		return -ENXIO;
 	}
 
-	fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
-	pdev = parport_register_device (port, name, NULL,
-					NULL, pp_irq, fl, pp);
-	parport_put_port (port);
+	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+	ppdev_cb.irq_func = pp_irq;
+	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+	ppdev_cb.private = pp;
+	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+	parport_put_port(port);
 
 	if (!pdev) {
-		printk (KERN_WARNING "%s: failed to register device!\n", name);
-		kfree (name);
+		printk(KERN_WARNING "%s: failed to register device!\n", name);
+		kfree(name);
 		return -ENXIO;
 	}
 
 	pp->pdev = pdev;
-	pr_debug("%s: registered pardevice\n", name);
+	dev_dbg(&pdev->dev, "registered pardevice\n");
 	return 0;
 }
 
-static enum ieee1284_phase init_phase (int mode)
+static enum ieee1284_phase init_phase(int mode)
 {
 	switch (mode & ~(IEEE1284_DEVICEID
 			 | IEEE1284_ADDR)) {
@@ -322,11 +328,27 @@
 	return IEEE1284_PH_FWD_IDLE;
 }
 
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+	long to_jiffies;
+
+	if ((tv_sec < 0) || (tv_usec < 0))
+		return -EINVAL;
+
+	to_jiffies = usecs_to_jiffies(tv_usec);
+	to_jiffies += tv_sec * HZ;
+	if (to_jiffies <= 0)
+		return -EINVAL;
+
+	pdev->timeout = to_jiffies;
+	return 0;
+}
+
 static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	unsigned int minor = iminor(file_inode(file));
 	struct pp_struct *pp = file->private_data;
-	struct parport * port;
+	struct parport *port;
 	void __user *argp = (void __user *)arg;
 
 	/* First handle the cases that don't take arguments. */
@@ -337,19 +359,19 @@
 		int ret;
 
 		if (pp->flags & PP_CLAIMED) {
-			pr_debug(CHRDEV "%x: you've already got it!\n", minor);
+			dev_dbg(&pp->pdev->dev, "you've already got it!\n");
 			return -EINVAL;
 		}
 
 		/* Deferred device registration. */
 		if (!pp->pdev) {
-			int err = register_device (minor, pp);
-			if (err) {
+			int err = register_device(minor, pp);
+
+			if (err)
 				return err;
-			}
 		}
 
-		ret = parport_claim_or_block (pp->pdev);
+		ret = parport_claim_or_block(pp->pdev);
 		if (ret < 0)
 			return ret;
 
@@ -357,7 +379,7 @@
 
 		/* For interrupt-reporting to work, we need to be
 		 * informed of each interrupt. */
-		pp_enable_irq (pp);
+		pp_enable_irq(pp);
 
 		/* We may need to fix up the state machine. */
 		info = &pp->pdev->port->ieee1284;
@@ -365,15 +387,15 @@
 		pp->saved_state.phase = info->phase;
 		info->mode = pp->state.mode;
 		info->phase = pp->state.phase;
-		pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
-		parport_set_timeout (pp->pdev, pp->default_inactivity);
+		pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+		parport_set_timeout(pp->pdev, pp->default_inactivity);
 
 		return 0;
 	    }
 	case PPEXCL:
 		if (pp->pdev) {
-			pr_debug(CHRDEV "%x: too late for PPEXCL; "
-				"already registered\n", minor);
+			dev_dbg(&pp->pdev->dev,
+				"too late for PPEXCL; already registered\n");
 			if (pp->flags & PP_EXCL)
 				/* But it's not really an error. */
 				return 0;
@@ -388,11 +410,12 @@
 	case PPSETMODE:
 	    {
 		int mode;
-		if (copy_from_user (&mode, argp, sizeof (mode)))
+
+		if (copy_from_user(&mode, argp, sizeof(mode)))
 			return -EFAULT;
 		/* FIXME: validate mode */
 		pp->state.mode = mode;
-		pp->state.phase = init_phase (mode);
+		pp->state.phase = init_phase(mode);
 
 		if (pp->flags & PP_CLAIMED) {
 			pp->pdev->port->ieee1284.mode = mode;
@@ -405,28 +428,27 @@
 	    {
 		int mode;
 
-		if (pp->flags & PP_CLAIMED) {
+		if (pp->flags & PP_CLAIMED)
 			mode = pp->pdev->port->ieee1284.mode;
-		} else {
+		else
 			mode = pp->state.mode;
-		}
-		if (copy_to_user (argp, &mode, sizeof (mode))) {
+
+		if (copy_to_user(argp, &mode, sizeof(mode)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	case PPSETPHASE:
 	    {
 		int phase;
-		if (copy_from_user (&phase, argp, sizeof (phase))) {
+
+		if (copy_from_user(&phase, argp, sizeof(phase)))
 			return -EFAULT;
-		}
+
 		/* FIXME: validate phase */
 		pp->state.phase = phase;
 
-		if (pp->flags & PP_CLAIMED) {
+		if (pp->flags & PP_CLAIMED)
 			pp->pdev->port->ieee1284.phase = phase;
-		}
 
 		return 0;
 	    }
@@ -434,38 +456,34 @@
 	    {
 		int phase;
 
-		if (pp->flags & PP_CLAIMED) {
+		if (pp->flags & PP_CLAIMED)
 			phase = pp->pdev->port->ieee1284.phase;
-		} else {
+		else
 			phase = pp->state.phase;
-		}
-		if (copy_to_user (argp, &phase, sizeof (phase))) {
+		if (copy_to_user(argp, &phase, sizeof(phase)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	case PPGETMODES:
 	    {
 		unsigned int modes;
 
-		port = parport_find_number (minor);
+		port = parport_find_number(minor);
 		if (!port)
 			return -ENODEV;
 
 		modes = port->modes;
 		parport_put_port(port);
-		if (copy_to_user (argp, &modes, sizeof (modes))) {
+		if (copy_to_user(argp, &modes, sizeof(modes)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	case PPSETFLAGS:
 	    {
 		int uflags;
 
-		if (copy_from_user (&uflags, argp, sizeof (uflags))) {
+		if (copy_from_user(&uflags, argp, sizeof(uflags)))
 			return -EFAULT;
-		}
 		pp->flags &= ~PP_FLAGMASK;
 		pp->flags |= (uflags & PP_FLAGMASK);
 		return 0;
@@ -475,9 +493,8 @@
 		int uflags;
 
 		uflags = pp->flags & PP_FLAGMASK;
-		if (copy_to_user (argp, &uflags, sizeof (uflags))) {
+		if (copy_to_user(argp, &uflags, sizeof(uflags)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	}	/* end switch() */
@@ -495,27 +512,28 @@
 		unsigned char reg;
 		unsigned char mask;
 		int mode;
+		s32 time32[2];
+		s64 time64[2];
+		struct timespec64 ts;
 		int ret;
-		struct timeval par_timeout;
-		long to_jiffies;
 
 	case PPRSTATUS:
-		reg = parport_read_status (port);
-		if (copy_to_user (argp, &reg, sizeof (reg)))
+		reg = parport_read_status(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
 			return -EFAULT;
 		return 0;
 	case PPRDATA:
-		reg = parport_read_data (port);
-		if (copy_to_user (argp, &reg, sizeof (reg)))
+		reg = parport_read_data(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
 			return -EFAULT;
 		return 0;
 	case PPRCONTROL:
-		reg = parport_read_control (port);
-		if (copy_to_user (argp, &reg, sizeof (reg)))
+		reg = parport_read_control(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
 			return -EFAULT;
 		return 0;
 	case PPYIELD:
-		parport_yield_blocking (pp->pdev);
+		parport_yield_blocking(pp->pdev);
 		return 0;
 
 	case PPRELEASE:
@@ -525,45 +543,45 @@
 		pp->state.phase = info->phase;
 		info->mode = pp->saved_state.mode;
 		info->phase = pp->saved_state.phase;
-		parport_release (pp->pdev);
+		parport_release(pp->pdev);
 		pp->flags &= ~PP_CLAIMED;
 		return 0;
 
 	case PPWCONTROL:
-		if (copy_from_user (&reg, argp, sizeof (reg)))
+		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
-		parport_write_control (port, reg);
+		parport_write_control(port, reg);
 		return 0;
 
 	case PPWDATA:
-		if (copy_from_user (&reg, argp, sizeof (reg)))
+		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
-		parport_write_data (port, reg);
+		parport_write_data(port, reg);
 		return 0;
 
 	case PPFCONTROL:
-		if (copy_from_user (&mask, argp,
-				    sizeof (mask)))
+		if (copy_from_user(&mask, argp,
+				   sizeof(mask)))
 			return -EFAULT;
-		if (copy_from_user (&reg, 1 + (unsigned char __user *) arg,
-				    sizeof (reg)))
+		if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+				   sizeof(reg)))
 			return -EFAULT;
-		parport_frob_control (port, mask, reg);
+		parport_frob_control(port, mask, reg);
 		return 0;
 
 	case PPDATADIR:
-		if (copy_from_user (&mode, argp, sizeof (mode)))
+		if (copy_from_user(&mode, argp, sizeof(mode)))
 			return -EFAULT;
 		if (mode)
-			port->ops->data_reverse (port);
+			port->ops->data_reverse(port);
 		else
-			port->ops->data_forward (port);
+			port->ops->data_forward(port);
 		return 0;
 
 	case PPNEGOT:
-		if (copy_from_user (&mode, argp, sizeof (mode)))
+		if (copy_from_user(&mode, argp, sizeof(mode)))
 			return -EFAULT;
-		switch ((ret = parport_negotiate (port, mode))) {
+		switch ((ret = parport_negotiate(port, mode))) {
 		case 0: break;
 		case -1: /* handshake failed, peripheral not IEEE 1284 */
 			ret = -EIO;
@@ -572,11 +590,11 @@
 			ret = -ENXIO;
 			break;
 		}
-		pp_enable_irq (pp);
+		pp_enable_irq(pp);
 		return ret;
 
 	case PPWCTLONIRQ:
-		if (copy_from_user (&reg, argp, sizeof (reg)))
+		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 
 		/* Remember what to set the control lines to, for next
@@ -586,39 +604,50 @@
 		return 0;
 
 	case PPCLRIRQ:
-		ret = atomic_read (&pp->irqc);
-		if (copy_to_user (argp, &ret, sizeof (ret)))
+		ret = atomic_read(&pp->irqc);
+		if (copy_to_user(argp, &ret, sizeof(ret)))
 			return -EFAULT;
-		atomic_sub (ret, &pp->irqc);
+		atomic_sub(ret, &pp->irqc);
 		return 0;
 
-	case PPSETTIME:
-		if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
+	case PPSETTIME32:
+		if (copy_from_user(time32, argp, sizeof(time32)))
 			return -EFAULT;
-		}
-		/* Convert to jiffies, place in pp->pdev->timeout */
-		if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
+
+		return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+	case PPSETTIME64:
+		if (copy_from_user(time64, argp, sizeof(time64)))
+			return -EFAULT;
+
+		return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+	case PPGETTIME32:
+		jiffies_to_timespec64(pp->pdev->timeout, &ts);
+		time32[0] = ts.tv_sec;
+		time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+		if ((time32[0] < 0) || (time32[1] < 0))
 			return -EINVAL;
-		}
-		to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
-		to_jiffies += par_timeout.tv_sec * (long)HZ;
-		if (to_jiffies <= 0) {
-			return -EINVAL;
-		}
-		pp->pdev->timeout = to_jiffies;
+
+		if (copy_to_user(argp, time32, sizeof(time32)))
+			return -EFAULT;
+
 		return 0;
 
-	case PPGETTIME:
-		to_jiffies = pp->pdev->timeout;
-		memset(&par_timeout, 0, sizeof(par_timeout));
-		par_timeout.tv_sec = to_jiffies / HZ;
-		par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
-		if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
+	case PPGETTIME64:
+		jiffies_to_timespec64(pp->pdev->timeout, &ts);
+		time64[0] = ts.tv_sec;
+		time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+		if ((time64[0] < 0) || (time64[1] < 0))
+			return -EINVAL;
+
+		if (copy_to_user(argp, time64, sizeof(time64)))
 			return -EFAULT;
+
 		return 0;
 
 	default:
-		pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
+		dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
 		return -EINVAL;
 	}
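
For illustration only (not part of the diff), a minimal user-space sketch of the ABI the handlers above serve: user space keeps using PPSETTIME/PPGETTIME with a struct timeval from <linux/ppdev.h>, and the PPGETTIME32/PPGETTIME64 split only selects the matching layout for 32-bit and 64-bit callers. /dev/parport0 is assumed to exist:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/time.h>
	#include <linux/ppdev.h>

	int main(void)
	{
		struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
		int fd = open("/dev/parport0", O_RDWR);

		if (fd < 0) {
			perror("open /dev/parport0");
			return 1;
		}
		if (ioctl(fd, PPCLAIM) == 0) {
			ioctl(fd, PPSETTIME, &tv);	/* 1s inactivity timeout */
			ioctl(fd, PPGETTIME, &tv);
			printf("timeout: %ld.%06ld s\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
			ioctl(fd, PPRELEASE);
		}
		close(fd);
		return 0;
	}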
 
@@ -629,13 +658,22 @@
 static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long ret;
+
 	mutex_lock(&pp_do_mutex);
 	ret = pp_do_ioctl(file, cmd, arg);
 	mutex_unlock(&pp_do_mutex);
 	return ret;
 }
 
-static int pp_open (struct inode * inode, struct file * file)
+#ifdef CONFIG_COMPAT
+static long pp_compat_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int pp_open(struct inode *inode, struct file *file)
 {
 	unsigned int minor = iminor(inode);
 	struct pp_struct *pp;
@@ -643,16 +681,16 @@
 	if (minor >= PARPORT_MAX)
 		return -ENXIO;
 
-	pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
+	pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 
 	pp->state.mode = IEEE1284_MODE_COMPAT;
-	pp->state.phase = init_phase (pp->state.mode);
+	pp->state.phase = init_phase(pp->state.mode);
 	pp->flags = 0;
 	pp->irqresponse = 0;
-	atomic_set (&pp->irqc, 0);
-	init_waitqueue_head (&pp->irq_wait);
+	atomic_set(&pp->irqc, 0);
+	init_waitqueue_head(&pp->irq_wait);
 
 	/* Defer the actual device registration until the first claim.
 	 * That way, we know whether or not the driver wants to have
@@ -664,7 +702,7 @@
 	return 0;
 }
 
-static int pp_release (struct inode * inode, struct file * file)
+static int pp_release(struct inode *inode, struct file *file)
 {
 	unsigned int minor = iminor(inode);
 	struct pp_struct *pp = file->private_data;
@@ -673,10 +711,10 @@
 	compat_negot = 0;
 	if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
 	    (pp->state.mode != IEEE1284_MODE_COMPAT)) {
-	    	struct ieee1284_info *info;
+		struct ieee1284_info *info;
 
 		/* parport released, but not in compatibility mode */
-		parport_claim_or_block (pp->pdev);
+		parport_claim_or_block(pp->pdev);
 		pp->flags |= PP_CLAIMED;
 		info = &pp->pdev->port->ieee1284;
 		pp->saved_state.mode = info->mode;
@@ -689,9 +727,9 @@
 		compat_negot = 2;
 	}
 	if (compat_negot) {
-		parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
-		pr_debug(CHRDEV "%x: negotiated back to compatibility "
-			"mode because user-space forgot\n", minor);
+		parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+		dev_dbg(&pp->pdev->dev,
+			"negotiated back to compatibility mode because user-space forgot\n");
 	}
 
 	if (pp->flags & PP_CLAIMED) {
@@ -702,7 +740,7 @@
 		pp->state.phase = info->phase;
 		info->mode = pp->saved_state.mode;
 		info->phase = pp->saved_state.phase;
-		parport_release (pp->pdev);
+		parport_release(pp->pdev);
 		if (compat_negot != 1) {
 			pr_debug(CHRDEV "%x: released pardevice "
 				"because user-space forgot\n", minor);
@@ -711,25 +749,26 @@
 
 	if (pp->pdev) {
 		const char *name = pp->pdev->name;
-		parport_unregister_device (pp->pdev);
-		kfree (name);
+
+		parport_unregister_device(pp->pdev);
+		kfree(name);
 		pp->pdev = NULL;
 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
 	}
 
-	kfree (pp);
+	kfree(pp);
 
 	return 0;
 }
 
 /* No kernel lock held - fine */
-static unsigned int pp_poll (struct file * file, poll_table * wait)
+static unsigned int pp_poll(struct file *file, poll_table *wait)
 {
 	struct pp_struct *pp = file->private_data;
 	unsigned int mask = 0;
 
-	poll_wait (file, &pp->irq_wait, wait);
-	if (atomic_read (&pp->irqc))
+	poll_wait(file, &pp->irq_wait, wait);
+	if (atomic_read(&pp->irqc))
 		mask |= POLLIN | POLLRDNORM;
 
 	return mask;
@@ -744,6 +783,9 @@
 	.write		= pp_write,
 	.poll		= pp_poll,
 	.unlocked_ioctl	= pp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = pp_compat_ioctl,
+#endif
 	.open		= pp_open,
 	.release	= pp_release,
 };
@@ -759,19 +801,32 @@
 	device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
 }
 
+static int pp_probe(struct pardevice *par_dev)
+{
+	struct device_driver *drv = par_dev->dev.driver;
+	int len = strlen(drv->name);
+
+	if (strncmp(par_dev->name, drv->name, len))
+		return -ENODEV;
+
+	return 0;
+}
+
 static struct parport_driver pp_driver = {
 	.name		= CHRDEV,
-	.attach		= pp_attach,
+	.probe		= pp_probe,
+	.match_port	= pp_attach,
 	.detach		= pp_detach,
+	.devmodel	= true,
 };
 
-static int __init ppdev_init (void)
+static int __init ppdev_init(void)
 {
 	int err = 0;
 
-	if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
-		printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
-			PP_MAJOR);
+	if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+		printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
+		       PP_MAJOR);
 		return -EIO;
 	}
 	ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -781,11 +836,11 @@
 	}
 	err = parport_register_driver(&pp_driver);
 	if (err < 0) {
-		printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
+		printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
 		goto out_class;
 	}
 
-	printk (KERN_INFO PP_VERSION "\n");
+	printk(KERN_INFO PP_VERSION "\n");
 	goto out;
 
 out_class:
@@ -796,12 +851,12 @@
 	return err;
 }
 
-static void __exit ppdev_cleanup (void)
+static void __exit ppdev_cleanup(void)
 {
 	/* Clean up all parport stuff */
 	parport_unregister_driver(&pp_driver);
 	class_destroy(ppdev_class);
-	unregister_chrdev (PP_MAJOR, CHRDEV);
+	unregister_chrdev(PP_MAJOR, CHRDEV);
 }
 
 module_init(ppdev_init);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 9b9809b..e83b2ad 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -334,10 +334,8 @@
 
 	cdev_init(&raw_cdev, &raw_fops);
 	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
-	if (ret) {
+	if (ret)
 		goto error_region;
-	}
-
 	raw_class = class_create(THIS_MODULE, "raw");
 	if (IS_ERR(raw_class)) {
 		printk(KERN_ERR "Error creating raw class.\n");
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1161d68..56dd261 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -219,6 +219,21 @@
 }
 EXPORT_SYMBOL_GPL(vmbus_open);
 
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+				  const uuid_le *shv_host_servie_id)
+{
+	struct vmbus_channel_tl_connect_request conn_msg;
+
+	memset(&conn_msg, 0, sizeof(conn_msg));
+	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
+	conn_msg.host_service_id = *shv_host_servie_id;
+
+	return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
 /*
  * create_gpadl_header - Creates a gpadl for the specified buffer
  */
@@ -624,6 +639,7 @@
 	u64 aligned_data = 0;
 	int ret;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -643,7 +659,7 @@
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
 	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-				  &signal);
+				  &signal, lock);
 
 	/*
 	 * Signalling the host is conditional on many factors:
@@ -659,6 +675,9 @@
 	 * If we cannot write to the ring-buffer; signal the host
 	 * even if we may not have written anything. This is a rare
 	 * enough condition that it should not matter.
+	 * NOTE: in this case, the hvsock channel is an exception, because
+	 * it looks like the host side's hvsock implementation has a
+	 * throttling mechanism which can hurt performance otherwise.
 	 */
 
 	if (channel->signal_policy)
@@ -666,7 +685,8 @@
 	else
 		kick_q = true;
 
-	if (((ret == 0) && kick_q && signal) || (ret))
+	if (((ret == 0) && kick_q && signal) ||
+	    (ret && !is_hvsock_channel(channel)))
 		vmbus_setevent(channel);
 
 	return ret;
@@ -719,6 +739,7 @@
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -755,7 +776,8 @@
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 
 	/*
 	 * Signalling the host is conditional on many factors:
@@ -818,6 +840,7 @@
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -837,7 +860,8 @@
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 
 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
@@ -862,6 +886,7 @@
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -900,7 +925,8 @@
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 
 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1c1ad47..b40f429 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,12 +28,127 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/hyperv.h>
 
 #include "hyperv_vmbus.h"
 
-static void init_vp_index(struct vmbus_channel *channel,
-			  const uuid_le *type_guid);
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+
+static const struct vmbus_device vmbus_devs[] = {
+	/* IDE */
+	{ .dev_type = HV_IDE,
+	  HV_IDE_GUID,
+	  .perf_device = true,
+	},
+
+	/* SCSI */
+	{ .dev_type = HV_SCSI,
+	  HV_SCSI_GUID,
+	  .perf_device = true,
+	},
+
+	/* Fibre Channel */
+	{ .dev_type = HV_FC,
+	  HV_SYNTHFC_GUID,
+	  .perf_device = true,
+	},
+
+	/* Synthetic NIC */
+	{ .dev_type = HV_NIC,
+	  HV_NIC_GUID,
+	  .perf_device = true,
+	},
+
+	/* Network Direct */
+	{ .dev_type = HV_ND,
+	  HV_ND_GUID,
+	  .perf_device = true,
+	},
+
+	/* PCIE */
+	{ .dev_type = HV_PCIE,
+	  HV_PCIE_GUID,
+	  .perf_device = true,
+	},
+
+	/* Synthetic Frame Buffer */
+	{ .dev_type = HV_FB,
+	  HV_SYNTHVID_GUID,
+	  .perf_device = false,
+	},
+
+	/* Synthetic Keyboard */
+	{ .dev_type = HV_KBD,
+	  HV_KBD_GUID,
+	  .perf_device = false,
+	},
+
+	/* Synthetic MOUSE */
+	{ .dev_type = HV_MOUSE,
+	  HV_MOUSE_GUID,
+	  .perf_device = false,
+	},
+
+	/* KVP */
+	{ .dev_type = HV_KVP,
+	  HV_KVP_GUID,
+	  .perf_device = false,
+	},
+
+	/* Time Synch */
+	{ .dev_type = HV_TS,
+	  HV_TS_GUID,
+	  .perf_device = false,
+	},
+
+	/* Heartbeat */
+	{ .dev_type = HV_HB,
+	  HV_HEART_BEAT_GUID,
+	  .perf_device = false,
+	},
+
+	/* Shutdown */
+	{ .dev_type = HV_SHUTDOWN,
+	  HV_SHUTDOWN_GUID,
+	  .perf_device = false,
+	},
+
+	/* File copy */
+	{ .dev_type = HV_FCOPY,
+	  HV_FCOPY_GUID,
+	  .perf_device = false,
+	},
+
+	/* Backup */
+	{ .dev_type = HV_BACKUP,
+	  HV_VSS_GUID,
+	  .perf_device = false,
+	},
+
+	/* Dynamic Memory */
+	{ .dev_type = HV_DM,
+	  HV_DM_GUID,
+	  .perf_device = false,
+	},
+
+	/* Unknown GUID */
+	{ .dev_type = HV_UNKOWN,
+	  .perf_device = false,
+	},
+};
+
+static u16 hv_get_dev_type(const uuid_le *guid)
+{
+	u16 i;
+
+	for (i = HV_IDE; i < HV_UNKOWN; i++) {
+		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+			return i;
+	}
+	pr_info("Unknown GUID: %pUl\n", guid);
+	return i;
+}
 
 /**
  * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
@@ -144,6 +259,7 @@
 		return NULL;
 
 	channel->id = atomic_inc_return(&chan_num);
+	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);
 
@@ -195,6 +311,7 @@
 	vmbus_release_relid(relid);
 
 	BUG_ON(!channel->rescind);
+	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
 	if (channel->target_cpu != get_cpu()) {
 		put_cpu();
@@ -206,9 +323,7 @@
 	}
 
 	if (channel->primary_channel == NULL) {
-		mutex_lock(&vmbus_connection.channel_mutex);
 		list_del(&channel->listentry);
-		mutex_unlock(&vmbus_connection.channel_mutex);
 
 		primary_channel = channel;
 	} else {
@@ -251,6 +366,8 @@
 	struct vmbus_channel *channel;
 	bool fnew = true;
 	unsigned long flags;
+	u16 dev_type;
+	int ret;
 
 	/* Make sure this is a new offer */
 	mutex_lock(&vmbus_connection.channel_mutex);
@@ -288,7 +405,9 @@
 			goto err_free_chan;
 	}
 
-	init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
+	dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
+
+	init_vp_index(newchannel, dev_type);
 
 	if (newchannel->target_cpu != get_cpu()) {
 		put_cpu();
@@ -325,12 +444,17 @@
 	if (!newchannel->device_obj)
 		goto err_deq_chan;
 
+	newchannel->device_obj->device_id = dev_type;
 	/*
 	 * Add the new device to the bus. This will kick off device-driver
 	 * binding which eventually invokes the device driver's AddDevice()
 	 * method.
 	 */
-	if (vmbus_device_register(newchannel->device_obj) != 0) {
+	mutex_lock(&vmbus_connection.channel_mutex);
+	ret = vmbus_device_register(newchannel->device_obj);
+	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	if (ret != 0) {
 		pr_err("unable to add child device object (relid %d)\n",
 			newchannel->offermsg.child_relid);
 		kfree(newchannel->device_obj);
@@ -358,37 +482,6 @@
 	free_channel(newchannel);
 }
 
-enum {
-	IDE = 0,
-	SCSI,
-	FC,
-	NIC,
-	ND_NIC,
-	PCIE,
-	MAX_PERF_CHN,
-};
-
-/*
- * This is an array of device_ids (device types) that are performance critical.
- * We attempt to distribute the interrupt load for these devices across
- * all available CPUs.
- */
-static const struct hv_vmbus_device_id hp_devs[] = {
-	/* IDE */
-	{ HV_IDE_GUID, },
-	/* Storage - SCSI */
-	{ HV_SCSI_GUID, },
-	/* Storage - FC */
-	{ HV_SYNTHFC_GUID, },
-	/* Network */
-	{ HV_NIC_GUID, },
-	/* NetworkDirect Guest RDMA */
-	{ HV_ND_GUID, },
-	/* PCI Express Pass Through */
-	{ HV_PCIE_GUID, },
-};
-
-
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
@@ -405,22 +498,15 @@
  * For pre-win8 hosts or non-performance critical channels we assign the
  * first CPU in the first NUMA node.
  */
-static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 {
 	u32 cur_cpu;
-	int i;
-	bool perf_chn = false;
+	bool perf_chn = vmbus_devs[dev_type].perf_device;
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
 	struct cpumask available_mask;
 	struct cpumask *alloced_mask;
 
-	for (i = IDE; i < MAX_PERF_CHN; i++) {
-		if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) {
-			perf_chn = true;
-			break;
-		}
-	}
 	if ((vmbus_proto_version == VERSION_WS2008) ||
 	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
 		/*
@@ -469,6 +555,17 @@
 		    cpumask_of_node(primary->numa_node));
 
 	cur_cpu = -1;
+
+	/*
+	 * Normally the Hyper-V host doesn't create more subchannels than
+	 * there are VCPUs on the node, but it can happen when not all present
+	 * VCPUs on the node are initialized by the guest. Clear the
+	 * alloced_cpus_in_node to start over.
+	 */
+	if (cpumask_equal(&primary->alloced_cpus_in_node,
+			  cpumask_of_node(primary->numa_node)))
+		cpumask_clear(&primary->alloced_cpus_in_node);
+
 	while (true) {
 		cur_cpu = cpumask_next(cur_cpu, &available_mask);
 		if (cur_cpu >= nr_cpu_ids) {
@@ -498,6 +595,40 @@
 	channel->target_vp = hv_context.vp_index[cur_cpu];
 }
 
+static void vmbus_wait_for_unload(void)
+{
+	int cpu = smp_processor_id();
+	void *page_addr = hv_context.synic_message_page[cpu];
+	struct hv_message *msg = (struct hv_message *)page_addr +
+				  VMBUS_MESSAGE_SINT;
+	struct vmbus_channel_message_header *hdr;
+	bool unloaded = false;
+
+	while (1) {
+		if (msg->header.message_type == HVMSG_NONE) {
+			mdelay(10);
+			continue;
+		}
+
+		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+		if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+			unloaded = true;
+
+		msg->header.message_type = HVMSG_NONE;
+		/*
+		 * header.message_type needs to be written before we do
+		 * wrmsrl() below.
+		 */
+		mb();
+
+		if (msg->header.message_flags.msg_pending)
+			wrmsrl(HV_X64_MSR_EOM, 0);
+
+		if (unloaded)
+			break;
+	}
+}
+
 /*
  * vmbus_unload_response - Handler for the unload response.
  */
@@ -523,7 +654,14 @@
 	hdr.msgtype = CHANNELMSG_UNLOAD;
 	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
 
-	wait_for_completion(&vmbus_connection.unload_event);
+	/*
+	 * vmbus_initiate_unload() is also called on crash, and the crash can
+	 * happen in an interrupt context, where scheduling is impossible.
+	 */
+	if (!in_interrupt())
+		wait_for_completion(&vmbus_connection.unload_event);
+	else
+		vmbus_wait_for_unload();
 }
 
 /*
@@ -592,6 +730,8 @@
 	struct device *dev;
 
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
+
+	mutex_lock(&vmbus_connection.channel_mutex);
 	channel = relid2channel(rescind->child_relid);
 
 	if (channel == NULL) {
@@ -600,7 +740,7 @@
 		 * vmbus_process_offer(), we have already invoked
 		 * vmbus_release_relid() on error.
 		 */
-		return;
+		goto out;
 	}
 
 	spin_lock_irqsave(&channel->lock, flags);
@@ -608,6 +748,10 @@
 	spin_unlock_irqrestore(&channel->lock, flags);
 
 	if (channel->device_obj) {
+		if (channel->chn_rescind_callback) {
+			channel->chn_rescind_callback(channel);
+			goto out;
+		}
 		/*
 		 * We will have to unregister this device from the
 		 * driver core.
@@ -621,8 +765,25 @@
 		hv_process_channel_removal(channel,
 			channel->offermsg.child_relid);
 	}
+
+out:
+	mutex_unlock(&vmbus_connection.channel_mutex);
 }
 
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+{
+	mutex_lock(&vmbus_connection.channel_mutex);
+
+	BUG_ON(!is_hvsock_channel(channel));
+
+	channel->rescind = true;
+	vmbus_device_unregister(channel->device_obj);
+
+	mutex_unlock(&vmbus_connection.channel_mutex);
+}
+EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+
+
 /*
  * vmbus_onoffers_delivered -
  * This is invoked when all offers have been delivered.
@@ -825,6 +986,10 @@
 	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
 	{CHANNELMSG_UNLOAD,			0, NULL},
 	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
+	{CHANNELMSG_18,				0, NULL},
+	{CHANNELMSG_19,				0, NULL},
+	{CHANNELMSG_20,				0, NULL},
+	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
 };
 
 /*
@@ -973,3 +1138,10 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
+
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+		void (*chn_rescind_cb)(struct vmbus_channel *))
+{
+	channel->chn_rescind_callback = chn_rescind_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3dc5a9c..fa86b2c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -288,7 +288,8 @@
 	struct list_head *cur, *tmp;
 	struct vmbus_channel *cur_sc;
 
-	mutex_lock(&vmbus_connection.channel_mutex);
+	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+
 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
 		if (channel->offermsg.child_relid == relid) {
 			found_channel = channel;
@@ -307,7 +308,6 @@
 			}
 		}
 	}
-	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	return found_channel;
 }
@@ -474,7 +474,7 @@
 /*
  * vmbus_set_event - Send an event notification to the parent
  */
-int vmbus_set_event(struct vmbus_channel *channel)
+void vmbus_set_event(struct vmbus_channel *channel)
 {
 	u32 child_relid = channel->offermsg.child_relid;
 
@@ -485,5 +485,5 @@
 			(child_relid >> 5));
 	}
 
-	return hv_signal_event(channel->sig_event);
+	hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
 }
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 11bca51..ccb335f 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -295,8 +295,14 @@
 	 * Cleanup the TSC page based CS.
 	 */
 	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
-		clocksource_change_rating(&hyperv_cs_tsc, 10);
-		clocksource_unregister(&hyperv_cs_tsc);
+		/*
+		 * A crash can happen in an interrupt context, where
+		 * unregistering a clocksource is impossible and redundant.
+		 */
+		if (!oops_in_progress) {
+			clocksource_change_rating(&hyperv_cs_tsc, 10);
+			clocksource_unregister(&hyperv_cs_tsc);
+		}
 
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
@@ -337,22 +343,6 @@
 	return status & 0xFFFF;
 }
 
-
-/*
- * hv_signal_event -
- * Signal an event on the specified connection using the hypervisor event IPC.
- *
- * This involves a hypercall.
- */
-int hv_signal_event(void *con_id)
-{
-	u64 status;
-
-	status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL);
-
-	return status & 0xFFFF;
-}
-
 static int hv_ce_set_next_event(unsigned long delta,
 				struct clock_event_device *evt)
 {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4ebc796..b9ea7f5 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -501,8 +501,6 @@
 			 enum hv_message_type message_type,
 			 void *payload, size_t payload_size);
 
-extern int hv_signal_event(void *con_id);
-
 extern int hv_synic_alloc(void);
 
 extern void hv_synic_free(void);
@@ -531,7 +529,7 @@
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct kvec *kv_list,
-		    u32 kv_count, bool *signal);
+		    u32 kv_count, bool *signal, bool lock);
 
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
@@ -650,7 +648,7 @@
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-int vmbus_set_event(struct vmbus_channel *channel);
+void vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index b53702c..5613e2b 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -314,7 +314,7 @@
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct kvec *kv_list, u32 kv_count, bool *signal)
+		    struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -324,14 +324,15 @@
 	u32 next_write_location;
 	u32 old_write;
 	u64 prev_indices = 0;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	for (i = 0; i < kv_count; i++)
 		totalbytes_towrite += kv_list[i].iov_len;
 
 	totalbytes_towrite += sizeof(u64);
 
-	spin_lock_irqsave(&outring_info->ring_lock, flags);
+	if (lock)
+		spin_lock_irqsave(&outring_info->ring_lock, flags);
 
 	hv_get_ringbuffer_availbytes(outring_info,
 				&bytes_avail_toread,
@@ -343,7 +344,8 @@
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		if (lock)
+			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
 
@@ -374,7 +376,8 @@
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
-	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+	if (lock)
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
 	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
@@ -388,7 +391,6 @@
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
-	unsigned long flags;
 	struct vmpacket_descriptor desc;
 	u32 offset;
 	u32 packetlen;
@@ -397,7 +399,6 @@
 	if (buflen <= 0)
 		return -EINVAL;
 
-	spin_lock_irqsave(&inring_info->ring_lock, flags);
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -412,7 +413,7 @@
 		 * No error is set when there is even no header, drivers are
 		 * supposed to analyze buffer_actual_len.
 		 */
-		goto out_unlock;
+		return ret;
 	}
 
 	next_read_location = hv_get_next_read_location(inring_info);
@@ -425,15 +426,11 @@
 	*buffer_actual_len = packetlen;
 	*requestid = desc.trans_id;
 
-	if (bytes_avail_toread < packetlen + offset) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
+	if (bytes_avail_toread < packetlen + offset)
+		return -EAGAIN;
 
-	if (packetlen > buflen) {
-		ret = -ENOBUFS;
-		goto out_unlock;
-	}
+	if (packetlen > buflen)
+		return -ENOBUFS;
 
 	next_read_location =
 		hv_get_next_readlocation_withoffset(inring_info, offset);
@@ -460,7 +457,5 @@
 
 	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
-out_unlock:
-	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 	return ret;
 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 328e4c3..063e5f5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -477,6 +477,24 @@
 }
 static DEVICE_ATTR_RO(channel_vp_mapping);
 
+static ssize_t vendor_show(struct device *dev,
+			   struct device_attribute *dev_attr,
+			   char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t device_show(struct device *dev,
+			   struct device_attribute *dev_attr,
+			   char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	return sprintf(buf, "0x%x\n", hv_dev->device_id);
+}
+static DEVICE_ATTR_RO(device);
+
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 static struct attribute *vmbus_attrs[] = {
 	&dev_attr_id.attr,
@@ -502,6 +520,8 @@
 	&dev_attr_in_read_bytes_avail.attr,
 	&dev_attr_in_write_bytes_avail.attr,
 	&dev_attr_channel_vp_mapping.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_device.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(vmbus);
@@ -562,6 +582,10 @@
 	struct hv_driver *drv = drv_to_hv_drv(driver);
 	struct hv_device *hv_dev = device_to_hv_device(device);
 
+	/* The hv_sock driver handles all hv_sock offers. */
+	if (is_hvsock_channel(hv_dev->channel))
+		return drv->hvsock;
+
 	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
 		return 1;
 
@@ -957,6 +981,7 @@
 	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
 	memcpy(&child_device_obj->dev_instance, instance,
 	       sizeof(uuid_le));
+	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
 
 
 	return child_device_obj;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index c85935f..db05410 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
 menuconfig CORESIGHT
 	bool "CoreSight Tracing Support"
 	select ARM_AMBA
+	select PERF_EVENTS
 	help
 	  This framework provides a kernel interface for the CoreSight debug
 	  and trace drivers to register themselves with. It's intended to build
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 99f8e5f6..cf8c6d6 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -8,6 +8,8 @@
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
 obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
 					   coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
+					coresight-etm3x-sysfs.o \
+					coresight-etm-perf.o
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 77d0f9c..acbce79 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -1,5 +1,7 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
+ * Description: CoreSight Embedded Trace Buffer driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -10,8 +12,8 @@
  * GNU General Public License for more details.
  */
 
+#include <asm/local.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -27,6 +29,11 @@
 #include <linux/coresight.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
+#include <linux/circ_buf.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+
+#include <asm/local.h>
 
 #include "coresight-priv.h"
 
@@ -64,6 +71,26 @@
 #define ETB_FRAME_SIZE_WORDS	4
 
 /**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur:	index of the current buffer
+ * @nr_pages:	max number of pages granted to us
+ * @offset:	offset within the current buffer
+ * @data_size:	how much we collected in this run
+ * @lost:	non-zero if we had a HW buffer wrap around
+ * @snapshot:	is this run in snapshot mode
+ * @data_pages:	a handle to the ring buffer
+ */
+struct cs_buffers {
+	unsigned int		cur;
+	unsigned int		nr_pages;
+	unsigned long		offset;
+	local_t			data_size;
+	local_t			lost;
+	bool			snapshot;
+	void			**data_pages;
+};
+
+/**
  * struct etb_drvdata - specifics associated to an ETB component
  * @base:	memory mapped base address for this component.
  * @dev:	the device entity associated to this component.
@@ -71,10 +98,10 @@
  * @csdev:	component vitals needed by the framework.
  * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
  * @spinlock:	only one at a time pls.
- * @in_use:	synchronise user space access to etb buffer.
+ * @reading:	synchronise user space access to etb buffer.
+ * @mode:	how this ETB is being used, i.e. sysFS, Perf or disabled.
  * @buf:	area of memory where ETB buffer content gets sent.
  * @buffer_depth: size of @buf.
- * @enable:	this ETB is being used.
  * @trigger_cntr: amount of words to store after a trigger.
  */
 struct etb_drvdata {
@@ -84,10 +111,10 @@
 	struct coresight_device	*csdev;
 	struct miscdevice	miscdev;
 	spinlock_t		spinlock;
-	atomic_t		in_use;
+	local_t			reading;
+	local_t			mode;
 	u8			*buf;
 	u32			buffer_depth;
-	bool			enable;
 	u32			trigger_cntr;
 };
 
@@ -132,18 +159,31 @@
 	CS_LOCK(drvdata->base);
 }
 
-static int etb_enable(struct coresight_device *csdev)
+static int etb_enable(struct coresight_device *csdev, u32 mode)
 {
-	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	u32 val;
 	unsigned long flags;
+	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
+	val = local_cmpxchg(&drvdata->mode,
+			    CS_MODE_DISABLED, mode);
+	/*
+	 * When accessing from Perf, a HW buffer can be handled
+	 * by a single trace entity.  In sysFS mode many tracers
+	 * can be logging to the same HW buffer.
+	 */
+	if (val == CS_MODE_PERF)
+		return -EBUSY;
+
+	/* Nothing to do, the tracer is already enabled. */
+	if (val == CS_MODE_SYSFS)
+		goto out;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	etb_enable_hw(drvdata);
-	drvdata->enable = true;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+out:
 	dev_info(drvdata->dev, "ETB enabled\n");
 	return 0;
 }
@@ -244,17 +284,225 @@
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	etb_disable_hw(drvdata);
 	etb_dump_hw(drvdata);
-	drvdata->enable = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	pm_runtime_put(drvdata->dev);
+	local_set(&drvdata->mode, CS_MODE_DISABLED);
 
 	dev_info(drvdata->dev, "ETB disabled\n");
 }
 
+static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+			      void **pages, int nr_pages, bool overwrite)
+{
+	int node;
+	struct cs_buffers *buf;
+
+	if (cpu == -1)
+		cpu = smp_processor_id();
+	node = cpu_to_node(cpu);
+
+	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+	if (!buf)
+		return NULL;
+
+	buf->snapshot = overwrite;
+	buf->nr_pages = nr_pages;
+	buf->data_pages = pages;
+
+	return buf;
+}
+
+static void etb_free_buffer(void *config)
+{
+	struct cs_buffers *buf = config;
+
+	kfree(buf);
+}
+
+static int etb_set_buffer(struct coresight_device *csdev,
+			  struct perf_output_handle *handle,
+			  void *sink_config)
+{
+	int ret = 0;
+	unsigned long head;
+	struct cs_buffers *buf = sink_config;
+
+	/* wrap head around to the amount of space we have */
+	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+	/* find the page to write to */
+	buf->cur = head / PAGE_SIZE;
+
+	/* and offset within that page */
+	buf->offset = head % PAGE_SIZE;
+
+	local_set(&buf->data_size, 0);
+
+	return ret;
+}
+
+static unsigned long etb_reset_buffer(struct coresight_device *csdev,
+				      struct perf_output_handle *handle,
+				      void *sink_config, bool *lost)
+{
+	unsigned long size = 0;
+	struct cs_buffers *buf = sink_config;
+
+	if (buf) {
+		/*
+		 * In snapshot mode ->data_size holds the new address of the
+		 * ring buffer's head.  The size itself is the whole address
+		 * range since we want the latest information.
+		 */
+		if (buf->snapshot)
+			handle->head = local_xchg(&buf->data_size,
+						  buf->nr_pages << PAGE_SHIFT);
+
+		/*
+		 * Tell the tracer PMU how much we got in this run and if
+		 * something went wrong along the way.  Nobody else can use
+		 * this cs_buffers instance until we are done.  As such
+		 * resetting parameters here and squaring off with the ring
+		 * buffer API in the tracer PMU is fine.
+		 */
+		*lost = !!local_xchg(&buf->lost, 0);
+		size = local_xchg(&buf->data_size, 0);
+	}
+
+	return size;
+}
+
+static void etb_update_buffer(struct coresight_device *csdev,
+			      struct perf_output_handle *handle,
+			      void *sink_config)
+{
+	int i, cur;
+	u8 *buf_ptr;
+	u32 read_ptr, write_ptr, capacity;
+	u32 status, read_data, to_read;
+	unsigned long offset;
+	struct cs_buffers *buf = sink_config;
+	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	if (!buf)
+		return;
+
+	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+
+	CS_UNLOCK(drvdata->base);
+	etb_disable_hw(drvdata);
+
+	/* unit is in words, not bytes */
+	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+	/*
+	 * Entries should be aligned to the frame size.  If they are not,
+	 * go back to the last alignment point to give decoding tools a
+	 * chance to fix things.
+	 */
+	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
+		dev_err(drvdata->dev,
+			"write_ptr: %lu not aligned to formatter frame size\n",
+			(unsigned long)write_ptr);
+
+		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
+		local_inc(&buf->lost);
+	}
+
+	/*
+	 * Get a hold of the status register and see if a wrap around
+	 * has occurred.  If so adjust things accordingly.  Otherwise
+	 * start at the beginning and go until the write pointer has
+	 * been reached.
+	 */
+	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+	if (status & ETB_STATUS_RAM_FULL) {
+		local_inc(&buf->lost);
+		to_read = capacity;
+		read_ptr = write_ptr;
+	} else {
+		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
+		to_read *= ETB_FRAME_SIZE_WORDS;
+	}
+
+	/*
+	 * Make sure we don't overwrite data that hasn't been consumed yet.
+	 * It is entirely possible that the HW buffer has more data than the
+	 * ring buffer can currently handle.  If so adjust the start address
+	 * to take only the last traces.
+	 *
+	 * In snapshot mode we are looking to get the latest traces only and as
+	 * such, we don't care about not overwriting data that hasn't been
+	 * processed by user space.
+	 */
+	if (!buf->snapshot && to_read > handle->size) {
+		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
+
+		/* The new read pointer must be frame size aligned */
+		to_read -= handle->size & mask;
+		/*
+		 * Move the RAM read pointer up, keeping in mind that
+		 * everything is in frame size units.
+		 */
+		read_ptr = (write_ptr + drvdata->buffer_depth) -
+					to_read / ETB_FRAME_SIZE_WORDS;
+		/* Wrap around if need be */
+		read_ptr &= ~(drvdata->buffer_depth - 1);
+		/* let the decoder know we've skipped ahead */
+		local_inc(&buf->lost);
+	}
+
+	/* finally tell HW where we want to start reading from */
+	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+	cur = buf->cur;
+	offset = buf->offset;
+	for (i = 0; i < to_read; i += 4) {
+		buf_ptr = buf->data_pages[cur] + offset;
+		read_data = readl_relaxed(drvdata->base +
+					  ETB_RAM_READ_DATA_REG);
+		*buf_ptr++ = read_data >> 0;
+		*buf_ptr++ = read_data >> 8;
+		*buf_ptr++ = read_data >> 16;
+		*buf_ptr++ = read_data >> 24;
+
+		offset += 4;
+		if (offset >= PAGE_SIZE) {
+			offset = 0;
+			cur++;
+			/* wrap around at the end of the buffer */
+			cur &= buf->nr_pages - 1;
+		}
+	}
+
+	/* reset ETB buffer for next run */
+	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+
+	/*
+	 * In snapshot mode all we have to do is communicate to
+	 * perf_aux_output_end() the address of the current head.  In full
+	 * trace mode the same function expects a size to move rb->aux_head
+	 * forward.
+	 */
+	if (buf->snapshot)
+		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+	else
+		local_add(to_read, &buf->data_size);
+
+	etb_enable_hw(drvdata);
+	CS_LOCK(drvdata->base);
+}
+
 static const struct coresight_ops_sink etb_sink_ops = {
 	.enable		= etb_enable,
 	.disable	= etb_disable,
+	.alloc_buffer	= etb_alloc_buffer,
+	.free_buffer	= etb_free_buffer,
+	.set_buffer	= etb_set_buffer,
+	.reset_buffer	= etb_reset_buffer,
+	.update_buffer	= etb_update_buffer,
 };
 
 static const struct coresight_ops etb_cs_ops = {
@@ -266,7 +514,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
-	if (drvdata->enable) {
+	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
 		etb_disable_hw(drvdata);
 		etb_dump_hw(drvdata);
 		etb_enable_hw(drvdata);
@@ -281,7 +529,7 @@
 	struct etb_drvdata *drvdata = container_of(file->private_data,
 						   struct etb_drvdata, miscdev);
 
-	if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+	if (local_cmpxchg(&drvdata->reading, 0, 1))
 		return -EBUSY;
 
 	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -317,7 +565,7 @@
 {
 	struct etb_drvdata *drvdata = container_of(file->private_data,
 						   struct etb_drvdata, miscdev);
-	atomic_set(&drvdata->in_use, 0);
+	local_set(&drvdata->reading, 0);
 
 	dev_dbg(drvdata->dev, "%s: released\n", __func__);
 	return 0;
@@ -489,15 +737,6 @@
 	return ret;
 }
 
-static int etb_remove(struct amba_device *adev)
-{
-	struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
-	misc_deregister(&drvdata->miscdev);
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int etb_runtime_suspend(struct device *dev)
 {
@@ -537,14 +776,10 @@
 		.name	= "coresight-etb10",
 		.owner	= THIS_MODULE,
 		.pm	= &etb_dev_pm_ops,
+		.suppress_bind_attrs = true,
 
 	},
 	.probe		= etb_probe,
-	.remove		= etb_remove,
 	.id_table	= etb_ids,
 };
-
-module_amba_driver(etb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
+builtin_amba_driver(etb_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
new file mode 100644
index 0000000..36153a7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "coresight-priv.h"
+
+static struct pmu etm_pmu;
+static bool etm_perf_up;
+
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work:		Handle to free allocated memory outside IRQ context.
+ * @mask:		Hold the CPU(s) this event was set for.
+ * @snk_config:		The sink configuration.
+ * @path:		An array of paths, one slot per CPU.
+ */
+struct etm_event_data {
+	struct work_struct work;
+	cpumask_t mask;
+	void *snk_config;
+	struct list_head **path;
+};
+
+static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+
+/* ETMv3.5/PTM's ETMCR is 'config' */
+PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
+
+static struct attribute *etm_config_formats_attr[] = {
+	&format_attr_cycacc.attr,
+	&format_attr_timestamp.attr,
+	NULL,
+};
+
+static struct attribute_group etm_pmu_format_group = {
+	.name   = "format",
+	.attrs  = etm_config_formats_attr,
+};
+
+static const struct attribute_group *etm_pmu_attr_groups[] = {
+	&etm_pmu_format_group,
+	NULL,
+};
+
+static void etm_event_read(struct perf_event *event) {}
+
+static int etm_event_init(struct perf_event *event)
+{
+	if (event->attr.type != etm_pmu.type)
+		return -ENOENT;
+
+	return 0;
+}
+
+static void free_event_data(struct work_struct *work)
+{
+	int cpu;
+	cpumask_t *mask;
+	struct etm_event_data *event_data;
+	struct coresight_device *sink;
+
+	event_data = container_of(work, struct etm_event_data, work);
+	mask = &event_data->mask;
+	/*
+	 * First deal with the sink configuration.  See comment in
+	 * etm_setup_aux() about why we take the first available path.
+	 */
+	if (event_data->snk_config) {
+		cpu = cpumask_first(mask);
+		sink = coresight_get_sink(event_data->path[cpu]);
+		if (sink_ops(sink)->free_buffer)
+			sink_ops(sink)->free_buffer(event_data->snk_config);
+	}
+
+	for_each_cpu(cpu, mask) {
+		if (event_data->path[cpu])
+			coresight_release_path(event_data->path[cpu]);
+	}
+
+	kfree(event_data->path);
+	kfree(event_data);
+}
+
+static void *alloc_event_data(int cpu)
+{
+	int size;
+	cpumask_t *mask;
+	struct etm_event_data *event_data;
+
+	/* First get memory for the session's data */
+	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
+	if (!event_data)
+		return NULL;
+
+	/* Make sure nothing disappears under us */
+	get_online_cpus();
+	size = num_online_cpus();
+
+	mask = &event_data->mask;
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, mask);
+	else
+		cpumask_copy(mask, cpu_online_mask);
+	put_online_cpus();
+
+	/*
+	 * Each CPU has a single path between source and destination.  As such
+	 * allocate an array using CPU numbers as indexes.  That way a path
+	 * for any CPU can easily be accessed at any given time.  We proceed
+	 * the same way for sessions involving a single CPU.  The cost of
+	 * unused memory when dealing with single CPU trace scenarios is small
+	 * compared to the cost of searching through an optimized array.
+	 */
+	event_data->path = kcalloc(size,
+				   sizeof(struct list_head *), GFP_KERNEL);
+	if (!event_data->path) {
+		kfree(event_data);
+		return NULL;
+	}
+
+	return event_data;
+}
+
+static void etm_free_aux(void *data)
+{
+	struct etm_event_data *event_data = data;
+
+	schedule_work(&event_data->work);
+}
+
+static void *etm_setup_aux(int event_cpu, void **pages,
+			   int nr_pages, bool overwrite)
+{
+	int cpu;
+	cpumask_t *mask;
+	struct coresight_device *sink;
+	struct etm_event_data *event_data = NULL;
+
+	event_data = alloc_event_data(event_cpu);
+	if (!event_data)
+		return NULL;
+
+	INIT_WORK(&event_data->work, free_event_data);
+
+	mask = &event_data->mask;
+
+	/* Setup the path for each CPU in a trace session */
+	for_each_cpu(cpu, mask) {
+		struct coresight_device *csdev;
+
+		csdev = per_cpu(csdev_src, cpu);
+		if (!csdev)
+			goto err;
+
+		/*
+		 * Building a path doesn't enable it; it simply builds a
+		 * list of devices from source to sink that can be
+		 * referenced later when the path is actually needed.
+		 */
+		event_data->path[cpu] = coresight_build_path(csdev);
+		if (!event_data->path[cpu])
+			goto err;
+	}
+
+	/*
+	 * In theory nothing prevents tracers in a trace session from being
+	 * associated with different sinks, nor having a sink per tracer.  But
+	 * until we have HW with this kind of topology and a way to convey
+	 * sink assignment from the perf cmd line, we need to assume tracers
+	 * in a trace session are using the same sink.  Therefore pick the sink
+	 * found at the end of the first available path.
+	 */
+	cpu = cpumask_first(mask);
+	/* Grab the sink at the end of the path */
+	sink = coresight_get_sink(event_data->path[cpu]);
+	if (!sink)
+		goto err;
+
+	if (!sink_ops(sink)->alloc_buffer)
+		goto err;
+
+	/* Get the AUX specific data from the sink buffer */
+	event_data->snk_config =
+			sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+						     nr_pages, overwrite);
+	if (!event_data->snk_config)
+		goto err;
+
+out:
+	return event_data;
+
+err:
+	etm_free_aux(event_data);
+	event_data = NULL;
+	goto out;
+}
+
+static void etm_event_start(struct perf_event *event, int flags)
+{
+	int cpu = smp_processor_id();
+	struct etm_event_data *event_data;
+	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+
+	if (!csdev)
+		goto fail;
+
+	/*
+	 * Deal with the ring buffer API and get a handle on the
+	 * session's information.
+	 */
+	event_data = perf_aux_output_begin(handle, event);
+	if (!event_data)
+		goto fail;
+
+	/* We need a sink, no need to continue without one */
+	sink = coresight_get_sink(event_data->path[cpu]);
+	if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
+		goto fail_end_stop;
+
+	/* Configure the sink */
+	if (sink_ops(sink)->set_buffer(sink, handle,
+				       event_data->snk_config))
+		goto fail_end_stop;
+
+	/* Nothing will happen without a path */
+	if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+		goto fail_end_stop;
+
+	/* Tell the perf core the event is alive */
+	event->hw.state = 0;
+
+	/* Finally enable the tracer */
+	if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+		goto fail_end_stop;
+
+out:
+	return;
+
+fail_end_stop:
+	perf_aux_output_end(handle, 0, true);
+fail:
+	event->hw.state = PERF_HES_STOPPED;
+	goto out;
+}
+
+static void etm_event_stop(struct perf_event *event, int mode)
+{
+	bool lost;
+	int cpu = smp_processor_id();
+	unsigned long size;
+	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+	struct etm_event_data *event_data = perf_get_aux(handle);
+
+	if (event->hw.state == PERF_HES_STOPPED)
+		return;
+
+	if (!csdev)
+		return;
+
+	sink = coresight_get_sink(event_data->path[cpu]);
+	if (!sink)
+		return;
+
+	/* stop tracer */
+	source_ops(csdev)->disable(csdev);
+
+	/* tell the core */
+	event->hw.state = PERF_HES_STOPPED;
+
+	if (mode & PERF_EF_UPDATE) {
+		if (WARN_ON_ONCE(handle->event != event))
+			return;
+
+		/* update trace information */
+		if (!sink_ops(sink)->update_buffer)
+			return;
+
+		sink_ops(sink)->update_buffer(sink, handle,
+					      event_data->snk_config);
+
+		if (!sink_ops(sink)->reset_buffer)
+			return;
+
+		size = sink_ops(sink)->reset_buffer(sink, handle,
+						    event_data->snk_config,
+						    &lost);
+
+		perf_aux_output_end(handle, size, lost);
+	}
+
+	/* Disabling the path makes its elements available to other sessions */
+	coresight_disable_path(event_data->path[cpu]);
+}
+
+static int etm_event_add(struct perf_event *event, int mode)
+{
+	int ret = 0;
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (mode & PERF_EF_START) {
+		etm_event_start(event, 0);
+		if (hwc->state & PERF_HES_STOPPED)
+			ret = -EINVAL;
+	} else {
+		hwc->state = PERF_HES_STOPPED;
+	}
+
+	return ret;
+}
+
+static void etm_event_del(struct perf_event *event, int mode)
+{
+	etm_event_stop(event, PERF_EF_UPDATE);
+}
+
+int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{
+	char entry[sizeof("cpu9999999")];
+	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
+	struct device *pmu_dev = etm_pmu.dev;
+	struct device *cs_dev = &csdev->dev;
+
+	sprintf(entry, "cpu%d", cpu);
+
+	if (!etm_perf_up)
+		return -EPROBE_DEFER;
+
+	if (link) {
+		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
+		if (ret)
+			return ret;
+		per_cpu(csdev_src, cpu) = csdev;
+	} else {
+		sysfs_remove_link(&pmu_dev->kobj, entry);
+		per_cpu(csdev_src, cpu) = NULL;
+	}
+
+	return 0;
+}
+
+static int __init etm_perf_init(void)
+{
+	int ret;
+
+	etm_pmu.capabilities	= PERF_PMU_CAP_EXCLUSIVE;
+
+	etm_pmu.attr_groups	= etm_pmu_attr_groups;
+	etm_pmu.task_ctx_nr	= perf_sw_context;
+	etm_pmu.read		= etm_event_read;
+	etm_pmu.event_init	= etm_event_init;
+	etm_pmu.setup_aux	= etm_setup_aux;
+	etm_pmu.free_aux	= etm_free_aux;
+	etm_pmu.start		= etm_event_start;
+	etm_pmu.stop		= etm_event_stop;
+	etm_pmu.add		= etm_event_add;
+	etm_pmu.del		= etm_event_del;
+
+	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+	if (ret == 0)
+		etm_perf_up = true;
+
+	return ret;
+}
+module_init(etm_perf_init);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
new file mode 100644
index 0000000..87f5a13
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_ETM_PERF_H
+#define _CORESIGHT_ETM_PERF_H
+
+struct coresight_device;
+
+#ifdef CONFIG_CORESIGHT
+int etm_perf_symlink(struct coresight_device *csdev, bool link);
+
+#else
+static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{ return -EINVAL; }
+
+#endif /* CONFIG_CORESIGHT */
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index b4481eb..51597cb 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -13,6 +13,7 @@
 #ifndef _CORESIGHT_CORESIGHT_ETM_H
 #define _CORESIGHT_CORESIGHT_ETM_H
 
+#include <asm/local.h>
 #include <linux/spinlock.h>
 #include "coresight-priv.h"
 
@@ -109,7 +110,10 @@
 #define ETM_MODE_STALL		BIT(2)
 #define ETM_MODE_TIMESTAMP	BIT(3)
 #define ETM_MODE_CTXID		BIT(4)
-#define ETM_MODE_ALL		0x1f
+#define ETM_MODE_ALL		(ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
+				 ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+				 ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
+				 ETM_MODE_EXCL_USER)
 
 #define ETM_SQR_MASK		0x3
 #define ETM_TRACEID_MASK	0x3f
@@ -136,35 +140,16 @@
 #define ETM_DEFAULT_EVENT_VAL	(ETM_HARD_WIRE_RES_A	|	\
 				 ETM_ADD_COMP_0		|	\
 				 ETM_EVENT_NOT_A)
+
 /**
- * struct etm_drvdata - specifics associated to an ETM component
- * @base:	memory mapped base address for this component.
- * @dev:	the device entity associated to this component.
- * @atclk:	optional clock for the core parts of the ETM.
- * @csdev:	component vitals needed by the framework.
- * @spinlock:	only one at a time pls.
- * @cpu:	the cpu this component is affined to.
- * @port_size:	port size as reported by ETMCR bit 4-6 and 21.
- * @arch:	ETM/PTM version number.
- * @use_cpu14:	true if management registers need to be accessed via CP14.
- * @enable:	is this ETM/PTM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:true if we should start tracing at boot time.
- * @os_unlock:	true if access to management registers is allowed.
- * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
- * @nr_cntr:	Number of counters as found in ETMCCR bit 13-15.
- * @nr_ext_inp:	Number of external input as found in ETMCCR bit 17-19.
- * @nr_ext_out:	Number of external output as found in ETMCCR bit 20-22.
- * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
- * @etmccr:	value of register ETMCCR.
- * @etmccer:	value of register ETMCCER.
- * @traceid:	value of the current ID for this component.
+ * struct etm_config - configuration information related to an ETM
  * @mode:	controls various modes supported by this ETM/PTM.
  * @ctrl:	used in conjunction with @mode.
  * @trigger_event: setting for register ETMTRIGGER.
  * @startstop_ctrl: setting for register ETMTSSCR.
  * @enable_event: setting for register ETMTEEVR.
  * @enable_ctrl1: setting for register ETMTECR1.
+ * @enable_ctrl2: setting for register ETMTECR2.
  * @fifofull_level: setting for register ETMFFLR.
  * @addr_idx:	index for the address comparator selection.
  * @addr_val:	value for address comparator register.
@@ -189,36 +174,16 @@
  * @ctxid_mask: mask applicable to all the context IDs.
  * @sync_freq:	Synchronisation frequency.
  * @timestamp_event: Defines an event that requests the insertion
-		     of a timestamp into the trace stream.
+ *		     of a timestamp into the trace stream.
  */
-struct etm_drvdata {
-	void __iomem			*base;
-	struct device			*dev;
-	struct clk			*atclk;
-	struct coresight_device		*csdev;
-	spinlock_t			spinlock;
-	int				cpu;
-	int				port_size;
-	u8				arch;
-	bool				use_cp14;
-	bool				enable;
-	bool				sticky_enable;
-	bool				boot_enable;
-	bool				os_unlock;
-	u8				nr_addr_cmp;
-	u8				nr_cntr;
-	u8				nr_ext_inp;
-	u8				nr_ext_out;
-	u8				nr_ctxid_cmp;
-	u32				etmccr;
-	u32				etmccer;
-	u32				traceid;
+struct etm_config {
 	u32				mode;
 	u32				ctrl;
 	u32				trigger_event;
 	u32				startstop_ctrl;
 	u32				enable_event;
 	u32				enable_ctrl1;
+	u32				enable_ctrl2;
 	u32				fifofull_level;
 	u8				addr_idx;
 	u32				addr_val[ETM_MAX_ADDR_CMP];
@@ -244,6 +209,56 @@
 	u32				timestamp_event;
 };
 
+/**
+ * struct etm_drvdata - specifics associated to an ETM component
+ * @base:	memory mapped base address for this component.
+ * @dev:	the device entity associated to this component.
+ * @atclk:	optional clock for the core parts of the ETM.
+ * @csdev:	component vitals needed by the framework.
+ * @spinlock:	only one at a time pls.
+ * @cpu:	the cpu this component is affined to.
+ * @port_size:	port size as reported by ETMCR bit 4-6 and 21.
+ * @arch:	ETM/PTM version number.
+ * @use_cp14:	true if management registers need to be accessed via CP14.
+ * @mode:	this tracer's mode, i.e. sysFS, Perf or disabled.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:true if we should start tracing at boot time.
+ * @os_unlock:	true if access to management registers is allowed.
+ * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr:	Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp:	Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out:	Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr:	value of register ETMCCR.
+ * @etmccer:	value of register ETMCCER.
+ * @traceid:	value of the current ID for this component.
+ * @config:	structure holding configuration parameters.
+ */
+struct etm_drvdata {
+	void __iomem			*base;
+	struct device			*dev;
+	struct clk			*atclk;
+	struct coresight_device		*csdev;
+	spinlock_t			spinlock;
+	int				cpu;
+	int				port_size;
+	u8				arch;
+	bool				use_cp14;
+	local_t				mode;
+	bool				sticky_enable;
+	bool				boot_enable;
+	bool				os_unlock;
+	u8				nr_addr_cmp;
+	u8				nr_cntr;
+	u8				nr_ext_inp;
+	u8				nr_ext_out;
+	u8				nr_ctxid_cmp;
+	u32				etmccr;
+	u32				etmccer;
+	u32				traceid;
+	struct etm_config		config;
+};
+
 enum etm_addr_type {
 	ETM_ADDR_TYPE_NONE,
 	ETM_ADDR_TYPE_SINGLE,
@@ -251,4 +266,39 @@
 	ETM_ADDR_TYPE_START,
 	ETM_ADDR_TYPE_STOP,
 };
+
+static inline void etm_writel(struct etm_drvdata *drvdata,
+			      u32 val, u32 off)
+{
+	if (drvdata->use_cp14) {
+		if (etm_writel_cp14(off, val)) {
+			dev_err(drvdata->dev,
+				"invalid CP14 access to ETM reg: %#x", off);
+		}
+	} else {
+		writel_relaxed(val, drvdata->base + off);
+	}
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+	u32 val;
+
+	if (drvdata->use_cp14) {
+		if (etm_readl_cp14(off, &val)) {
+			dev_err(drvdata->dev,
+				"invalid CP14 access to ETM reg: %#x", off);
+		}
+	} else {
+		val = readl_relaxed(drvdata->base + off);
+	}
+
+	return val;
+}
+
+extern const struct attribute_group *coresight_etm_groups[];
+int etm_get_trace_id(struct etm_drvdata *drvdata);
+void etm_set_default(struct etm_config *config);
+void etm_config_trace_mode(struct etm_config *config);
+struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
 #endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
new file mode 100644
index 0000000..cbb4046
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -0,0 +1,1272 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm.h"
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_addr_cmp;
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_cntr;
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_ctxid_cmp;
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	unsigned long flags, val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	pm_runtime_get_sync(drvdata->dev);
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	CS_UNLOCK(drvdata->base);
+
+	val = etm_readl(drvdata, ETMSR);
+
+	CS_LOCK(drvdata->base);
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	pm_runtime_put(drvdata->dev);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	int i, ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val) {
+		spin_lock(&drvdata->spinlock);
+		memset(config, 0, sizeof(struct etm_config));
+		config->mode = ETM_MODE_EXCLUDE;
+		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+		}
+
+		etm_set_default(config);
+		spin_unlock(&drvdata->spinlock);
+	}
+
+	return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->mode;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->mode = val & ETM_MODE_ALL;
+
+	if (config->mode & ETM_MODE_EXCLUDE)
+		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
+	else
+		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+	if (config->mode & ETM_MODE_CYCACC)
+		config->ctrl |= ETMCR_CYC_ACC;
+	else
+		config->ctrl &= ~ETMCR_CYC_ACC;
+
+	if (config->mode & ETM_MODE_STALL) {
+		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+			dev_warn(drvdata->dev, "stall mode not supported\n");
+			ret = -EINVAL;
+			goto err_unlock;
+		}
+		config->ctrl |= ETMCR_STALL_MODE;
+	} else {
+		config->ctrl &= ~ETMCR_STALL_MODE;
+	}
+
+	if (config->mode & ETM_MODE_TIMESTAMP) {
+		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+			dev_warn(drvdata->dev, "timestamp not supported\n");
+			ret = -EINVAL;
+			goto err_unlock;
+		}
+		config->ctrl |= ETMCR_TIMESTAMP_EN;
+	} else {
+		config->ctrl &= ~ETMCR_TIMESTAMP_EN;
+	}
+
+	if (config->mode & ETM_MODE_CTXID)
+		config->ctrl |= ETMCR_CTXID_SIZE;
+	else
+		config->ctrl &= ~ETMCR_CTXID_SIZE;
+
+	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+		etm_config_trace_mode(config);
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+
+err_unlock:
+	spin_unlock(&drvdata->spinlock);
+	return ret;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t trigger_event_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->trigger_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->trigger_event = val & ETM_EVENT_MASK;
+
+	return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->enable_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->enable_event = val & ETM_EVENT_MASK;
+
+	return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->fifofull_level;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->fifofull_level = val;
+
+	return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->addr_idx;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val >= drvdata->nr_addr_cmp)
+		return -EINVAL;
+
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	config->addr_idx = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EINVAL;
+	}
+
+	val = config->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	u8 idx;
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EINVAL;
+	}
+
+	config->addr_val[idx] = val;
+	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val1, val2;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val1 = config->addr_val[idx];
+	val2 = config->addr_val[idx + 1];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	u8 idx;
+	unsigned long val1, val2;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+	/* Lower address comparator cannot have a higher address value */
+	if (val1 > val2)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	config->addr_val[idx] = val1;
+	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+	config->addr_val[idx + 1] = val2;
+	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+	config->enable_ctrl1 |= (1 << (idx/2));
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+static ssize_t addr_start_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val = config->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	u8 idx;
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	config->addr_val[idx] = val;
+	config->addr_type[idx] = ETM_ADDR_TYPE_START;
+	config->startstop_ctrl |= (1 << idx);
+	config->enable_ctrl1 |= ETMTECR1_START_STOP;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val = config->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	u8 idx;
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	config->addr_val[idx] = val;
+	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+	config->startstop_ctrl |= (1 << (idx + 16));
+	config->enable_ctrl1 |= ETMTECR1_START_STOP;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->addr_acctype[config->addr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->addr_acctype[config->addr_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->cntr_idx;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val >= drvdata->nr_cntr)
+		return -EINVAL;
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	config->cntr_idx = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->cntr_rld_val[config->cntr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_rld_val[config->cntr_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->cntr_event[config->cntr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->cntr_rld_event[config->cntr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	int i, ret = 0;
+	u32 val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
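+	/* When the tracer isn't running, report the cached counter values */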
+	if (!local_read(&drvdata->mode)) {
+		spin_lock(&drvdata->spinlock);
+		for (i = 0; i < drvdata->nr_cntr; i++)
+			ret += sprintf(buf + ret, "counter %d: %x\n",
+				       i, config->cntr_val[i]);
+		spin_unlock(&drvdata->spinlock);
+		return ret;
+	}
+
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		val = etm_readl(drvdata, ETMCNTVRn(i));
+		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+	}
+
+	return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_val[config->cntr_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_12_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_12_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_21_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_21_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_23_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_23_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_31_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_31_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_32_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_32_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_13_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_13_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val, flags;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
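+	/* When the tracer isn't running, report the cached sequencer state */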
+	if (!local_read(&drvdata->mode)) {
+		val = config->seq_curr_state;
+		goto out;
+	}
+
+	pm_runtime_get_sync(drvdata->dev);
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+
+	CS_UNLOCK(drvdata->base);
+	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+	CS_LOCK(drvdata->base);
+
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	pm_runtime_put(drvdata->dev);
+out:
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val > ETM_SEQ_STATE_MAX_VAL)
+		return -EINVAL;
+
+	config->seq_curr_state = val;
+
+	return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->ctxid_idx;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val >= drvdata->nr_ctxid_cmp)
+		return -EINVAL;
+
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	config->ctxid_idx = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->ctxid_vpid[config->ctxid_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	int ret;
+	unsigned long vpid, pid;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &vpid);
+	if (ret)
+		return ret;
+
+	pid = coresight_vpid_to_pid(vpid);
+
+	spin_lock(&drvdata->spinlock);
+	config->ctxid_pid[config->ctxid_idx] = pid;
+	config->ctxid_vpid[config->ctxid_idx] = vpid;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->ctxid_mask;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->ctxid_mask = val;
+	return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->sync_freq;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->sync_freq = val & ETM_SYNC_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->timestamp_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->timestamp_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t cpu_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->cpu;
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static ssize_t traceid_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = etm_get_trace_id(drvdata);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	drvdata->traceid = val & ETM_TRACEID_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+	&dev_attr_nr_addr_cmp.attr,
+	&dev_attr_nr_cntr.attr,
+	&dev_attr_nr_ctxid_cmp.attr,
+	&dev_attr_etmsr.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_trigger_event.attr,
+	&dev_attr_enable_event.attr,
+	&dev_attr_fifofull_level.attr,
+	&dev_attr_addr_idx.attr,
+	&dev_attr_addr_single.attr,
+	&dev_attr_addr_range.attr,
+	&dev_attr_addr_start.attr,
+	&dev_attr_addr_stop.attr,
+	&dev_attr_addr_acctype.attr,
+	&dev_attr_cntr_idx.attr,
+	&dev_attr_cntr_rld_val.attr,
+	&dev_attr_cntr_event.attr,
+	&dev_attr_cntr_rld_event.attr,
+	&dev_attr_cntr_val.attr,
+	&dev_attr_seq_12_event.attr,
+	&dev_attr_seq_21_event.attr,
+	&dev_attr_seq_23_event.attr,
+	&dev_attr_seq_31_event.attr,
+	&dev_attr_seq_32_event.attr,
+	&dev_attr_seq_13_event.attr,
+	&dev_attr_seq_curr_state.attr,
+	&dev_attr_ctxid_idx.attr,
+	&dev_attr_ctxid_pid.attr,
+	&dev_attr_ctxid_mask.attr,
+	&dev_attr_sync_freq.attr,
+	&dev_attr_timestamp_event.attr,
+	&dev_attr_traceid.attr,
+	&dev_attr_cpu.attr,
+	NULL,
+};
+
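+/*
+ * Helper to generate a read-only sysfs attribute that dumps the raw value
+ * of the management register found at @offset.
+ */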
+#define coresight_simple_func(name, offset)                             \
+static ssize_t name##_show(struct device *_dev,                         \
+			   struct device_attribute *attr, char *buf)    \
+{                                                                       \
+	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
+			 readl_relaxed(drvdata->base + offset));        \
+}                                                                       \
+DEVICE_ATTR_RO(name)
+
+coresight_simple_func(etmccr, ETMCCR);
+coresight_simple_func(etmccer, ETMCCER);
+coresight_simple_func(etmscr, ETMSCR);
+coresight_simple_func(etmidr, ETMIDR);
+coresight_simple_func(etmcr, ETMCR);
+coresight_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_simple_func(etmteevr, ETMTEEVR);
+coresight_simple_func(etmtssvr, ETMTSSCR);
+coresight_simple_func(etmtecr1, ETMTECR1);
+coresight_simple_func(etmtecr2, ETMTECR2);
+
+static struct attribute *coresight_etm_mgmt_attrs[] = {
+	&dev_attr_etmccr.attr,
+	&dev_attr_etmccer.attr,
+	&dev_attr_etmscr.attr,
+	&dev_attr_etmidr.attr,
+	&dev_attr_etmcr.attr,
+	&dev_attr_etmtraceidr.attr,
+	&dev_attr_etmteevr.attr,
+	&dev_attr_etmtssvr.attr,
+	&dev_attr_etmtecr1.attr,
+	&dev_attr_etmtecr2.attr,
+	NULL,
+};
+
+static const struct attribute_group coresight_etm_group = {
+	.attrs = coresight_etm_attrs,
+};
+
+static const struct attribute_group coresight_etm_mgmt_group = {
+	.attrs = coresight_etm_mgmt_attrs,
+	.name = "mgmt",
+};
+
+const struct attribute_group *coresight_etm_groups[] = {
+	&coresight_etm_group,
+	&coresight_etm_mgmt_group,
+	NULL,
+};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d630b7e..d83ab82 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -1,5 +1,7 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
+ * Description: CoreSight Program Flow Trace driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -27,14 +29,21 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm.h"
+#include "coresight-etm-perf.h"
 
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
 static int boot_enable;
 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 
@@ -42,45 +51,16 @@
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
-static inline void etm_writel(struct etm_drvdata *drvdata,
-			      u32 val, u32 off)
-{
-	if (drvdata->use_cp14) {
-		if (etm_writel_cp14(off, val)) {
-			dev_err(drvdata->dev,
-				"invalid CP14 access to ETM reg: %#x", off);
-		}
-	} else {
-		writel_relaxed(val, drvdata->base + off);
-	}
-}
-
-static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
-{
-	u32 val;
-
-	if (drvdata->use_cp14) {
-		if (etm_readl_cp14(off, &val)) {
-			dev_err(drvdata->dev,
-				"invalid CP14 access to ETM reg: %#x", off);
-		}
-	} else {
-		val = readl_relaxed(drvdata->base + off);
-	}
-
-	return val;
-}
-
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
  * processors, otherwise memory mapped reads/writes will be invalid.
  */
-static void etm_os_unlock(void *info)
+static void etm_os_unlock(struct etm_drvdata *drvdata)
 {
-	struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
 	/* Writing any value to ETMOSLAR unlocks the trace registers */
 	etm_writel(drvdata, 0x0, ETMOSLAR);
+	drvdata->os_unlock = true;
 	isb();
 }
 
@@ -215,36 +195,156 @@
 	}
 }
 
-static void etm_set_default(struct etm_drvdata *drvdata)
+void etm_set_default(struct etm_config *config)
 {
 	int i;
 
-	drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->enable_event = ETM_HARD_WIRE_RES_A;
+	if (WARN_ON_ONCE(!config))
+		return;
 
-	drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+	/*
+	 * Taken verbatim from the TRM:
+	 *
+	 * To trace all memory:
+	 *  set bit [24] in register 0x009, the ETMTECR1, to 1
+	 *  set all other bits in register 0x009, the ETMTECR1, to 0
+	 *  set all bits in register 0x007, the ETMTECR2, to 0
+	 *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
+	 */
+	config->enable_ctrl1 = BIT(24);
+	config->enable_ctrl2 = 0x0;
+	config->enable_event = ETM_HARD_WIRE_RES_A;
 
-	for (i = 0; i < drvdata->nr_cntr; i++) {
-		drvdata->cntr_rld_val[i] = 0x0;
-		drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
-		drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
-		drvdata->cntr_val[i] = 0x0;
+	config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+	config->enable_event = ETM_HARD_WIRE_RES_A;
+
+	config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+	config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+
+	for (i = 0; i < ETM_MAX_CNTR; i++) {
+		config->cntr_rld_val[i] = 0x0;
+		config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+		config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+		config->cntr_val[i] = 0x0;
 	}
 
-	drvdata->seq_curr_state = 0x0;
-	drvdata->ctxid_idx = 0x0;
-	for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
-		drvdata->ctxid_pid[i] = 0x0;
-		drvdata->ctxid_vpid[i] = 0x0;
+	config->seq_curr_state = 0x0;
+	config->ctxid_idx = 0x0;
+	for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+		config->ctxid_pid[i] = 0x0;
+		config->ctxid_vpid[i] = 0x0;
 	}
 
-	drvdata->ctxid_mask = 0x0;
+	config->ctxid_mask = 0x0;
+}
+
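+/*
+ * Set up address range comparator 1 to span the entire address space and
+ * restrict it to user or kernel mode, based on the exclusion bits in
+ * config->mode.
+ */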
+void etm_config_trace_mode(struct etm_config *config)
+{
+	u32 flags, mode;
+
+	mode = config->mode;
+
+	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
+
+	/* excluding kernel AND user space doesn't make sense */
+	if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+		return;
+
+	/* nothing to do if neither flags are set */
+	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+		return;
+
+	flags = (1 << 0 |	/* instruction execute */
+		 3 << 3 |	/* ARM instruction */
+		 0 << 5 |	/* No data value comparison */
+		 0 << 7 |	/* No exact match */
+		 0 << 8);	/* Ignore context ID */
+
+	/* No need to worry about single address comparators. */
+	config->enable_ctrl2 = 0x0;
+
+	/* Bit 0 is address range comparator 1 */
+	config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
+
+	/*
+	 * On ETMv3.5:
+	 * ETMACTRn[13,11] == Non-secure state comparison control
+	 * ETMACTRn[12,10] == Secure state comparison control
+	 *
+	 * b00 == Match in all modes in this state
+	 * b01 == Do not match in any mode in this state
+	 * b10 == Match in all modes except user mode in this state
+	 * b11 == Match only in user mode in this state
+	 */
+
+	/* Tracing in secure mode is not supported at this time */
+	flags |= (0 << 12 | 1 << 10);
+
+	if (mode & ETM_MODE_EXCL_USER) {
+		/* exclude user, match all modes except user mode */
+		flags |= (1 << 13 | 0 << 11);
+	} else {
+		/* exclude kernel, match only in user mode */
+		flags |= (1 << 13 | 1 << 11);
+	}
+
+	/*
+	 * The ETMTEEVR register is already set to "hard wire A".  As such
+	 * all there is to do is setup an address comparator that spans
+	 * the entire address range and configure the state and mode bits.
+	 */
+	config->addr_val[0] = (u32) 0x0;
+	config->addr_val[1] = (u32) ~0x0;
+	config->addr_acctype[0] = flags;
+	config->addr_acctype[1] = flags;
+	config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+	config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+}
+
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+
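+/*
+ * Translate the perf event attributes into an ETM configuration, starting
+ * from the default "trace everything" setup.
+ */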
+static int etm_parse_event_config(struct etm_drvdata *drvdata,
+				  struct perf_event_attr *attr)
+{
+	struct etm_config *config = &drvdata->config;
+
+	if (!attr)
+		return -EINVAL;
+
+	/* Clear configuration from previous run */
+	memset(config, 0, sizeof(struct etm_config));
+
+	if (attr->exclude_kernel)
+		config->mode = ETM_MODE_EXCL_KERN;
+
+	if (attr->exclude_user)
+		config->mode = ETM_MODE_EXCL_USER;
+
+	/* Always start from the default config */
+	etm_set_default(config);
+
+	/*
+	 * By default the tracers are configured to trace the whole address
+	 * range.  Narrow the field only if requested by user space.
+	 */
+	if (config->mode)
+		etm_config_trace_mode(config);
+
+	/*
+	 * At this time only cycle accurate and timestamp options are
+	 * available.
+	 */
+	if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
+		return -EINVAL;
+
+	config->ctrl = attr->config;
+
+	return 0;
 }
 
 static void etm_enable_hw(void *info)
@@ -252,6 +352,7 @@
 	int i;
 	u32 etmcr;
 	struct etm_drvdata *drvdata = info;
+	struct etm_config *config = &drvdata->config;
 
 	CS_UNLOCK(drvdata->base);
 
@@ -265,65 +366,74 @@
 	etm_set_prog(drvdata);
 
 	etmcr = etm_readl(drvdata, ETMCR);
-	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
+	/* Clear setting from a previous run if need be */
+	etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
 	etmcr |= drvdata->port_size;
-	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
-	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
-	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
-	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
-	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
-	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
+	etmcr |= ETMCR_ETM_EN;
+	etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
+	etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
+	etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
+	etm_writel(drvdata, config->enable_event, ETMTEEVR);
+	etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
+	etm_writel(drvdata, config->fifofull_level, ETMFFLR);
 	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
-		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
+		etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
+		etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
 	}
 	for (i = 0; i < drvdata->nr_cntr; i++) {
-		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
-		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
-		etm_writel(drvdata, drvdata->cntr_rld_event[i],
+		etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
+		etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
+		etm_writel(drvdata, config->cntr_rld_event[i],
 			   ETMCNTRLDEVRn(i));
-		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
+		etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
 	}
-	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
-	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
-	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
-	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
-	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
-	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
-	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
+	etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
+	etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
+	etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
+	etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
+	etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
+	etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
+	etm_writel(drvdata, config->seq_curr_state, ETMSQR);
 	for (i = 0; i < drvdata->nr_ext_out; i++)
 		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
 	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
-	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
-	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
+		etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
+	etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
+	etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
 	/* No external input selected */
 	etm_writel(drvdata, 0x0, ETMEXTINSELR);
-	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
+	etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
 	/* No auxiliary control selected */
 	etm_writel(drvdata, 0x0, ETMAUXCR);
 	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
 	/* No VMID comparator value selected */
 	etm_writel(drvdata, 0x0, ETMVMIDCVR);
 
-	/* Ensures trace output is enabled from this ETM */
-	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
-
 	etm_clr_prog(drvdata);
 	CS_LOCK(drvdata->base);
 
 	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static int etm_trace_id(struct coresight_device *csdev)
+static int etm_cpu_id(struct coresight_device *csdev)
 {
 	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return drvdata->cpu;
+}
+
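+/*
+ * Return the cached trace ID when the tracer is idle, otherwise read it
+ * back from the hardware.
+ */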
+int etm_get_trace_id(struct etm_drvdata *drvdata)
+{
 	unsigned long flags;
 	int trace_id = -1;
 
-	if (!drvdata->enable)
+	if (!drvdata)
+		goto out;
+
+	if (!local_read(&drvdata->mode))
 		return drvdata->traceid;
-	pm_runtime_get_sync(csdev->dev.parent);
+
+	pm_runtime_get_sync(drvdata->dev);
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
@@ -332,17 +442,41 @@
 	CS_LOCK(drvdata->base);
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(csdev->dev.parent);
+	pm_runtime_put(drvdata->dev);
 
+out:
 	return trace_id;
 }
 
-static int etm_enable(struct coresight_device *csdev)
+static int etm_trace_id(struct coresight_device *csdev)
+{
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return etm_get_trace_id(drvdata);
+}
+
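+/*
+ * Enable the tracer for a perf session.  This is expected to run on the
+ * CPU the tracer is affined to, hence the WARN_ON_ONCE() below.
+ */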
+static int etm_enable_perf(struct coresight_device *csdev,
+			   struct perf_event_attr *attr)
+{
+	int ret;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+		return -EINVAL;
+
+	/* Configure the tracer based on the session's specifics */
+	ret = etm_parse_event_config(drvdata, attr);
+	if (ret)
+		return ret;
+	/* And enable it */
+	etm_enable_hw(drvdata);
+
+	return 0;
+}
+
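+/* Enable the tracer using the configuration built via sysfs */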
+static int etm_enable_sysfs(struct coresight_device *csdev)
 {
 	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 	int ret;
 
-	pm_runtime_get_sync(csdev->dev.parent);
 	spin_lock(&drvdata->spinlock);
 
 	/*
@@ -357,16 +491,45 @@
 			goto err;
 	}
 
-	drvdata->enable = true;
 	drvdata->sticky_enable = true;
-
 	spin_unlock(&drvdata->spinlock);
 
 	dev_info(drvdata->dev, "ETM tracing enabled\n");
 	return 0;
+
 err:
 	spin_unlock(&drvdata->spinlock);
-	pm_runtime_put(csdev->dev.parent);
+	return ret;
+}
+
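+/*
+ * Claim the tracer by moving it out of CS_MODE_DISABLED before calling
+ * the sysfs or perf specific enable path.
+ */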
+static int etm_enable(struct coresight_device *csdev,
+		      struct perf_event_attr *attr, u32 mode)
+{
+	int ret;
+	u32 val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
+
+	/* Someone is already using the tracer */
+	if (val)
+		return -EBUSY;
+
+	switch (mode) {
+	case CS_MODE_SYSFS:
+		ret = etm_enable_sysfs(csdev);
+		break;
+	case CS_MODE_PERF:
+		ret = etm_enable_perf(csdev, attr);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/* The tracer didn't start */
+	if (ret)
+		local_set(&drvdata->mode, CS_MODE_DISABLED);
+
 	return ret;
 }
 
@@ -374,18 +537,16 @@
 {
 	int i;
 	struct etm_drvdata *drvdata = info;
+	struct etm_config *config = &drvdata->config;
 
 	CS_UNLOCK(drvdata->base);
 	etm_set_prog(drvdata);
 
-	/* Program trace enable to low by using always false event */
-	etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
-
 	/* Read back sequencer and counters for post trace analysis */
-	drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+	config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 
 	for (i = 0; i < drvdata->nr_cntr; i++)
-		drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+		config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
 
 	etm_set_pwrdwn(drvdata);
 	CS_LOCK(drvdata->base);
@@ -393,7 +554,28 @@
 	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
 }
 
-static void etm_disable(struct coresight_device *csdev)
+static void etm_disable_perf(struct coresight_device *csdev)
+{
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+		return;
+
+	CS_UNLOCK(drvdata->base);
+
+	/* Setting the prog bit disables tracing immediately */
+	etm_set_prog(drvdata);
+
+	/*
+	 * There is no way to know when the tracer will be used again so
+	 * power down the tracer.
+	 */
+	etm_set_pwrdwn(drvdata);
+
+	CS_LOCK(drvdata->base);
+}
+
+static void etm_disable_sysfs(struct coresight_device *csdev)
 {
 	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
@@ -411,16 +593,45 @@
 	 * ensures that register writes occur when cpu is powered.
 	 */
 	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
-	drvdata->enable = false;
 
 	spin_unlock(&drvdata->spinlock);
 	put_online_cpus();
-	pm_runtime_put(csdev->dev.parent);
 
 	dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
+static void etm_disable(struct coresight_device *csdev)
+{
+	u32 mode;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	/*
+	 * As long as the tracer isn't disabled, no other entity can change
+	 * its status.  As such we can read it here without fearing it will
+	 * change under us.
+	 */
+	mode = local_read(&drvdata->mode);
+
+	switch (mode) {
+	case CS_MODE_DISABLED:
+		break;
+	case CS_MODE_SYSFS:
+		etm_disable_sysfs(csdev);
+		break;
+	case CS_MODE_PERF:
+		etm_disable_perf(csdev);
+		break;
+	default:
+		WARN_ON_ONCE(mode);
+		return;
+	}
+
+	if (mode)
+		local_set(&drvdata->mode, CS_MODE_DISABLED);
+}
+
 static const struct coresight_ops_source etm_source_ops = {
+	.cpu_id		= etm_cpu_id,
 	.trace_id	= etm_trace_id,
 	.enable		= etm_enable,
 	.disable	= etm_disable,
@@ -430,1218 +641,6 @@
 	.source_ops	= &etm_source_ops,
 };
 
-static ssize_t nr_addr_cmp_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->nr_addr_cmp;
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->nr_cntr;
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ctxid_cmp_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->nr_ctxid_cmp;
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ctxid_cmp);
-
-static ssize_t etmsr_show(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	unsigned long flags, val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	pm_runtime_get_sync(drvdata->dev);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	CS_UNLOCK(drvdata->base);
-
-	val = etm_readl(drvdata, ETMSR);
-
-	CS_LOCK(drvdata->base);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(etmsr);
-
-static ssize_t reset_store(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t size)
-{
-	int i, ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val) {
-		spin_lock(&drvdata->spinlock);
-		drvdata->mode = ETM_MODE_EXCLUDE;
-		drvdata->ctrl = 0x0;
-		drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-		drvdata->startstop_ctrl = 0x0;
-		drvdata->addr_idx = 0x0;
-		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-			drvdata->addr_val[i] = 0x0;
-			drvdata->addr_acctype[i] = 0x0;
-			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
-		}
-		drvdata->cntr_idx = 0x0;
-
-		etm_set_default(drvdata);
-		spin_unlock(&drvdata->spinlock);
-	}
-
-	return size;
-}
-static DEVICE_ATTR_WO(reset);
-
-static ssize_t mode_show(struct device *dev,
-			 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->mode;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->mode = val & ETM_MODE_ALL;
-
-	if (drvdata->mode & ETM_MODE_EXCLUDE)
-		drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
-	else
-		drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
-
-	if (drvdata->mode & ETM_MODE_CYCACC)
-		drvdata->ctrl |= ETMCR_CYC_ACC;
-	else
-		drvdata->ctrl &= ~ETMCR_CYC_ACC;
-
-	if (drvdata->mode & ETM_MODE_STALL) {
-		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
-			dev_warn(drvdata->dev, "stall mode not supported\n");
-			ret = -EINVAL;
-			goto err_unlock;
-		}
-		drvdata->ctrl |= ETMCR_STALL_MODE;
-	 } else
-		drvdata->ctrl &= ~ETMCR_STALL_MODE;
-
-	if (drvdata->mode & ETM_MODE_TIMESTAMP) {
-		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
-			dev_warn(drvdata->dev, "timestamp not supported\n");
-			ret = -EINVAL;
-			goto err_unlock;
-		}
-		drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
-	} else
-		drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
-
-	if (drvdata->mode & ETM_MODE_CTXID)
-		drvdata->ctrl |= ETMCR_CTXID_SIZE;
-	else
-		drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-
-err_unlock:
-	spin_unlock(&drvdata->spinlock);
-	return ret;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t trigger_event_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->trigger_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t trigger_event_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->trigger_event = val & ETM_EVENT_MASK;
-
-	return size;
-}
-static DEVICE_ATTR_RW(trigger_event);
-
-static ssize_t enable_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->enable_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t enable_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->enable_event = val & ETM_EVENT_MASK;
-
-	return size;
-}
-static DEVICE_ATTR_RW(enable_event);
-
-static ssize_t fifofull_level_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->fifofull_level;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t fifofull_level_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->fifofull_level = val;
-
-	return size;
-}
-static DEVICE_ATTR_RW(fifofull_level);
-
-static ssize_t addr_idx_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->addr_idx;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val >= drvdata->nr_addr_cmp)
-		return -EINVAL;
-
-	/*
-	 * Use spinlock to ensure index doesn't change while it gets
-	 * dereferenced multiple times within a spinlock block elsewhere.
-	 */
-	spin_lock(&drvdata->spinlock);
-	drvdata->addr_idx = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_single_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EINVAL;
-	}
-
-	val = drvdata->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t size)
-{
-	u8 idx;
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EINVAL;
-	}
-
-	drvdata->addr_val[idx] = val;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val1, val2;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (idx % 2 != 0) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	val1 = drvdata->addr_val[idx];
-	val2 = drvdata->addr_val[idx + 1];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	u8 idx;
-	unsigned long val1, val2;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-		return -EINVAL;
-	/* Lower address comparator cannot have a higher address value */
-	if (val1 > val2)
-		return -EINVAL;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (idx % 2 != 0) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	drvdata->addr_val[idx] = val1;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
-	drvdata->addr_val[idx + 1] = val2;
-	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
-	drvdata->enable_ctrl1 |= (1 << (idx/2));
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	val = drvdata->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	u8 idx;
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	drvdata->addr_val[idx] = val;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
-	drvdata->startstop_ctrl |= (1 << idx);
-	drvdata->enable_ctrl1 |= BIT(25);
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	val = drvdata->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t size)
-{
-	u8 idx;
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	drvdata->addr_val[idx] = val;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
-	drvdata->startstop_ctrl |= (1 << (idx + 16));
-	drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_acctype_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->addr_acctype[drvdata->addr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_acctype_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->addr_acctype[drvdata->addr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_acctype);
-
-static ssize_t cntr_idx_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->cntr_idx;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val >= drvdata->nr_cntr)
-		return -EINVAL;
-	/*
-	 * Use spinlock to ensure index doesn't change while it gets
-	 * dereferenced multiple times within a spinlock block elsewhere.
-	 */
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_idx = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntr_rld_val_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->cntr_rld_val[drvdata->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_val_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_val);
-
-static ssize_t cntr_event_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->cntr_event[drvdata->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_event_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_event);
-
-static ssize_t cntr_rld_event_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->cntr_rld_event[drvdata->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_event_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_event);
-
-static ssize_t cntr_val_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	int i, ret = 0;
-	u32 val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	if (!drvdata->enable) {
-		spin_lock(&drvdata->spinlock);
-		for (i = 0; i < drvdata->nr_cntr; i++)
-			ret += sprintf(buf, "counter %d: %x\n",
-				       i, drvdata->cntr_val[i]);
-		spin_unlock(&drvdata->spinlock);
-		return ret;
-	}
-
-	for (i = 0; i < drvdata->nr_cntr; i++) {
-		val = etm_readl(drvdata, ETMCNTVRn(i));
-		ret += sprintf(buf, "counter %d: %x\n", i, val);
-	}
-
-	return ret;
-}
-
-static ssize_t cntr_val_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_val[drvdata->cntr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_val);
-
-static ssize_t seq_12_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->seq_12_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_12_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->seq_12_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_12_event);
-
-static ssize_t seq_21_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->seq_21_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_21_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->seq_21_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_21_event);
-
-static ssize_t seq_23_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->seq_23_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_23_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->seq_23_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_23_event);
-
-static ssize_t seq_31_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->seq_31_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_31_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->seq_31_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_31_event);
-
-static ssize_t seq_32_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->seq_32_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_32_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->seq_32_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_32_event);
-
-static ssize_t seq_13_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->seq_13_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_13_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->seq_13_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_13_event);
-
-static ssize_t seq_curr_state_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	unsigned long val, flags;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	if (!drvdata->enable) {
-		val = drvdata->seq_curr_state;
-		goto out;
-	}
-
-	pm_runtime_get_sync(drvdata->dev);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-
-	CS_UNLOCK(drvdata->base);
-	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
-	CS_LOCK(drvdata->base);
-
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
-out:
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_curr_state_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val > ETM_SEQ_STATE_MAX_VAL)
-		return -EINVAL;
-
-	drvdata->seq_curr_state = val;
-
-	return size;
-}
-static DEVICE_ATTR_RW(seq_curr_state);
-
-static ssize_t ctxid_idx_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->ctxid_idx;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_idx_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val >= drvdata->nr_ctxid_cmp)
-		return -EINVAL;
-
-	/*
-	 * Use spinlock to ensure index doesn't change while it gets
-	 * dereferenced multiple times within a spinlock block elsewhere.
-	 */
-	spin_lock(&drvdata->spinlock);
-	drvdata->ctxid_idx = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(ctxid_idx);
-
-static ssize_t ctxid_pid_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_pid_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t size)
-{
-	int ret;
-	unsigned long vpid, pid;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &vpid);
-	if (ret)
-		return ret;
-
-	pid = coresight_vpid_to_pid(vpid);
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
-	drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(ctxid_pid);
-
-static ssize_t ctxid_mask_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->ctxid_mask;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_mask_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->ctxid_mask = val;
-	return size;
-}
-static DEVICE_ATTR_RW(ctxid_mask);
-
-static ssize_t sync_freq_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->sync_freq;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t sync_freq_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->sync_freq = val & ETM_SYNC_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(sync_freq);
-
-static ssize_t timestamp_event_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->timestamp_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t timestamp_event_store(struct device *dev,
-				     struct device_attribute *attr,
-				     const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->timestamp_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(timestamp_event);
-
-static ssize_t cpu_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	int val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->cpu;
-	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
-
-}
-static DEVICE_ATTR_RO(cpu);
-
-static ssize_t traceid_show(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	unsigned long val, flags;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	if (!drvdata->enable) {
-		val = drvdata->traceid;
-		goto out;
-	}
-
-	pm_runtime_get_sync(drvdata->dev);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	CS_UNLOCK(drvdata->base);
-
-	val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
-
-	CS_LOCK(drvdata->base);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
-out:
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t traceid_store(struct device *dev,
-			     struct device_attribute *attr,
-			     const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->traceid = val & ETM_TRACEID_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(traceid);
-
-static struct attribute *coresight_etm_attrs[] = {
-	&dev_attr_nr_addr_cmp.attr,
-	&dev_attr_nr_cntr.attr,
-	&dev_attr_nr_ctxid_cmp.attr,
-	&dev_attr_etmsr.attr,
-	&dev_attr_reset.attr,
-	&dev_attr_mode.attr,
-	&dev_attr_trigger_event.attr,
-	&dev_attr_enable_event.attr,
-	&dev_attr_fifofull_level.attr,
-	&dev_attr_addr_idx.attr,
-	&dev_attr_addr_single.attr,
-	&dev_attr_addr_range.attr,
-	&dev_attr_addr_start.attr,
-	&dev_attr_addr_stop.attr,
-	&dev_attr_addr_acctype.attr,
-	&dev_attr_cntr_idx.attr,
-	&dev_attr_cntr_rld_val.attr,
-	&dev_attr_cntr_event.attr,
-	&dev_attr_cntr_rld_event.attr,
-	&dev_attr_cntr_val.attr,
-	&dev_attr_seq_12_event.attr,
-	&dev_attr_seq_21_event.attr,
-	&dev_attr_seq_23_event.attr,
-	&dev_attr_seq_31_event.attr,
-	&dev_attr_seq_32_event.attr,
-	&dev_attr_seq_13_event.attr,
-	&dev_attr_seq_curr_state.attr,
-	&dev_attr_ctxid_idx.attr,
-	&dev_attr_ctxid_pid.attr,
-	&dev_attr_ctxid_mask.attr,
-	&dev_attr_sync_freq.attr,
-	&dev_attr_timestamp_event.attr,
-	&dev_attr_traceid.attr,
-	&dev_attr_cpu.attr,
-	NULL,
-};
-
-#define coresight_simple_func(name, offset)                             \
-static ssize_t name##_show(struct device *_dev,                         \
-			   struct device_attribute *attr, char *buf)    \
-{                                                                       \
-	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
-	return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
-			 readl_relaxed(drvdata->base + offset));        \
-}                                                                       \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
-
-static struct attribute *coresight_etm_mgmt_attrs[] = {
-	&dev_attr_etmccr.attr,
-	&dev_attr_etmccer.attr,
-	&dev_attr_etmscr.attr,
-	&dev_attr_etmidr.attr,
-	&dev_attr_etmcr.attr,
-	&dev_attr_etmtraceidr.attr,
-	&dev_attr_etmteevr.attr,
-	&dev_attr_etmtssvr.attr,
-	&dev_attr_etmtecr1.attr,
-	&dev_attr_etmtecr2.attr,
-	NULL,
-};
-
-static const struct attribute_group coresight_etm_group = {
-	.attrs = coresight_etm_attrs,
-};
-
-
-static const struct attribute_group coresight_etm_mgmt_group = {
-	.attrs = coresight_etm_mgmt_attrs,
-	.name = "mgmt",
-};
-
-static const struct attribute_group *coresight_etm_groups[] = {
-	&coresight_etm_group,
-	&coresight_etm_mgmt_group,
-	NULL,
-};
-
 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
 			    void *hcpu)
 {
@@ -1658,7 +657,7 @@
 			etmdrvdata[cpu]->os_unlock = true;
 		}
 
-		if (etmdrvdata[cpu]->enable)
+		if (local_read(&etmdrvdata[cpu]->mode))
 			etm_enable_hw(etmdrvdata[cpu]);
 		spin_unlock(&etmdrvdata[cpu]->spinlock);
 		break;
@@ -1671,7 +670,7 @@
 
 	case CPU_DYING:
 		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (etmdrvdata[cpu]->enable)
+		if (local_read(&etmdrvdata[cpu]->mode))
 			etm_disable_hw(etmdrvdata[cpu]);
 		spin_unlock(&etmdrvdata[cpu]->spinlock);
 		break;
@@ -1707,6 +706,9 @@
 	u32 etmccr;
 	struct etm_drvdata *drvdata = info;
 
+	/* Make sure all registers are accessible */
+	etm_os_unlock(drvdata);
+
 	CS_UNLOCK(drvdata->base);
 
 	/* First dummy read */
@@ -1743,40 +745,9 @@
 	CS_LOCK(drvdata->base);
 }
 
-static void etm_init_default_data(struct etm_drvdata *drvdata)
+static void etm_init_trace_id(struct etm_drvdata *drvdata)
 {
-	/*
-	 * A trace ID of value 0 is invalid, so let's start at some
-	 * random value that fits in 7 bits and will be just as good.
-	 */
-	static int etm3x_traceid = 0x10;
-
-	u32 flags = (1 << 0 | /* instruction execute*/
-		     3 << 3 | /* ARM instruction */
-		     0 << 5 | /* No data value comparison */
-		     0 << 7 | /* No exact mach */
-		     0 << 8 | /* Ignore context ID */
-		     0 << 10); /* Security ignored */
-
-	/*
-	 * Initial configuration only - guarantees sources handled by
-	 * this driver have a unique ID at startup time but not between
-	 * all other types of sources.  For that we lean on the core
-	 * framework.
-	 */
-	drvdata->traceid = etm3x_traceid++;
-	drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
-	drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
-	if (drvdata->nr_addr_cmp >= 2) {
-		drvdata->addr_val[0] = (u32) _stext;
-		drvdata->addr_val[1] = (u32) _etext;
-		drvdata->addr_acctype[0] = flags;
-		drvdata->addr_acctype[1] = flags;
-		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
-		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
-	}
-
-	etm_set_default(drvdata);
+	drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
 }
 
 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1831,9 +802,6 @@
 	get_online_cpus();
 	etmdrvdata[drvdata->cpu] = drvdata;
 
-	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
-		drvdata->os_unlock = true;
-
 	if (smp_call_function_single(drvdata->cpu,
 				     etm_init_arch_data,  drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
@@ -1847,7 +815,9 @@
 		ret = -EINVAL;
 		goto err_arch_supported;
 	}
-	etm_init_default_data(drvdata);
+
+	etm_init_trace_id(drvdata);
+	etm_set_default(&drvdata->config);
 
 	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
 	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1861,6 +831,12 @@
 		goto err_arch_supported;
 	}
 
+	ret = etm_perf_symlink(drvdata->csdev, true);
+	if (ret) {
+		coresight_unregister(drvdata->csdev);
+		goto err_arch_supported;
+	}
+
 	pm_runtime_put(&adev->dev);
 	dev_info(dev, "%s initialized\n", (char *)id->data);
 
@@ -1877,17 +853,6 @@
 	return ret;
 }
 
-static int etm_remove(struct amba_device *adev)
-{
-	struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	if (--etm_count == 0)
-		unregister_hotcpu_notifier(&etm_cpu_notifier);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int etm_runtime_suspend(struct device *dev)
 {
@@ -1948,13 +913,9 @@
 		.name	= "coresight-etm3x",
 		.owner	= THIS_MODULE,
 		.pm	= &etm_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= etm_probe,
-	.remove		= etm_remove,
 	.id_table	= etm_ids,
 };
-
-module_amba_driver(etm_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
+builtin_amba_driver(etm_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a670764..1c59bd3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -32,6 +31,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm4x.h"
@@ -63,6 +63,13 @@
 	return true;
 }
 
+static int etm4_cpu_id(struct coresight_device *csdev)
+{
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return drvdata->cpu;
+}
+
 static int etm4_trace_id(struct coresight_device *csdev)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -72,7 +79,6 @@
 	if (!drvdata->enable)
 		return drvdata->trcid;
 
-	pm_runtime_get_sync(drvdata->dev);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	CS_UNLOCK(drvdata->base);
@@ -81,7 +87,6 @@
 	CS_LOCK(drvdata->base);
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
 
 	return trace_id;
 }
@@ -182,12 +187,12 @@
 	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static int etm4_enable(struct coresight_device *csdev)
+static int etm4_enable(struct coresight_device *csdev,
+		       struct perf_event_attr *attr, u32 mode)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 	int ret;
 
-	pm_runtime_get_sync(drvdata->dev);
 	spin_lock(&drvdata->spinlock);
 
 	/*
@@ -207,7 +212,6 @@
 	return 0;
 err:
 	spin_unlock(&drvdata->spinlock);
-	pm_runtime_put(drvdata->dev);
 	return ret;
 }
 
@@ -256,12 +260,11 @@
 	spin_unlock(&drvdata->spinlock);
 	put_online_cpus();
 
-	pm_runtime_put(drvdata->dev);
-
 	dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
 static const struct coresight_ops_source etm4_source_ops = {
+	.cpu_id		= etm4_cpu_id,
 	.trace_id	= etm4_trace_id,
 	.enable		= etm4_enable,
 	.disable	= etm4_disable,
@@ -2219,7 +2222,7 @@
 	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
 			 readl_relaxed(drvdata->base + offset));	\
 }									\
-DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
 
 coresight_simple_func(trcoslsr, TRCOSLSR);
 coresight_simple_func(trcpdcr, TRCPDCR);
@@ -2684,17 +2687,6 @@
 	return ret;
 }
 
-static int etm4_remove(struct amba_device *adev)
-{
-	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	if (--etm4_count == 0)
-		unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
-	return 0;
-}
-
 static struct amba_id etm4_ids[] = {
 	{       /* ETM 4.0 - Qualcomm */
 		.id	= 0x0003b95d,
@@ -2712,10 +2704,9 @@
 static struct amba_driver etm4x_driver = {
 	.drv = {
 		.name   = "coresight-etm4x",
+		.suppress_bind_attrs = true,
 	},
 	.probe		= etm4_probe,
-	.remove		= etm4_remove,
 	.id_table	= etm4_ids,
 };
-
-module_amba_driver(etm4x_driver);
+builtin_amba_driver(etm4x_driver);
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 2e36bde..0600ca3 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -1,5 +1,7 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
+ * Description: CoreSight Funnel driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -69,7 +70,6 @@
 {
 	struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
 	funnel_enable_hw(drvdata, inport);
 
 	dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -95,7 +95,6 @@
 	struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
 	funnel_disable_hw(drvdata, inport);
-	pm_runtime_put(drvdata->dev);
 
 	dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
 }
@@ -226,14 +225,6 @@
 	return 0;
 }
 
-static int funnel_remove(struct amba_device *adev)
-{
-	struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int funnel_runtime_suspend(struct device *dev)
 {
@@ -273,13 +264,9 @@
 		.name	= "coresight-funnel",
 		.owner	= THIS_MODULE,
 		.pm	= &funnel_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= funnel_probe,
-	.remove		= funnel_remove,
 	.id_table	= funnel_ids,
 };
-
-module_amba_driver(funnel_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
+builtin_amba_driver(funnel_driver);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 62fcd98..333edda 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -34,6 +34,15 @@
 #define TIMEOUT_US		100
 #define BMVAL(val, lsb, msb)	((val & GENMASK(msb, lsb)) >> lsb)
 
+#define ETM_MODE_EXCL_KERN	BIT(30)
+#define ETM_MODE_EXCL_USER	BIT(31)
+
+enum cs_mode {
+	CS_MODE_DISABLED,
+	CS_MODE_SYSFS,
+	CS_MODE_PERF,
+};
+
 static inline void CS_LOCK(void __iomem *addr)
 {
 	do {
@@ -52,6 +61,12 @@
 	} while (0);
 }
 
+void coresight_disable_path(struct list_head *path);
+int coresight_enable_path(struct list_head *path, u32 mode);
+struct coresight_device *coresight_get_sink(struct list_head *path);
+struct list_head *coresight_build_path(struct coresight_device *csdev);
+void coresight_release_path(struct list_head *path);
+
 #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
 extern int etm_readl_cp14(u32 off, unsigned int *val);
 extern int etm_writel_cp14(u32 off, u32 val);
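
A note on the new cs_mode plumbing above: the extra "mode" argument threaded through the sink and source enable callbacks in this series tells a driver whether it is being driven from sysFS or from perf. A minimal sketch of what a sink driver's enable hook can do with it (the example_* names are placeholders, not part of this patch):

	static int example_enable_sink(struct coresight_device *csdev, u32 mode)
	{
		struct example_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

		switch (mode) {
		case CS_MODE_SYSFS:
			/* sysFS session: keep using the driver's own buffer */
			return example_enable_hw(drvdata);
		case CS_MODE_PERF:
			/* perf session: buffers will come from the perf core */
			return example_enable_hw(drvdata);
		default:
			return -EINVAL;
		}
	}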
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 584059e..700f710 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -15,7 +15,6 @@
 #include <linux/clk.h>
 #include <linux/coresight.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -48,8 +47,6 @@
 {
 	struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
-
 	CS_UNLOCK(drvdata->base);
 
 	/*
@@ -86,8 +83,6 @@
 
 	CS_LOCK(drvdata->base);
 
-	pm_runtime_put(drvdata->dev);
-
 	dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -156,15 +151,6 @@
 	return 0;
 }
 
-static int replicator_remove(struct amba_device *adev)
-{
-	struct replicator_state *drvdata = amba_get_drvdata(adev);
-
-	pm_runtime_disable(&adev->dev);
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -206,10 +192,9 @@
 	.drv = {
 		.name	= "coresight-replicator-qcom",
 		.pm	= &replicator_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= replicator_probe,
-	.remove		= replicator_remove,
 	.id_table	= replicator_ids,
 };
-
-module_amba_driver(replicator_driver);
+builtin_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 963ac19..4299c05 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -1,5 +1,7 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
+ * Description: CoreSight Replicator driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -41,7 +42,6 @@
 {
 	struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
 	dev_info(drvdata->dev, "REPLICATOR enabled\n");
 	return 0;
 }
@@ -51,7 +51,6 @@
 {
 	struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_put(drvdata->dev);
 	dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -127,20 +126,6 @@
 	return ret;
 }
 
-static int replicator_remove(struct platform_device *pdev)
-{
-	struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
-	coresight_unregister(drvdata->csdev);
-	pm_runtime_get_sync(&pdev->dev);
-	if (!IS_ERR(drvdata->atclk))
-		clk_disable_unprepare(drvdata->atclk);
-	pm_runtime_put_noidle(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -175,15 +160,11 @@
 
 static struct platform_driver replicator_driver = {
 	.probe          = replicator_probe,
-	.remove         = replicator_remove,
 	.driver         = {
 		.name   = "coresight-replicator",
 		.of_match_table = replicator_match,
 		.pm	= &replicator_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
-
 builtin_platform_driver(replicator_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index a57c7ec..1be191f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,5 +1,7 @@
 /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
+ * Description: CoreSight Trace Memory Controller driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -124,7 +125,7 @@
 	bool			reading;
 	char			*buf;
 	dma_addr_t		paddr;
-	void __iomem		*vaddr;
+	void			*vaddr;
 	u32			size;
 	bool			enable;
 	enum tmc_config_type	config_type;
@@ -242,12 +243,9 @@
 {
 	unsigned long flags;
 
-	pm_runtime_get_sync(drvdata->dev);
-
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
-		pm_runtime_put(drvdata->dev);
 		return -EBUSY;
 	}
 
@@ -268,7 +266,7 @@
 	return 0;
 }
 
-static int tmc_enable_sink(struct coresight_device *csdev)
+static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
 {
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
@@ -381,8 +379,6 @@
 	drvdata->enable = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	pm_runtime_put(drvdata->dev);
-
 	dev_info(drvdata->dev, "TMC disabled\n");
 }
 
@@ -766,23 +762,10 @@
 err_devm_kzalloc:
 	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
 		dma_free_coherent(dev, drvdata->size,
-				&drvdata->paddr, GFP_KERNEL);
+				drvdata->vaddr, drvdata->paddr);
 	return ret;
 }
 
-static int tmc_remove(struct amba_device *adev)
-{
-	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
-	misc_deregister(&drvdata->miscdev);
-	coresight_unregister(drvdata->csdev);
-	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
-		dma_free_coherent(drvdata->dev, drvdata->size,
-				  &drvdata->paddr, GFP_KERNEL);
-
-	return 0;
-}
-
 static struct amba_id tmc_ids[] = {
 	{
 		.id     = 0x0003b961,
@@ -795,13 +778,9 @@
 	.drv = {
 		.name   = "coresight-tmc",
 		.owner  = THIS_MODULE,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= tmc_probe,
-	.remove		= tmc_remove,
 	.id_table	= tmc_ids,
 };
-
-module_amba_driver(tmc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
+builtin_amba_driver(tmc_driver);
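
The tmc_probe() error path above also corrects the dma_free_coherent() call: the old code handed it &drvdata->paddr and GFP_KERNEL where the CPU address and the DMA handle belong. For reference, the allocation and the free have to stay paired like this (a sketch using the driver's own fields):

	/* allocate the ETR trace buffer once at probe time */
	drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
					    &drvdata->paddr, GFP_KERNEL);
	if (!drvdata->vaddr)
		return -ENOMEM;

	/* on error or teardown, free with the matching cpu/dma pair */
	dma_free_coherent(dev, drvdata->size, drvdata->vaddr, drvdata->paddr);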
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7214efd..8fb09d9 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -1,5 +1,7 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
+ * Description: CoreSight Trace Port Interface Unit driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -70,11 +71,10 @@
 	CS_LOCK(drvdata->base);
 }
 
-static int tpiu_enable(struct coresight_device *csdev)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode)
 {
 	struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(csdev->dev.parent);
 	tpiu_enable_hw(drvdata);
 
 	dev_info(drvdata->dev, "TPIU enabled\n");
@@ -98,7 +98,6 @@
 	struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
 	tpiu_disable_hw(drvdata);
-	pm_runtime_put(csdev->dev.parent);
 
 	dev_info(drvdata->dev, "TPIU disabled\n");
 }
@@ -172,14 +171,6 @@
 	return 0;
 }
 
-static int tpiu_remove(struct amba_device *adev)
-{
-	struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int tpiu_runtime_suspend(struct device *dev)
 {
@@ -223,13 +214,9 @@
 		.name	= "coresight-tpiu",
 		.owner	= THIS_MODULE,
 		.pm	= &tpiu_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= tpiu_probe,
-	.remove		= tpiu_remove,
 	.id_table	= tpiu_ids,
 };
-
-module_amba_driver(tpiu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
+builtin_amba_driver(tpiu_driver);
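
All of the CoreSight drivers touched here follow the same conversion: the .remove() callback and the MODULE_* boilerplate go away, .suppress_bind_attrs is set so the device cannot be unbound through sysfs, and registration switches from module_amba_driver() to builtin_amba_driver(). Condensed, the resulting skeleton looks like this (example_* names are placeholders):

	static struct amba_driver example_driver = {
		.drv = {
			.name			= "coresight-example",
			.pm			= &example_dev_pm_ops,
			.suppress_bind_attrs	= true,	/* no manual unbind */
		},
		.probe		= example_probe,
		/* no .remove: built-in devices are never taken away */
		.id_table	= example_ids,
	};
	builtin_amba_driver(example_driver);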
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 93738df..2ea5961 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -24,11 +23,28 @@
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "coresight-priv.h"
 
 static DEFINE_MUTEX(coresight_mutex);
 
+/**
+ * struct coresight_node - elements of a path, from source to sink
+ * @csdev:	Address of an element.
+ * @link:	hook to the list.
+ */
+struct coresight_node {
+	struct coresight_device *csdev;
+	struct list_head link;
+};
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated with a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, sysfs_path);
+
 static int coresight_id_match(struct device *dev, void *data)
 {
 	int trace_id, i_trace_id;
@@ -68,15 +84,12 @@
 				 csdev, coresight_id_match);
 }
 
-static int coresight_find_link_inport(struct coresight_device *csdev)
+static int coresight_find_link_inport(struct coresight_device *csdev,
+				      struct coresight_device *parent)
 {
 	int i;
-	struct coresight_device *parent;
 	struct coresight_connection *conn;
 
-	parent = container_of(csdev->path_link.next,
-			      struct coresight_device, path_link);
-
 	for (i = 0; i < parent->nr_outport; i++) {
 		conn = &parent->conns[i];
 		if (conn->child_dev == csdev)
@@ -89,15 +102,12 @@
 	return 0;
 }
 
-static int coresight_find_link_outport(struct coresight_device *csdev)
+static int coresight_find_link_outport(struct coresight_device *csdev,
+				       struct coresight_device *child)
 {
 	int i;
-	struct coresight_device *child;
 	struct coresight_connection *conn;
 
-	child = container_of(csdev->path_link.prev,
-			     struct coresight_device, path_link);
-
 	for (i = 0; i < csdev->nr_outport; i++) {
 		conn = &csdev->conns[i];
 		if (conn->child_dev == child)
@@ -110,13 +120,13 @@
 	return 0;
 }
 
-static int coresight_enable_sink(struct coresight_device *csdev)
+static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
 {
 	int ret;
 
 	if (!csdev->enable) {
 		if (sink_ops(csdev)->enable) {
-			ret = sink_ops(csdev)->enable(csdev);
+			ret = sink_ops(csdev)->enable(csdev, mode);
 			if (ret)
 				return ret;
 		}
@@ -138,14 +148,19 @@
 	}
 }
 
-static int coresight_enable_link(struct coresight_device *csdev)
+static int coresight_enable_link(struct coresight_device *csdev,
+				 struct coresight_device *parent,
+				 struct coresight_device *child)
 {
 	int ret;
 	int link_subtype;
 	int refport, inport, outport;
 
-	inport = coresight_find_link_inport(csdev);
-	outport = coresight_find_link_outport(csdev);
+	if (!parent || !child)
+		return -EINVAL;
+
+	inport = coresight_find_link_inport(csdev, parent);
+	outport = coresight_find_link_outport(csdev, child);
 	link_subtype = csdev->subtype.link_subtype;
 
 	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
@@ -168,14 +183,19 @@
 	return 0;
 }
 
-static void coresight_disable_link(struct coresight_device *csdev)
+static void coresight_disable_link(struct coresight_device *csdev,
+				   struct coresight_device *parent,
+				   struct coresight_device *child)
 {
 	int i, nr_conns;
 	int link_subtype;
 	int refport, inport, outport;
 
-	inport = coresight_find_link_inport(csdev);
-	outport = coresight_find_link_outport(csdev);
+	if (!parent || !child)
+		return;
+
+	inport = coresight_find_link_inport(csdev, parent);
+	outport = coresight_find_link_outport(csdev, child);
 	link_subtype = csdev->subtype.link_subtype;
 
 	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
@@ -201,7 +221,7 @@
 	csdev->enable = false;
 }
 
-static int coresight_enable_source(struct coresight_device *csdev)
+static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 {
 	int ret;
 
@@ -213,7 +233,7 @@
 
 	if (!csdev->enable) {
 		if (source_ops(csdev)->enable) {
-			ret = source_ops(csdev)->enable(csdev);
+			ret = source_ops(csdev)->enable(csdev, NULL, mode);
 			if (ret)
 				return ret;
 		}
@@ -235,109 +255,188 @@
 	}
 }
 
-static int coresight_enable_path(struct list_head *path)
+void coresight_disable_path(struct list_head *path)
 {
+	struct coresight_node *nd;
+	struct coresight_device *csdev, *parent, *child;
+
+	list_for_each_entry(nd, path, link) {
+		csdev = nd->csdev;
+
+		switch (csdev->type) {
+		case CORESIGHT_DEV_TYPE_SINK:
+		case CORESIGHT_DEV_TYPE_LINKSINK:
+			coresight_disable_sink(csdev);
+			break;
+		case CORESIGHT_DEV_TYPE_SOURCE:
+			/* sources are disabled from either sysFS or Perf */
+			break;
+		case CORESIGHT_DEV_TYPE_LINK:
+			parent = list_prev_entry(nd, link)->csdev;
+			child = list_next_entry(nd, link)->csdev;
+			coresight_disable_link(csdev, parent, child);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+int coresight_enable_path(struct list_head *path, u32 mode)
+{
+
 	int ret = 0;
-	struct coresight_device *cd;
+	struct coresight_node *nd;
+	struct coresight_device *csdev, *parent, *child;
 
-	/*
-	 * At this point we have a full @path, from source to sink.  The
-	 * sink is the first entry and the source the last one.  Go through
-	 * all the components and enable them one by one.
-	 */
-	list_for_each_entry(cd, path, path_link) {
-		if (cd == list_first_entry(path, struct coresight_device,
-					   path_link)) {
-			ret = coresight_enable_sink(cd);
-		} else if (list_is_last(&cd->path_link, path)) {
-			/*
-			 * Don't enable the source just yet - this needs to
-			 * happen at the very end when all links and sink
-			 * along the path have been configured properly.
-			 */
-			;
-		} else {
-			ret = coresight_enable_link(cd);
-		}
-		if (ret)
+	list_for_each_entry_reverse(nd, path, link) {
+		csdev = nd->csdev;
+
+		switch (csdev->type) {
+		case CORESIGHT_DEV_TYPE_SINK:
+		case CORESIGHT_DEV_TYPE_LINKSINK:
+			ret = coresight_enable_sink(csdev, mode);
+			if (ret)
+				goto err;
+			break;
+		case CORESIGHT_DEV_TYPE_SOURCE:
+			/* sources are enabled from either sysFS or Perf */
+			break;
+		case CORESIGHT_DEV_TYPE_LINK:
+			parent = list_prev_entry(nd, link)->csdev;
+			child = list_next_entry(nd, link)->csdev;
+			ret = coresight_enable_link(csdev, parent, child);
+			if (ret)
+				goto err;
+			break;
+		default:
 			goto err;
-	}
-
-	return 0;
-err:
-	list_for_each_entry_continue_reverse(cd, path, path_link) {
-		if (cd == list_first_entry(path, struct coresight_device,
-					   path_link)) {
-			coresight_disable_sink(cd);
-		} else if (list_is_last(&cd->path_link, path)) {
-			;
-		} else {
-			coresight_disable_link(cd);
 		}
 	}
 
+out:
 	return ret;
+err:
+	coresight_disable_path(path);
+	goto out;
 }
 
-static int coresight_disable_path(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct list_head *path)
 {
-	struct coresight_device *cd;
+	struct coresight_device *csdev;
 
-	list_for_each_entry_reverse(cd, path, path_link) {
-		if (cd == list_first_entry(path, struct coresight_device,
-					   path_link)) {
-			coresight_disable_sink(cd);
-		} else if (list_is_last(&cd->path_link, path)) {
-			/*
-			 * The source has already been stopped, no need
-			 * to do it again here.
-			 */
-			;
-		} else {
-			coresight_disable_link(cd);
-		}
-	}
+	if (!path)
+		return NULL;
 
-	return 0;
+	csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+		return NULL;
+
+	return csdev;
 }
 
-static int coresight_build_paths(struct coresight_device *csdev,
-				 struct list_head *path,
-				 bool enable)
+/**
+ * _coresight_build_path - recursively build a path from a @csdev to a sink.
+ * @csdev:	The device to start from.
+ * @path:	The list to add devices to.
+ *
+ * The tree of Coresight devices is traversed until an activated sink is
+ * found.  From there the sink is added to the list along with all the
+ * devices that led to that point - the end result is a list from source
+ * to sink. In that list the source is the first device and the sink the
+ * last one.
+ */
+static int _coresight_build_path(struct coresight_device *csdev,
+				 struct list_head *path)
 {
-	int i, ret = -EINVAL;
+	int i;
+	bool found = false;
+	struct coresight_node *node;
 	struct coresight_connection *conn;
 
-	list_add(&csdev->path_link, path);
-
+	/* An activated sink has been found.  Enqueue the element */
 	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
-	    csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
-	    csdev->activated) {
-		if (enable)
-			ret = coresight_enable_path(path);
-		else
-			ret = coresight_disable_path(path);
-	} else {
-		for (i = 0; i < csdev->nr_outport; i++) {
-			conn = &csdev->conns[i];
-			if (coresight_build_paths(conn->child_dev,
-						    path, enable) == 0)
-				ret = 0;
+	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+		goto out;
+
+	/* Not a sink - recursively explore each port found on this element */
+	for (i = 0; i < csdev->nr_outport; i++) {
+		conn = &csdev->conns[i];
+		if (_coresight_build_path(conn->child_dev, path) == 0) {
+			found = true;
+			break;
 		}
 	}
 
-	if (list_first_entry(path, struct coresight_device, path_link) != csdev)
-		dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+	if (!found)
+		return -ENODEV;
 
-	list_del(&csdev->path_link);
+out:
+	/*
+	 * A path from this element to a sink has been found.  The elements
+	 * leading to the sink are already enqueued, all that is left to do
+	 * is tell the PM runtime core we need this element and add a node
+	 * for it.
+	 */
+	node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
 
-	return ret;
+	node->csdev = csdev;
+	list_add(&node->link, path);
+	pm_runtime_get_sync(csdev->dev.parent);
+
+	return 0;
+}
+
+struct list_head *coresight_build_path(struct coresight_device *csdev)
+{
+	struct list_head *path;
+
+	path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!path)
+		return NULL;
+
+	INIT_LIST_HEAD(path);
+
+	if (_coresight_build_path(csdev, path)) {
+		kfree(path);
+		path = NULL;
+	}
+
+	return path;
+}
+
+/**
+ * coresight_release_path - release a previously built path.
+ * @path:	the path to release.
+ *
+ * Go through all the elements of a path and 1) remove them from the list and
+ * 2) free the memory allocated for each node.
+ */
+void coresight_release_path(struct list_head *path)
+{
+	struct coresight_device *csdev;
+	struct coresight_node *nd, *next;
+
+	list_for_each_entry_safe(nd, next, path, link) {
+		csdev = nd->csdev;
+
+		pm_runtime_put_sync(csdev->dev.parent);
+		list_del(&nd->link);
+		kfree(nd);
+	}
+
+	kfree(path);
+	path = NULL;
 }
 
 int coresight_enable(struct coresight_device *csdev)
 {
 	int ret = 0;
-	LIST_HEAD(path);
+	int cpu;
+	struct list_head *path;
 
 	mutex_lock(&coresight_mutex);
 	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -348,22 +447,47 @@
 	if (csdev->enable)
 		goto out;
 
-	if (coresight_build_paths(csdev, &path, true)) {
-		dev_err(&csdev->dev, "building path(s) failed\n");
+	path = coresight_build_path(csdev);
+	if (!path) {
+		pr_err("building path(s) failed\n");
 		goto out;
 	}
 
-	if (coresight_enable_source(csdev))
-		dev_err(&csdev->dev, "source enable failed\n");
+	ret = coresight_enable_path(path, CS_MODE_SYSFS);
+	if (ret)
+		goto err_path;
+
+	ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+	if (ret)
+		goto err_source;
+
+	/*
+	 * When working from sysFS it is important to keep track
+	 * of the paths that were created so that they can be
+	 * undone in 'coresight_disable()'.  Since there can only
+	 * be a single session per tracer (when working from sysFS)
+	 * a per-cpu variable will do just fine.
+	 */
+	cpu = source_ops(csdev)->cpu_id(csdev);
+	per_cpu(sysfs_path, cpu) = path;
+
 out:
 	mutex_unlock(&coresight_mutex);
 	return ret;
+
+err_source:
+	coresight_disable_path(path);
+
+err_path:
+	coresight_release_path(path);
+	goto out;
 }
 EXPORT_SYMBOL_GPL(coresight_enable);
 
 void coresight_disable(struct coresight_device *csdev)
 {
-	LIST_HEAD(path);
+	int cpu;
+	struct list_head *path;
 
 	mutex_lock(&coresight_mutex);
 	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -373,9 +497,12 @@
 	if (!csdev->enable)
 		goto out;
 
+	cpu = source_ops(csdev)->cpu_id(csdev);
+	path = per_cpu(sysfs_path, cpu);
 	coresight_disable_source(csdev);
-	if (coresight_build_paths(csdev, &path, false))
-		dev_err(&csdev->dev, "releasing path(s) failed\n");
+	coresight_disable_path(path);
+	coresight_release_path(path);
+	per_cpu(sysfs_path, cpu) = NULL;
 
 out:
 	mutex_unlock(&coresight_mutex);
@@ -481,6 +608,8 @@
 {
 	struct coresight_device *csdev = to_coresight_device(dev);
 
+	kfree(csdev->conns);
+	kfree(csdev->refcnt);
 	kfree(csdev);
 }
 
@@ -536,7 +665,7 @@
 	 * are hooked-up with each newly added component.
 	 */
 	bus_for_each_dev(&coresight_bustype, NULL,
-				 csdev, coresight_orphan_match);
+			 csdev, coresight_orphan_match);
 }
 
 
@@ -568,6 +697,8 @@
 
 		if (dev) {
 			conn->child_dev = to_coresight_device(dev);
+			/* and put reference from 'bus_find_device()' */
+			put_device(dev);
 		} else {
 			csdev->orphan = true;
 			conn->child_dev = NULL;
@@ -575,6 +706,50 @@
 	}
 }
 
+static int coresight_remove_match(struct device *dev, void *data)
+{
+	int i;
+	struct coresight_device *csdev, *iterator;
+	struct coresight_connection *conn;
+
+	csdev = data;
+	iterator = to_coresight_device(dev);
+
+	/* No need to check oneself */
+	if (csdev == iterator)
+		return 0;
+
+	/*
+	 * Circle through all the connections of that component.  If we find
+	 * a connection whose name matches @csdev, remove it.
+	 */
+	for (i = 0; i < iterator->nr_outport; i++) {
+		conn = &iterator->conns[i];
+
+		if (conn->child_dev == NULL)
+			continue;
+
+		if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+			iterator->orphan = true;
+			conn->child_dev = NULL;
+			/* No need to continue */
+			break;
+		}
+	}
+
+	/*
+	 * Returning '0' ensures that all known components on the
+	 * bus will be checked.
+	 */
+	return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+	bus_for_each_dev(&coresight_bustype, NULL,
+			 csdev, coresight_remove_match);
+}
+
 /**
  * coresight_timeout - loop until a bit has changed to a specific state.
  * @addr: base address of the area of interest.
@@ -713,13 +888,8 @@
 
 void coresight_unregister(struct coresight_device *csdev)
 {
-	mutex_lock(&coresight_mutex);
-
-	kfree(csdev->conns);
+	/* Remove references to that device in the topology */
+	coresight_remove_conns(csdev);
 	device_unregister(&csdev->dev);
-
-	mutex_unlock(&coresight_mutex);
 }
 EXPORT_SYMBOL_GPL(coresight_unregister);
-
-MODULE_LICENSE("GPL v2");
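
The coresight.c rework above replaces the recursive build-and-enable walk with an explicit path: a list of coresight_node entries that is built once, enabled sink first, and torn down with coresight_release_path(). A condensed sketch of the calling sequence, mirroring coresight_enable()/coresight_disable() (the *_source helpers are the static ones in this file; error handling trimmed):

	struct list_head *path;
	int ret;

	path = coresight_build_path(csdev);	/* NULL if no activated sink */
	if (!path)
		return -EINVAL;

	ret = coresight_enable_path(path, CS_MODE_SYSFS);	/* sink, then links */
	if (ret)
		goto release;

	ret = coresight_enable_source(csdev, CS_MODE_SYSFS);	/* tracer last */
	if (ret)
		goto disable;

	/* ... session runs; the path pointer is kept in the per-cpu sysfs_path ... */

	coresight_disable_source(csdev);
disable:
	coresight_disable_path(path);
release:
	coresight_release_path(path);	/* drops runtime PM refs, frees the nodes */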
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index b097361..b68da18 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/slab.h>
@@ -86,7 +85,7 @@
 		return -ENOMEM;
 
 	/* Children connected to this component via @outports */
-	 pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+	pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
 					  sizeof(*pdata->child_names),
 					  GFP_KERNEL);
 	if (!pdata->child_names)
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
index b7a9073..1b412f8 100644
--- a/drivers/hwtracing/intel_th/Kconfig
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -1,5 +1,6 @@
 config INTEL_TH
 	tristate "Intel(R) Trace Hub controller"
+	depends on HAS_DMA && HAS_IOMEM
 	help
 	  Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that
 	  produce, switch and output trace data from multiple hardware and
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 165d300..4272f2c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -124,17 +124,34 @@
 	.release	= intel_th_device_release,
 };
 
+static struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+	/*
+	 * subdevice tree is flat: if this one is not a switch, its
+	 * parent must be
+	 */
+	if (thdev->type != INTEL_TH_SWITCH)
+		thdev = to_intel_th_hub(thdev);
+
+	if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
+		return NULL;
+
+	return dev_get_drvdata(thdev->dev.parent);
+}
+
 static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
 				     kuid_t *uid, kgid_t *gid)
 {
 	struct intel_th_device *thdev = to_intel_th_device(dev);
+	struct intel_th *th = to_intel_th(thdev);
 	char *node;
 
 	if (thdev->id >= 0)
-		node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name,
-				 thdev->id);
+		node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id,
+				 thdev->name, thdev->id);
 	else
-		node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name);
+		node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id,
+				 thdev->name);
 
 	return node;
 }
@@ -319,6 +336,7 @@
 	unsigned		nres;
 	unsigned		type;
 	unsigned		otype;
+	unsigned		scrpd;
 	int			id;
 } intel_th_subdevices[TH_SUBDEVICE_MAX] = {
 	{
@@ -352,6 +370,7 @@
 		.id	= 0,
 		.type	= INTEL_TH_OUTPUT,
 		.otype	= GTH_MSU,
+		.scrpd	= SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
 	},
 	{
 		.nres	= 2,
@@ -371,6 +390,7 @@
 		.id	= 1,
 		.type	= INTEL_TH_OUTPUT,
 		.otype	= GTH_MSU,
+		.scrpd	= SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
 	},
 	{
 		.nres	= 2,
@@ -403,6 +423,7 @@
 		.name	= "pti",
 		.type	= INTEL_TH_OUTPUT,
 		.otype	= GTH_PTI,
+		.scrpd	= SCRPD_PTI_IS_PRIM_DEST,
 	},
 	{
 		.nres	= 1,
@@ -477,6 +498,7 @@
 			thdev->dev.devt = MKDEV(th->major, i);
 			thdev->output.type = subdev->otype;
 			thdev->output.port = -1;
+			thdev->output.scratchpad = subdev->scrpd;
 		}
 
 		err = device_add(&thdev->dev);
@@ -579,6 +601,8 @@
 	}
 	th->dev = dev;
 
+	dev_set_drvdata(dev, th);
+
 	err = intel_th_populate(th, devres, ndevres, irq);
 	if (err)
 		goto err_chrdev;
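
A note on the core.c hunk above: storing the struct intel_th pointer with
dev_set_drvdata() lets to_intel_th() walk from any subdevice up to its hub,
so the character device nodes now carry the hub instance instead of a
hard-coded 0. A standalone sketch of the same formatting (plain snprintf in
place of kasprintf, with a hypothetical second hub whose th->id is 1):

	#include <stdio.h>

	int main(void)
	{
		char node[32];
		int th_id = 1;	/* hypothetical second Trace Hub instance */

		/* same format string as intel_th_output_devnode() above */
		snprintf(node, sizeof(node), "intel_th%d/%s%d", th_id, "msc", 0);
		printf("/dev/%s\n", node);	/* prints /dev/intel_th1/msc0 */
		return 0;
	}
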
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 2dc5378..9beea0b 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -146,24 +146,6 @@
 	iowrite32(val, gth->base + reg);
 }
 
-/*static int gth_master_get(struct gth_device *gth, unsigned int master)
-{
-	unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
-	unsigned int shift = (master & 0x7) * 4;
-	u32 val;
-
-	if (master >= 256) {
-		reg = REG_GTH_GSWTDEST;
-		shift = 0;
-	}
-
-	val = ioread32(gth->base + reg);
-	val &= (0xf << shift);
-	val >>= shift;
-
-	return val ? val & 0x7 : -1;
-	}*/
-
 static ssize_t master_attr_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
@@ -304,6 +286,10 @@
 	if (scratchpad & SCRPD_DEBUGGER_IN_USE)
 		return -EBUSY;
 
+	/* Always save/restore STH and TU registers in S0ix entry/exit */
+	scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+	iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+
 	/* output ports */
 	for (port = 0; port < 8; port++) {
 		if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
@@ -506,6 +492,10 @@
 	if (!count)
 		dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
 			output->port);
+
+	reg = ioread32(gth->base + REG_GTH_SCRPD0);
+	reg &= ~output->scratchpad;
+	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
 }
 
 /**
@@ -520,7 +510,7 @@
 				struct intel_th_output *output)
 {
 	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
-	u32 scr = 0xfc0000;
+	u32 scr = 0xfc0000, scrpd;
 	int master;
 
 	spin_lock(&gth->gth_lock);
@@ -535,6 +525,10 @@
 	output->active = true;
 	spin_unlock(&gth->gth_lock);
 
+	scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
+	scrpd |= output->scratchpad;
+	iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
+
 	iowrite32(scr, gth->base + REG_GTH_SCR);
 	iowrite32(0, gth->base + REG_GTH_SCR2);
 }
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 3b714b7..56f0d26 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -57,9 +57,6 @@
 	REG_GTH_SCRPD3		= 0xec, /* ScratchPad[3] */
 };
 
-/* Externall debugger is using Intel TH */
-#define SCRPD_DEBUGGER_IN_USE	BIT(24)
-
 /* waiting for Pipeline Empty bit(s) to assert for GTH */
 #define GTH_PLE_WAITLOOP_DEPTH	10000
 
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 57fd72b..eedd093 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -30,6 +30,7 @@
  * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
  * @port:	output port number, assigned by the switch
  * @type:	GTH_{MSU,CTP,PTI}
+ * @scratchpad:	scratchpad bits to flag when this output is enabled
  * @multiblock:	true for multiblock output configuration
  * @active:	true when this output is enabled
  *
@@ -41,6 +42,7 @@
 struct intel_th_output {
 	int		port;
 	unsigned int	type;
+	unsigned int	scratchpad;
 	bool		multiblock;
 	bool		active;
 };
@@ -241,4 +243,43 @@
 	GTH_PTI = 4,	/* MIPI-PTI */
 };
 
+/*
+ * Scratchpad bits: tell firmware and external debuggers
+ * what we are up to.
+ */
+enum {
+	/* Memory is the primary destination */
+	SCRPD_MEM_IS_PRIM_DEST		= BIT(0),
+	/* XHCI DbC is the primary destination */
+	SCRPD_DBC_IS_PRIM_DEST		= BIT(1),
+	/* PTI is the primary destination */
+	SCRPD_PTI_IS_PRIM_DEST		= BIT(2),
+	/* BSSB is the primary destination */
+	SCRPD_BSSB_IS_PRIM_DEST		= BIT(3),
+	/* PTI is the alternate destination */
+	SCRPD_PTI_IS_ALT_DEST		= BIT(4),
+	/* BSSB is the alternate destination */
+	SCRPD_BSSB_IS_ALT_DEST		= BIT(5),
+	/* DeepSx exit occurred */
+	SCRPD_DEEPSX_EXIT		= BIT(6),
+	/* S4 exit occurred */
+	SCRPD_S4_EXIT			= BIT(7),
+	/* S5 exit occurred */
+	SCRPD_S5_EXIT			= BIT(8),
+	/* MSU controller 0/1 is enabled */
+	SCRPD_MSC0_IS_ENABLED		= BIT(9),
+	SCRPD_MSC1_IS_ENABLED		= BIT(10),
+	/* Sx exit occurred */
+	SCRPD_SX_EXIT			= BIT(11),
+	/* Trigger Unit is enabled */
+	SCRPD_TRIGGER_IS_ENABLED	= BIT(12),
+	SCRPD_ODLA_IS_ENABLED		= BIT(13),
+	SCRPD_SOCHAP_IS_ENABLED		= BIT(14),
+	SCRPD_STH_IS_ENABLED		= BIT(15),
+	SCRPD_DCIH_IS_ENABLED		= BIT(16),
+	SCRPD_VER_IS_ENABLED		= BIT(17),
+	/* External debugger is using Intel TH */
+	SCRPD_DEBUGGER_IN_USE		= BIT(24),
+};
+
 #endif
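
The SCRPD bits above advertise the active trace configuration to firmware
and external debuggers; the gth.c hunks set and clear them with a plain
read-modify-write of REG_GTH_SCRPD0 whenever an output is enabled or
disabled. A condensed sketch of that pattern (the helper name is made up
for illustration; the real code open-codes it):

	static void gth_scrpd_update(void __iomem *base, unsigned int bits,
				     bool set)
	{
		u32 scrpd = ioread32(base + REG_GTH_SCRPD0);

		if (set)
			scrpd |= bits;		/* e.g. SCRPD_MSC0_IS_ENABLED */
		else
			scrpd &= ~bits;

		iowrite32(scrpd, base + REG_GTH_SCRPD0);
	}
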
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 70ca27e4..d9d6022 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -408,7 +408,7 @@
 		 * Second time (wrap_count==1), it's just like any other block,
 		 * containing data in the range of [MSC_BDESC..data_bytes].
 		 */
-		if (iter->block == iter->start_block && iter->wrap_count) {
+		if (iter->block == iter->start_block && iter->wrap_count == 2) {
 			tocopy = DATA_IN_PAGE - data_bytes;
 			src += data_bytes;
 		}
@@ -1112,12 +1112,11 @@
 		size = msc->nr_pages << PAGE_SHIFT;
 
 	if (!size)
-		return 0;
-
-	if (off >= size) {
-		len = 0;
 		goto put_count;
-	}
+
+	if (off >= size)
+		goto put_count;
+
 	if (off + len >= size)
 		len = size - off;
 
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 641e879..bca7a2a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -46,8 +46,6 @@
 	if (IS_ERR(th))
 		return PTR_ERR(th);
 
-	pci_set_drvdata(pdev, th);
-
 	return 0;
 }
 
@@ -67,6 +65,16 @@
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Apollo Lake */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
+		/* Broxton */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 56101c3..e1aee61 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -94,10 +94,13 @@
 	case STP_PACKET_TRIG:
 		if (flags & STP_PACKET_TIMESTAMPED)
 			reg += 4;
-		iowrite8(*payload, sth->base + reg);
+		writeb_relaxed(*payload, sth->base + reg);
 		break;
 
 	case STP_PACKET_MERR:
+		if (size > 4)
+			size = 4;
+
 		sth_iowrite(&out->MERR, payload, size);
 		break;
 
@@ -107,8 +110,8 @@
 		else
 			outp = (u64 __iomem *)&out->FLAG;
 
-		size = 1;
-		sth_iowrite(outp, payload, size);
+		size = 0;
+		writeb_relaxed(0, outp);
 		break;
 
 	case STP_PACKET_USER:
@@ -129,6 +132,8 @@
 
 		sth_iowrite(outp, payload, size);
 		break;
+	default:
+		return -ENOTSUPP;
 	}
 
 	return size;
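
The sth.c changes tighten the ->packet() contract: the callback returns the
number of payload bytes it actually emitted (MERR is clamped to 4 bytes,
FLAG consumes none) or a negative error such as -ENOTSUPP for packet types
it does not handle. A minimal callback honouring that contract could look
like the sketch below; the prototype is inferred from the calls made in
stm/core.c and should be treated as an assumption, not a copy of a real
driver.

	static ssize_t example_stm_packet(struct stm_data *stm_data,
					  unsigned int master,
					  unsigned int channel,
					  unsigned int packet,
					  unsigned int flags,
					  unsigned int size,
					  const unsigned char *payload)
	{
		switch (packet) {
		case STP_PACKET_DATA:
		case STP_PACKET_FLAG:
			/* pretend the whole payload was emitted */
			return size;
		default:
			/* anything else is unsupported in this example */
			return -ENOTSUPP;
		}
	}
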
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 83e9f59..847a39b 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -1,6 +1,7 @@
 config STM
 	tristate "System Trace Module devices"
 	select CONFIGFS_FS
+	select SRCU
 	help
 	  A System Trace Module (STM) is a device exporting data in System
 	  Trace Protocol (STP) format as defined by MIPI STP standards.
@@ -8,6 +9,8 @@
 
 	  Say Y here to enable System Trace Module device support.
 
+if STM
+
 config STM_DUMMY
 	tristate "Dummy STM driver"
 	help
@@ -24,3 +27,16 @@
 
 	  If you want to send kernel console messages over STM devices,
 	  say Y.
+
+config STM_SOURCE_HEARTBEAT
+	tristate "Heartbeat over STM devices"
+	help
+	  This is a kernel space trace source that sends periodic
+	  heartbeat messages to trace hosts over STM devices. It is
+	  also useful for testing stm class drivers and the stm class
+	  framework itself.
+
+	  If you want to send heartbeat messages over STM devices,
+	  say Y.
+
+endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index f9312c3..a9ce3d4 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -5,5 +5,7 @@
 obj-$(CONFIG_STM_DUMMY)	+= dummy_stm.o
 
 obj-$(CONFIG_STM_SOURCE_CONSOLE)	+= stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT)	+= stm_heartbeat.o
 
 stm_console-y		:= console.o
+stm_heartbeat-y		:= heartbeat.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index b6445d9..de80d45 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -113,6 +113,7 @@
 
 	stm = to_stm_device(dev);
 	if (!try_module_get(stm->owner)) {
+		/* matches class_find_device() above */
 		put_device(dev);
 		return NULL;
 	}
@@ -125,7 +126,7 @@
  * @stm:	stm device, previously acquired by stm_find_device()
  *
  * This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
  */
 void stm_put_device(struct stm_device *stm)
 {
@@ -185,6 +186,9 @@
 {
 	struct stp_master *master = stm_master(stm, output->master);
 
+	lockdep_assert_held(&stm->mc_lock);
+	lockdep_assert_held(&output->lock);
+
 	if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
 		return;
 
@@ -199,6 +203,9 @@
 {
 	struct stp_master *master = stm_master(stm, output->master);
 
+	lockdep_assert_held(&stm->mc_lock);
+	lockdep_assert_held(&output->lock);
+
 	bitmap_release_region(&master->chan_map[0], output->channel,
 			      ilog2(output->nr_chans));
 
@@ -233,7 +240,7 @@
 	return -1;
 }
 
-static unsigned int
+static int
 stm_find_master_chan(struct stm_device *stm, unsigned int width,
 		     unsigned int *mstart, unsigned int mend,
 		     unsigned int *cstart, unsigned int cend)
@@ -288,12 +295,13 @@
 	}
 
 	spin_lock(&stm->mc_lock);
+	spin_lock(&output->lock);
 	/* output is already assigned -- shouldn't happen */
 	if (WARN_ON_ONCE(output->nr_chans))
 		goto unlock;
 
 	ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
-	if (ret)
+	if (ret < 0)
 		goto unlock;
 
 	output->master = midx;
@@ -304,6 +312,7 @@
 
 	ret = 0;
 unlock:
+	spin_unlock(&output->lock);
 	spin_unlock(&stm->mc_lock);
 
 	return ret;
@@ -312,11 +321,18 @@
 static void stm_output_free(struct stm_device *stm, struct stm_output *output)
 {
 	spin_lock(&stm->mc_lock);
+	spin_lock(&output->lock);
 	if (output->nr_chans)
 		stm_output_disclaim(stm, output);
+	spin_unlock(&output->lock);
 	spin_unlock(&stm->mc_lock);
 }
 
+static void stm_output_init(struct stm_output *output)
+{
+	spin_lock_init(&output->lock);
+}
+
 static int major_match(struct device *dev, const void *data)
 {
 	unsigned int major = *(unsigned int *)data;
@@ -339,6 +355,7 @@
 	if (!stmf)
 		return -ENOMEM;
 
+	stm_output_init(&stmf->output);
 	stmf->stm = to_stm_device(dev);
 
 	if (!try_module_get(stmf->stm->owner))
@@ -349,6 +366,8 @@
 	return nonseekable_open(inode, file);
 
 err_free:
+	/* matches class_find_device() above */
+	put_device(dev);
 	kfree(stmf);
 
 	return err;
@@ -357,9 +376,19 @@
 static int stm_char_release(struct inode *inode, struct file *file)
 {
 	struct stm_file *stmf = file->private_data;
+	struct stm_device *stm = stmf->stm;
 
-	stm_output_free(stmf->stm, &stmf->output);
-	stm_put_device(stmf->stm);
+	if (stm->data->unlink)
+		stm->data->unlink(stm->data, stmf->output.master,
+				  stmf->output.channel);
+
+	stm_output_free(stm, &stmf->output);
+
+	/*
+	 * matches the stm_char_open()'s
+	 * class_find_device() + try_module_get()
+	 */
+	stm_put_device(stm);
 	kfree(stmf);
 
 	return 0;
@@ -380,8 +409,8 @@
 	return ret;
 }
 
-static void stm_write(struct stm_data *data, unsigned int master,
-		      unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+			  unsigned int channel, const char *buf, size_t count)
 {
 	unsigned int flags = STP_PACKET_TIMESTAMPED;
 	const unsigned char *p = buf, nil = 0;
@@ -393,9 +422,14 @@
 		sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
 				  sz, p);
 		flags = 0;
+
+		if (sz < 0)
+			break;
 	}
 
 	data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+	return pos;
 }
 
 static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +440,9 @@
 	char *kbuf;
 	int err;
 
+	if (count + 1 > PAGE_SIZE)
+		count = PAGE_SIZE - 1;
+
 	/*
 	 * if no m/c have been assigned to this writer up to this
 	 * point, use "default" policy entry
@@ -430,8 +467,8 @@
 		return -EFAULT;
 	}
 
-	stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
-		  count);
+	count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+			  kbuf, count);
 
 	kfree(kbuf);
 
@@ -515,10 +552,8 @@
 		ret = stm->data->link(stm->data, stmf->output.master,
 				      stmf->output.channel);
 
-	if (ret) {
+	if (ret)
 		stm_output_free(stmf->stm, &stmf->output);
-		stm_put_device(stmf->stm);
-	}
 
 err_free:
 	kfree(id);
@@ -618,7 +653,7 @@
 	if (!stm_data->packet || !stm_data->sw_nchannels)
 		return -EINVAL;
 
-	nmasters = stm_data->sw_end - stm_data->sw_start;
+	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
 	stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
 	if (!stm)
 		return -ENOMEM;
@@ -641,6 +676,7 @@
 	if (err)
 		goto err_device;
 
+	mutex_init(&stm->link_mutex);
 	spin_lock_init(&stm->link_lock);
 	INIT_LIST_HEAD(&stm->link_list);
 
@@ -654,6 +690,7 @@
 	return 0;
 
 err_device:
+	/* matches device_initialize() above */
 	put_device(&stm->dev);
 err_free:
 	kfree(stm);
@@ -662,20 +699,28 @@
 }
 EXPORT_SYMBOL_GPL(stm_register_device);
 
-static void __stm_source_link_drop(struct stm_source_device *src,
-				   struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+				  struct stm_device *stm);
 
 void stm_unregister_device(struct stm_data *stm_data)
 {
 	struct stm_device *stm = stm_data->stm;
 	struct stm_source_device *src, *iter;
-	int i;
+	int i, ret;
 
-	spin_lock(&stm->link_lock);
+	mutex_lock(&stm->link_mutex);
 	list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
-		__stm_source_link_drop(src, stm);
+		ret = __stm_source_link_drop(src, stm);
+		/*
+		 * The src <-> stm link must not change while we hold
+		 * stm::link_mutex, so complain loudly if it has changed;
+		 * in that case ret != 0 means this src is no longer
+		 * connected to this stm, and it is still safe to proceed
+		 * with the tear-down of the stm device.
+		 */
+		WARN_ON_ONCE(ret);
 	}
-	spin_unlock(&stm->link_lock);
+	mutex_unlock(&stm->link_mutex);
 
 	synchronize_srcu(&stm_source_srcu);
 
@@ -686,7 +731,7 @@
 		stp_policy_unbind(stm->policy);
 	mutex_unlock(&stm->policy_mutex);
 
-	for (i = 0; i < stm->sw_nmasters; i++)
+	for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
 		stp_master_free(stm, i);
 
 	device_unregister(&stm->dev);
@@ -694,6 +739,17 @@
 }
 EXPORT_SYMBOL_GPL(stm_unregister_device);
 
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ *   stm::link_mutex
+ *     stm::link_lock
+ *       src::link_lock
+ */
+
 /**
  * stm_source_link_add() - connect an stm_source device to an stm device
  * @src:	stm_source device
@@ -710,6 +766,7 @@
 	char *id;
 	int err;
 
+	mutex_lock(&stm->link_mutex);
 	spin_lock(&stm->link_lock);
 	spin_lock(&src->link_lock);
 
@@ -719,6 +776,7 @@
 
 	spin_unlock(&src->link_lock);
 	spin_unlock(&stm->link_lock);
+	mutex_unlock(&stm->link_mutex);
 
 	id = kstrdup(src->data->name, GFP_KERNEL);
 	if (id) {
@@ -753,9 +811,9 @@
 
 fail_free_output:
 	stm_output_free(stm, &src->output);
-	stm_put_device(stm);
 
 fail_detach:
+	mutex_lock(&stm->link_mutex);
 	spin_lock(&stm->link_lock);
 	spin_lock(&src->link_lock);
 
@@ -764,6 +822,7 @@
 
 	spin_unlock(&src->link_lock);
 	spin_unlock(&stm->link_lock);
+	mutex_unlock(&stm->link_mutex);
 
 	return err;
 }
@@ -776,28 +835,55 @@
  * If @stm is @src::link, disconnect them from one another and put the
  * reference on the @stm device.
  *
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
  */
-static void __stm_source_link_drop(struct stm_source_device *src,
-				   struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+				  struct stm_device *stm)
 {
 	struct stm_device *link;
+	int ret = 0;
 
+	lockdep_assert_held(&stm->link_mutex);
+
+	/* for stm::link_list modification, we hold both mutex and spinlock */
+	spin_lock(&stm->link_lock);
 	spin_lock(&src->link_lock);
 	link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
-	if (WARN_ON_ONCE(link != stm)) {
-		spin_unlock(&src->link_lock);
-		return;
+
+	/*
+	 * The linked device may have changed since we last looked, because
+	 * we weren't holding the src::link_lock back then; if this is the
+	 * case, tell the caller to retry.
+	 */
+	if (link != stm) {
+		ret = -EAGAIN;
+		goto unlock;
 	}
 
 	stm_output_free(link, &src->output);
-	/* caller must hold stm::link_lock */
 	list_del_init(&src->link_entry);
 	/* matches stm_find_device() from stm_source_link_store() */
 	stm_put_device(link);
 	rcu_assign_pointer(src->link, NULL);
 
+unlock:
 	spin_unlock(&src->link_lock);
+	spin_unlock(&stm->link_lock);
+
+	/*
+	 * Call the unlink callbacks for both source and stm, when we know
+	 * that we have actually performed the unlinking.
+	 */
+	if (!ret) {
+		if (src->data->unlink)
+			src->data->unlink(src->data);
+
+		if (stm->data->unlink)
+			stm->data->unlink(stm->data, src->output.master,
+					  src->output.channel);
+	}
+
+	return ret;
 }
 
 /**
@@ -813,21 +899,29 @@
 static void stm_source_link_drop(struct stm_source_device *src)
 {
 	struct stm_device *stm;
-	int idx;
+	int idx, ret;
 
+retry:
 	idx = srcu_read_lock(&stm_source_srcu);
+	/*
+	 * The stm device will be valid for the duration of this
+	 * read section, but the link may change before we grab
+	 * the src::link_lock in __stm_source_link_drop().
+	 */
 	stm = srcu_dereference(src->link, &stm_source_srcu);
 
+	ret = 0;
 	if (stm) {
-		if (src->data->unlink)
-			src->data->unlink(src->data);
-
-		spin_lock(&stm->link_lock);
-		__stm_source_link_drop(src, stm);
-		spin_unlock(&stm->link_lock);
+		mutex_lock(&stm->link_mutex);
+		ret = __stm_source_link_drop(src, stm);
+		mutex_unlock(&stm->link_mutex);
 	}
 
 	srcu_read_unlock(&stm_source_srcu, idx);
+
+	/* if it did change, retry */
+	if (ret == -EAGAIN)
+		goto retry;
 }
 
 static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +956,10 @@
 		return -EINVAL;
 
 	err = stm_source_link_add(src, link);
-	if (err)
+	if (err) {
+		/* matches the stm_find_device() above */
 		stm_put_device(link);
+	}
 
 	return err ? : count;
 }
@@ -925,6 +1021,7 @@
 	if (err)
 		goto err;
 
+	stm_output_init(&src->output);
 	spin_lock_init(&src->link_lock);
 	INIT_LIST_HEAD(&src->link_entry);
 	src->data = data;
@@ -973,9 +1070,9 @@
 
 	stm = srcu_dereference(src->link, &stm_source_srcu);
 	if (stm)
-		stm_write(stm->data, src->output.master,
-			  src->output.channel + chan,
-			  buf, count);
+		count = stm_write(stm->data, src->output.master,
+				  src->output.channel + chan,
+				  buf, count);
 	else
 		count = -ENODEV;
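
The lock ordering documented in the core.c comment above (stm::link_mutex,
then stm::link_lock, then src::link_lock) is what the link and unlink paths
now follow. A condensed, illustrative-only sketch of that nesting:

	static void example_link_list_update(struct stm_device *stm,
					     struct stm_source_device *src)
	{
		mutex_lock(&stm->link_mutex);	/* may sleep; taken first */
		spin_lock(&stm->link_lock);	/* stabilizes stm->link_list */
		spin_lock(&src->link_lock);	/* protects src->link itself */

		/* ... modify stm->link_list and src->link here ... */

		spin_unlock(&src->link_lock);
		spin_unlock(&stm->link_lock);
		mutex_unlock(&stm->link_mutex);
	}
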
 
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index 3709bef..310adf5 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -40,22 +40,75 @@
 	return size;
 }
 
-static struct stm_data dummy_stm = {
-	.name		= "dummy_stm",
-	.sw_start	= 0x0000,
-	.sw_end		= 0xffff,
-	.sw_nchannels	= 0xffff,
-	.packet		= dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0600);
+
+static unsigned int dummy_stm_nr;
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+			  unsigned int channel)
+{
+	if (fail_mode && (channel & fail_mode))
+		return -EINVAL;
+
+	return 0;
+}
 
 static int dummy_stm_init(void)
 {
-	return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+	int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies);
+
+	if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX)
+		return -EINVAL;
+
+	for (i = 0; i < __nr_dummies; i++) {
+		dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+		if (!dummy_stm[i].name)
+			goto fail_unregister;
+
+		dummy_stm[i].sw_start		= 0x0000;
+		dummy_stm[i].sw_end		= 0xffff;
+		dummy_stm[i].sw_nchannels	= 0xffff;
+		dummy_stm[i].packet		= dummy_stm_packet;
+		dummy_stm[i].link		= dummy_stm_link;
+
+		ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+		if (ret)
+			goto fail_free;
+	}
+
+	dummy_stm_nr = __nr_dummies;
+
+	return 0;
+
+fail_unregister:
+	for (i--; i >= 0; i--) {
+		stm_unregister_device(&dummy_stm[i]);
+fail_free:
+		kfree(dummy_stm[i].name);
+	}
+
+	return ret;
+
 }
 
 static void dummy_stm_exit(void)
 {
-	stm_unregister_device(&dummy_stm);
+	int i;
+
+	for (i = 0; i < dummy_stm_nr; i++) {
+		stm_unregister_device(&dummy_stm[i]);
+		kfree(dummy_stm[i].name);
+	}
 }
 
 module_init(dummy_stm_init);
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
new file mode 100644
index 0000000..0133571
--- /dev/null
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -0,0 +1,130 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The heartbeat STM source sends periodic messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX	32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0600);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+	struct stm_source_data	data;
+	struct hrtimer		hrtimer;
+	unsigned int		active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static unsigned int nr_instances;
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+	struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+						       hrtimer);
+
+	stm_source_write(&heartbeat->data, 0, str, sizeof str);
+	if (heartbeat->active)
+		hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+	return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+	struct stm_heartbeat *heartbeat =
+		container_of(data, struct stm_heartbeat, data);
+
+	heartbeat->active = 1;
+	hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+		      HRTIMER_MODE_ABS);
+
+	return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+	struct stm_heartbeat *heartbeat =
+		container_of(data, struct stm_heartbeat, data);
+
+	heartbeat->active = 0;
+	hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+	int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs);
+
+	if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX)
+		return -EINVAL;
+
+	for (i = 0; i < __nr_instances; i++) {
+		stm_heartbeat[i].data.name =
+			kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+		if (!stm_heartbeat[i].data.name)
+			goto fail_unregister;
+
+		stm_heartbeat[i].data.nr_chans	= 1;
+		stm_heartbeat[i].data.link		= stm_heartbeat_link;
+		stm_heartbeat[i].data.unlink	= stm_heartbeat_unlink;
+		hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_ABS);
+		stm_heartbeat[i].hrtimer.function =
+			stm_heartbeat_hrtimer_handler;
+
+		ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+		if (ret)
+			goto fail_free;
+	}
+
+	nr_instances = __nr_instances;
+
+	return 0;
+
+fail_unregister:
+	for (i--; i >= 0; i--) {
+		stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+		kfree(stm_heartbeat[i].data.name);
+	}
+
+	return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_instances; i++) {
+		stm_source_unregister_device(&stm_heartbeat[i].data);
+		kfree(stm_heartbeat[i].data.name);
+	}
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 11ab6d01..1db1896 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -272,13 +272,17 @@
 {
 	struct stm_device *stm = policy->stm;
 
+	/*
+	 * stp_policy_release() will not end up here if the policy is already
+	 * unbound; other callers should not either, as no link exists between
+	 * this policy and anything else in that case
+	 */
 	if (WARN_ON_ONCE(!policy->stm))
 		return;
 
-	mutex_lock(&stm->policy_mutex);
-	stm->policy = NULL;
-	mutex_unlock(&stm->policy_mutex);
+	lockdep_assert_held(&stm->policy_mutex);
 
+	stm->policy = NULL;
 	policy->stm = NULL;
 
 	stm_put_device(stm);
@@ -287,8 +291,16 @@
 static void stp_policy_release(struct config_item *item)
 {
 	struct stp_policy *policy = to_stp_policy(item);
+	struct stm_device *stm = policy->stm;
 
+	/* a policy *can* be unbound and still exist in configfs tree */
+	if (!stm)
+		return;
+
+	mutex_lock(&stm->policy_mutex);
 	stp_policy_unbind(policy);
+	mutex_unlock(&stm->policy_mutex);
+
 	kfree(policy);
 }
 
@@ -320,10 +332,11 @@
 
 	/*
 	 * node must look like <device_name>.<policy_name>, where
-	 * <device_name> is the name of an existing stm device and
-	 * <policy_name> is an arbitrary string
+	 * <device_name> is the name of an existing stm device; may
+	 *               contain dots;
+	 * <policy_name> is an arbitrary string; may not contain dots
 	 */
-	p = strchr(devname, '.');
+	p = strrchr(devname, '.');
 	if (!p) {
 		kfree(devname);
 		return ERR_PTR(-EINVAL);
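
Switching strchr() to strrchr() means the policy name is whatever follows
the last dot, so stm device names that themselves contain dots (dummy_stm.0,
for instance) still parse. A small standalone illustration of the split:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char devname[] = "dummy_stm.0.my-policy";	/* <device>.<policy> */
		char *p = strrchr(devname, '.');	/* split at the last dot */

		*p = '\0';
		printf("device='%s' policy='%s'\n", devname, p + 1);
		/* prints device='dummy_stm.0' policy='my-policy' */
		return 0;
	}
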
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 95ece02..4e8c6926 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -45,6 +45,7 @@
 	int			major;
 	unsigned int		sw_nmasters;
 	struct stm_data		*data;
+	struct mutex		link_mutex;
 	spinlock_t		link_lock;
 	struct list_head	link_list;
 	/* master allocation */
@@ -56,6 +57,7 @@
 	container_of((_d), struct stm_device, dev)
 
 struct stm_output {
+	spinlock_t		lock;
 	unsigned int		master;
 	unsigned int		channel;
 	unsigned int		nr_chans;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 833ea9d..b0d3ecf 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -143,7 +143,7 @@
 	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to build support for the following Freescale 3-axis
-	  accelerometers: MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
+	  accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called mma8452.
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index ccc632a..7f4994f 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1,6 +1,7 @@
 /*
  * mma8452.c - Support for following Freescale 3-axis accelerometers:
  *
+ * MMA8451Q (14 bit)
  * MMA8452Q (12 bit)
  * MMA8453Q (10 bit)
  * MMA8652FC (12 bit)
@@ -15,7 +16,7 @@
  *
  * 7-bit I2C slave address 0x1c/0x1d (pin selectable)
  *
- * TODO: orientation / freefall events, autosleep
+ * TODO: orientation events, autosleep
  */
 
 #include <linux/module.h>
@@ -85,8 +86,9 @@
 #define  MMA8452_INT_FF_MT			BIT(2)
 #define  MMA8452_INT_TRANS			BIT(5)
 
-#define  MMA8452_DEVICE_ID			0x2a
-#define  MMA8453_DEVICE_ID			0x3a
+#define MMA8451_DEVICE_ID			0x1a
+#define MMA8452_DEVICE_ID			0x2a
+#define MMA8453_DEVICE_ID			0x3a
 #define MMA8652_DEVICE_ID			0x4a
 #define MMA8653_DEVICE_ID			0x5a
 
@@ -416,6 +418,51 @@
 	return ret;
 }
 
+/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
+static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
+{
+	int val;
+	const struct mma_chip_info *chip = data->chip_info;
+
+	val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+	if (val < 0)
+		return val;
+
+	return !(val & MMA8452_FF_MT_CFG_OAE);
+}
+
+static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
+{
+	int val;
+	const struct mma_chip_info *chip = data->chip_info;
+
+	if ((state && mma8452_freefall_mode_enabled(data)) ||
+	    (!state && !(mma8452_freefall_mode_enabled(data))))
+		return 0;
+
+	val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+	if (val < 0)
+		return val;
+
+	if (state) {
+		val |= BIT(idx_x + chip->ev_cfg_chan_shift);
+		val |= BIT(idx_y + chip->ev_cfg_chan_shift);
+		val |= BIT(idx_z + chip->ev_cfg_chan_shift);
+		val &= ~MMA8452_FF_MT_CFG_OAE;
+	} else {
+		val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
+		val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
+		val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+		val |= MMA8452_FF_MT_CFG_OAE;
+	}
+
+	val = mma8452_change_config(data, chip->ev_cfg, val);
+	if (val)
+		return val;
+
+	return 0;
+}
+
 static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
 					   int val, int val2)
 {
@@ -609,12 +656,22 @@
 	const struct mma_chip_info *chip = data->chip_info;
 	int ret;
 
-	ret = i2c_smbus_read_byte_data(data->client,
-				       data->chip_info->ev_cfg);
-	if (ret < 0)
-		return ret;
+	switch (dir) {
+	case IIO_EV_DIR_FALLING:
+		return mma8452_freefall_mode_enabled(data);
+	case IIO_EV_DIR_RISING:
+		if (mma8452_freefall_mode_enabled(data))
+			return 0;
 
-	return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+		ret = i2c_smbus_read_byte_data(data->client,
+					       data->chip_info->ev_cfg);
+		if (ret < 0)
+			return ret;
+
+		return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+	default:
+		return -EINVAL;
+	}
 }
 
 static int mma8452_write_event_config(struct iio_dev *indio_dev,
@@ -627,19 +684,35 @@
 	const struct mma_chip_info *chip = data->chip_info;
 	int val;
 
-	val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
-	if (val < 0)
-		return val;
+	switch (dir) {
+	case IIO_EV_DIR_FALLING:
+		return mma8452_set_freefall_mode(data, state);
+	case IIO_EV_DIR_RISING:
+		val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+		if (val < 0)
+			return val;
 
-	if (state)
-		val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
-	else
-		val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+		if (state) {
+			if (mma8452_freefall_mode_enabled(data)) {
+				val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
+				val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
+				val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+				val |= MMA8452_FF_MT_CFG_OAE;
+			}
+			val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+		} else {
+			if (mma8452_freefall_mode_enabled(data))
+				return 0;
 
-	val |= chip->ev_cfg_ele;
-	val |= MMA8452_FF_MT_CFG_OAE;
+			val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+		}
 
-	return mma8452_change_config(data, chip->ev_cfg, val);
+		val |= chip->ev_cfg_ele;
+
+		return mma8452_change_config(data, chip->ev_cfg, val);
+	default:
+		return -EINVAL;
+	}
 }
 
 static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
@@ -652,6 +725,16 @@
 	if (src < 0)
 		return;
 
+	if (mma8452_freefall_mode_enabled(data)) {
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+						  IIO_MOD_X_AND_Y_AND_Z,
+						  IIO_EV_TYPE_MAG,
+						  IIO_EV_DIR_FALLING),
+			       ts);
+		return;
+	}
+
 	if (src & data->chip_info->ev_src_xe)
 		iio_push_event(indio_dev,
 			       IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
@@ -745,6 +828,27 @@
 	return 0;
 }
 
+static const struct iio_event_spec mma8452_freefall_event[] = {
+	{
+		.type = IIO_EV_TYPE_MAG,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
+		.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+					BIT(IIO_EV_INFO_PERIOD) |
+					BIT(IIO_EV_INFO_HIGH_PASS_FILTER_3DB)
+	},
+};
+
+static const struct iio_event_spec mma8652_freefall_event[] = {
+	{
+		.type = IIO_EV_TYPE_MAG,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
+		.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+					BIT(IIO_EV_INFO_PERIOD)
+	},
+};
+
 static const struct iio_event_spec mma8452_transient_event[] = {
 	{
 		.type = IIO_EV_TYPE_MAG,
@@ -781,6 +885,24 @@
 	.attrs = mma8452_event_attributes,
 };
 
+#define MMA8452_FREEFALL_CHANNEL(modifier) { \
+	.type = IIO_ACCEL, \
+	.modified = 1, \
+	.channel2 = modifier, \
+	.scan_index = -1, \
+	.event_spec = mma8452_freefall_event, \
+	.num_event_specs = ARRAY_SIZE(mma8452_freefall_event), \
+}
+
+#define MMA8652_FREEFALL_CHANNEL(modifier) { \
+	.type = IIO_ACCEL, \
+	.modified = 1, \
+	.channel2 = modifier, \
+	.scan_index = -1, \
+	.event_spec = mma8652_freefall_event, \
+	.num_event_specs = ARRAY_SIZE(mma8652_freefall_event), \
+}
+
 #define MMA8452_CHANNEL(axis, idx, bits) { \
 	.type = IIO_ACCEL, \
 	.modified = 1, \
@@ -822,11 +944,20 @@
 	.num_event_specs = ARRAY_SIZE(mma8452_motion_event), \
 }
 
+static const struct iio_chan_spec mma8451_channels[] = {
+	MMA8452_CHANNEL(X, idx_x, 14),
+	MMA8452_CHANNEL(Y, idx_y, 14),
+	MMA8452_CHANNEL(Z, idx_z, 14),
+	IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+	MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
+};
+
 static const struct iio_chan_spec mma8452_channels[] = {
 	MMA8452_CHANNEL(X, idx_x, 12),
 	MMA8452_CHANNEL(Y, idx_y, 12),
 	MMA8452_CHANNEL(Z, idx_z, 12),
 	IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+	MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
 };
 
 static const struct iio_chan_spec mma8453_channels[] = {
@@ -834,6 +965,7 @@
 	MMA8452_CHANNEL(Y, idx_y, 10),
 	MMA8452_CHANNEL(Z, idx_z, 10),
 	IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+	MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
 };
 
 static const struct iio_chan_spec mma8652_channels[] = {
@@ -841,6 +973,7 @@
 	MMA8652_CHANNEL(Y, idx_y, 12),
 	MMA8652_CHANNEL(Z, idx_z, 12),
 	IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+	MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
 };
 
 static const struct iio_chan_spec mma8653_channels[] = {
@@ -848,9 +981,11 @@
 	MMA8652_CHANNEL(Y, idx_y, 10),
 	MMA8652_CHANNEL(Z, idx_z, 10),
 	IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+	MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
 };
 
 enum {
+	mma8451,
 	mma8452,
 	mma8453,
 	mma8652,
@@ -858,17 +993,34 @@
 };
 
 static const struct mma_chip_info mma_chip_info_table[] = {
+	[mma8451] = {
+		.chip_id = MMA8451_DEVICE_ID,
+		.channels = mma8451_channels,
+		.num_channels = ARRAY_SIZE(mma8451_channels),
+		/*
+		 * Hardware has fullscale of -2G, -4G, -8G corresponding to
+		 * raw value -8192 for 14 bit, -2048 for 12 bit or -512 for 10
+		 * bit.
+		 * The userspace interface uses m/s^2 and we declare micro units
+		 * So the scale factor for the 14 bit readings here is given by:
+		 *	g * N * 1000000 / 8192 for N = 2, 4, 8 and g = 9.80665
+		 */
+		.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
+		.ev_cfg = MMA8452_TRANSIENT_CFG,
+		.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+		.ev_cfg_chan_shift = 1,
+		.ev_src = MMA8452_TRANSIENT_SRC,
+		.ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
+		.ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
+		.ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
+		.ev_ths = MMA8452_TRANSIENT_THS,
+		.ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+		.ev_count = MMA8452_TRANSIENT_COUNT,
+	},
 	[mma8452] = {
 		.chip_id = MMA8452_DEVICE_ID,
 		.channels = mma8452_channels,
 		.num_channels = ARRAY_SIZE(mma8452_channels),
-		/*
-		 * Hardware has fullscale of -2G, -4G, -8G corresponding to
-		 * raw value -2048 for 12 bit or -512 for 10 bit.
-		 * The userspace interface uses m/s^2 and we declare micro units
-		 * So scale factor for 12 bit here is given by:
-		 *	g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
-		 */
 		.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
 		.ev_cfg = MMA8452_TRANSIENT_CFG,
 		.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
@@ -1049,6 +1201,7 @@
 }
 
 static const struct of_device_id mma8452_dt_ids[] = {
+	{ .compatible = "fsl,mma8451", .data = &mma_chip_info_table[mma8451] },
 	{ .compatible = "fsl,mma8452", .data = &mma_chip_info_table[mma8452] },
 	{ .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
 	{ .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
@@ -1085,6 +1238,7 @@
 		return ret;
 
 	switch (ret) {
+	case MMA8451_DEVICE_ID:
 	case MMA8452_DEVICE_ID:
 	case MMA8453_DEVICE_ID:
 	case MMA8652_DEVICE_ID:
@@ -1190,6 +1344,10 @@
 	if (ret < 0)
 		goto buffer_cleanup;
 
+	ret = mma8452_set_freefall_mode(data, false);
+	if (ret)
+		return ret;
+
 	return 0;
 
 buffer_cleanup:
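
For the new MMA8451Q entry, the quoted scale values follow the formula in
the comment above with the 14-bit full scale of 8192 counts. A quick
standalone check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		const double g = 9.80665;
		int n;

		for (n = 2; n <= 8; n *= 2)	/* +/-2g, +/-4g, +/-8g ranges */
			printf("+/-%dg: %.0f micro m/s^2 per LSB\n",
			       n, g * n * 1e6 / 8192);
		/* prints 2394, 4788 and 9577, matching mma_scales above */
		return 0;
	}
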
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 70f0427..a03a141 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -67,6 +67,8 @@
 #define ST_ACCEL_1_DRDY_IRQ_ADDR		0x22
 #define ST_ACCEL_1_DRDY_IRQ_INT1_MASK		0x10
 #define ST_ACCEL_1_DRDY_IRQ_INT2_MASK		0x08
+#define ST_ACCEL_1_IHL_IRQ_ADDR			0x25
+#define ST_ACCEL_1_IHL_IRQ_MASK			0x02
 #define ST_ACCEL_1_MULTIREAD_BIT		true
 
 /* CUSTOM VALUES FOR SENSOR 2 */
@@ -92,6 +94,8 @@
 #define ST_ACCEL_2_DRDY_IRQ_ADDR		0x22
 #define ST_ACCEL_2_DRDY_IRQ_INT1_MASK		0x02
 #define ST_ACCEL_2_DRDY_IRQ_INT2_MASK		0x10
+#define ST_ACCEL_2_IHL_IRQ_ADDR			0x22
+#define ST_ACCEL_2_IHL_IRQ_MASK			0x80
 #define ST_ACCEL_2_MULTIREAD_BIT		true
 
 /* CUSTOM VALUES FOR SENSOR 3 */
@@ -125,6 +129,8 @@
 #define ST_ACCEL_3_DRDY_IRQ_ADDR		0x23
 #define ST_ACCEL_3_DRDY_IRQ_INT1_MASK		0x80
 #define ST_ACCEL_3_DRDY_IRQ_INT2_MASK		0x00
+#define ST_ACCEL_3_IHL_IRQ_ADDR			0x23
+#define ST_ACCEL_3_IHL_IRQ_MASK			0x40
 #define ST_ACCEL_3_IG1_EN_ADDR			0x23
 #define ST_ACCEL_3_IG1_EN_MASK			0x08
 #define ST_ACCEL_3_MULTIREAD_BIT		false
@@ -169,6 +175,8 @@
 #define ST_ACCEL_5_DRDY_IRQ_ADDR		0x22
 #define ST_ACCEL_5_DRDY_IRQ_INT1_MASK		0x04
 #define ST_ACCEL_5_DRDY_IRQ_INT2_MASK		0x20
+#define ST_ACCEL_5_IHL_IRQ_ADDR			0x22
+#define ST_ACCEL_5_IHL_IRQ_MASK			0x80
 #define ST_ACCEL_5_IG1_EN_ADDR			0x21
 #define ST_ACCEL_5_IG1_EN_MASK			0x08
 #define ST_ACCEL_5_MULTIREAD_BIT		false
@@ -292,6 +300,8 @@
 			.addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
 			.mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
+			.addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
+			.mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
 		},
 		.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
 		.bootime = 2,
@@ -355,6 +365,8 @@
 			.addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
 			.mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
+			.addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
+			.mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
 		},
 		.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
 		.bootime = 2,
@@ -430,6 +442,8 @@
 			.addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
 			.mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
+			.addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
+			.mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
 			.ig1 = {
 				.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
 				.en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -537,6 +551,8 @@
 			.addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
 			.mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
+			.addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
+			.mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
 		},
 		.multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
 		.bootime = 2, /* guess */
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 283ded7..932de1f 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -131,6 +131,16 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called at91_adc.
 
+config AT91_SAMA5D2_ADC
+	tristate "Atmel AT91 SAMA5D2 ADC"
+	depends on ARCH_AT91 || COMPILE_TEST
+	help
+	  Say yes here to build support for the Atmel SAMA5D2 ADC which is
+	  available on the SAMA5D2 SoC family.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called at91-sama5d2_adc.
+
 config AXP288_ADC
 	tristate "X-Powers AXP288 ADC driver"
 	depends on MFD_AXP20X
@@ -267,11 +277,11 @@
 	  called mcp320x.
 
 config MCP3422
-	tristate "Microchip Technology MCP3422/3/4/6/7/8 driver"
+	tristate "Microchip Technology MCP3421/2/3/4/5/6/7/8 driver"
 	depends on I2C
 	help
-	  Say yes here to build support for Microchip Technology's
-	  MCP3422, MCP3423, MCP3424, MCP3426, MCP3427 or MCP3428
+	  Say yes here to build support for Microchip Technology's MCP3421,
+	  MCP3422, MCP3423, MCP3424, MCP3425, MCP3426, MCP3427 or MCP3428
 	  analog to digital converters.
 
 	  This driver can also be built as a module. If so, the module will be
@@ -287,6 +297,20 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called men_z188_adc.
 
+config MXS_LRADC
+	tristate "Freescale i.MX23/i.MX28 LRADC"
+	depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM
+	depends on INPUT
+	select STMP_DEVICE
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for the i.MX23/i.MX28 LRADC converter
+	  built into these chips.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mxs-lradc.
+
 config NAU7802
 	tristate "Nuvoton NAU7802 ADC driver"
 	depends on I2C
@@ -352,6 +376,16 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called ti-adc081c.
 
+config TI_ADC0832
+	tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
+	depends on SPI
+	help
+	  If you say yes here you get support for Texas Instruments ADC0831,
+	  ADC0832, ADC0834, ADC0838 ADC chips.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called ti-adc0832.
+
 config TI_ADC128S052
 	tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021"
 	depends on SPI
@@ -362,6 +396,19 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called ti-adc128s052.
 
+config TI_ADS1015
+	tristate "Texas Instruments ADS1015 ADC"
+	depends on I2C && !SENSORS_ADS1015
+	select REGMAP_I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  If you say yes here you get support for Texas Instruments ADS1015
+	  ADC chip.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called ti-ads1015.
+
 config TI_ADS8688
 	tristate "Texas Instruments ADS8688"
 	depends on SPI && OF
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 6435780..b1aa456 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_AD7887) += ad7887.o
 obj-$(CONFIG_AD799X) += ad799x.o
 obj-$(CONFIG_AT91_ADC) += at91_adc.o
+obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
 obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
 obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
 obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
@@ -28,13 +29,16 @@
 obj-$(CONFIG_MCP320X) += mcp320x.o
 obj-$(CONFIG_MCP3422) += mcp3422.o
 obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
+obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
 obj-$(CONFIG_NAU7802) += nau7802.o
 obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
 obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
 obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
 obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
 obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
+obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
 obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
 obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
 obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
new file mode 100644
index 0000000..dbee13a
--- /dev/null
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -0,0 +1,508 @@
+/*
+ * Atmel ADC driver for SAMA5D2 devices and compatible.
+ *
+ * Copyright (C) 2015 Atmel,
+ *               2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+/* Control Register */
+#define AT91_SAMA5D2_CR		0x00
+/* Software Reset */
+#define	AT91_SAMA5D2_CR_SWRST		BIT(0)
+/* Start Conversion */
+#define	AT91_SAMA5D2_CR_START		BIT(1)
+/* Touchscreen Calibration */
+#define	AT91_SAMA5D2_CR_TSCALIB		BIT(2)
+/* Comparison Restart */
+#define	AT91_SAMA5D2_CR_CMPRST		BIT(4)
+
+/* Mode Register */
+#define AT91_SAMA5D2_MR		0x04
+/* Trigger Selection */
+#define	AT91_SAMA5D2_MR_TRGSEL(v)	((v) << 1)
+/* ADTRG */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG0	0
+/* TIOA0 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG1	1
+/* TIOA1 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG2	2
+/* TIOA2 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG3	3
+/* PWM event line 0 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG4	4
+/* PWM event line 1 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG5	5
+/* TIOA3 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG6	6
+/* RTCOUT0 */
+#define	AT91_SAMA5D2_MR_TRGSEL_TRIG7	7
+/* Sleep Mode */
+#define	AT91_SAMA5D2_MR_SLEEP		BIT(5)
+/* Fast Wake Up */
+#define	AT91_SAMA5D2_MR_FWUP		BIT(6)
+/* Prescaler Rate Selection */
+#define	AT91_SAMA5D2_MR_PRESCAL(v)	((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
+#define	AT91_SAMA5D2_MR_PRESCAL_OFFSET	8
+#define	AT91_SAMA5D2_MR_PRESCAL_MAX	0xff
+/* Startup Time */
+#define	AT91_SAMA5D2_MR_STARTUP(v)	((v) << 16)
+/* Analog Change */
+#define	AT91_SAMA5D2_MR_ANACH		BIT(23)
+/* Tracking Time */
+#define	AT91_SAMA5D2_MR_TRACKTIM(v)	((v) << 24)
+#define	AT91_SAMA5D2_MR_TRACKTIM_MAX	0xff
+/* Transfer Time */
+#define	AT91_SAMA5D2_MR_TRANSFER(v)	((v) << 28)
+#define	AT91_SAMA5D2_MR_TRANSFER_MAX	0x3
+/* Use Sequence Enable */
+#define	AT91_SAMA5D2_MR_USEQ		BIT(31)
+
+/* Channel Sequence Register 1 */
+#define AT91_SAMA5D2_SEQR1	0x08
+/* Channel Sequence Register 2 */
+#define AT91_SAMA5D2_SEQR2	0x0c
+/* Channel Enable Register */
+#define AT91_SAMA5D2_CHER	0x10
+/* Channel Disable Register */
+#define AT91_SAMA5D2_CHDR	0x14
+/* Channel Status Register */
+#define AT91_SAMA5D2_CHSR	0x18
+/* Last Converted Data Register */
+#define AT91_SAMA5D2_LCDR	0x20
+/* Interrupt Enable Register */
+#define AT91_SAMA5D2_IER		0x24
+/* Interrupt Disable Register */
+#define AT91_SAMA5D2_IDR		0x28
+/* Interrupt Mask Register */
+#define AT91_SAMA5D2_IMR		0x2c
+/* Interrupt Status Register */
+#define AT91_SAMA5D2_ISR		0x30
+/* Last Channel Trigger Mode Register */
+#define AT91_SAMA5D2_LCTMR	0x34
+/* Last Channel Compare Window Register */
+#define AT91_SAMA5D2_LCCWR	0x38
+/* Overrun Status Register */
+#define AT91_SAMA5D2_OVER	0x3c
+/* Extended Mode Register */
+#define AT91_SAMA5D2_EMR		0x40
+/* Compare Window Register */
+#define AT91_SAMA5D2_CWR		0x44
+/* Channel Gain Register */
+#define AT91_SAMA5D2_CGR		0x48
+/* Channel Offset Register */
+#define AT91_SAMA5D2_COR		0x4c
+/* Channel Data Register 0 */
+#define AT91_SAMA5D2_CDR0	0x50
+/* Analog Control Register */
+#define AT91_SAMA5D2_ACR		0x94
+/* Touchscreen Mode Register */
+#define AT91_SAMA5D2_TSMR	0xb0
+/* Touchscreen X Position Register */
+#define AT91_SAMA5D2_XPOSR	0xb4
+/* Touchscreen Y Position Register */
+#define AT91_SAMA5D2_YPOSR	0xb8
+/* Touchscreen Pressure Register */
+#define AT91_SAMA5D2_PRESSR	0xbc
+/* Trigger Register */
+#define AT91_SAMA5D2_TRGR	0xc0
+/* Correction Select Register */
+#define AT91_SAMA5D2_COSR	0xd0
+/* Correction Value Register */
+#define AT91_SAMA5D2_CVR		0xd4
+/* Channel Error Correction Register */
+#define AT91_SAMA5D2_CECR	0xd8
+/* Write Protection Mode Register */
+#define AT91_SAMA5D2_WPMR	0xe4
+/* Write Protection Status Register */
+#define AT91_SAMA5D2_WPSR	0xe8
+/* Version Register */
+#define AT91_SAMA5D2_VERSION	0xfc
+
+#define AT91_AT91_SAMA5D2_CHAN(num, addr)				\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.channel = num,						\
+		.address = addr,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = 12,					\
+		},							\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+		.datasheet_name = "CH"#num,				\
+		.indexed = 1,						\
+	}
+
+#define at91_adc_readl(st, reg)		readl_relaxed(st->base + reg)
+#define at91_adc_writel(st, reg, val)	writel_relaxed(val, st->base + reg)
+
+struct at91_adc_soc_info {
+	unsigned			startup_time;
+	unsigned			min_sample_rate;
+	unsigned			max_sample_rate;
+};
+
+struct at91_adc_state {
+	void __iomem			*base;
+	int				irq;
+	struct clk			*per_clk;
+	struct regulator		*reg;
+	struct regulator		*vref;
+	int				vref_uv;
+	const struct iio_chan_spec	*chan;
+	bool				conversion_done;
+	u32				conversion_value;
+	struct at91_adc_soc_info	soc_info;
+	wait_queue_head_t		wq_data_available;
+	/*
+	 * lock to prevent concurrent 'single conversion' requests through
+	 * sysfs.
+	 */
+	struct mutex			lock;
+};
+
+static const struct iio_chan_spec at91_adc_channels[] = {
+	AT91_AT91_SAMA5D2_CHAN(0, 0x50),
+	AT91_AT91_SAMA5D2_CHAN(1, 0x54),
+	AT91_AT91_SAMA5D2_CHAN(2, 0x58),
+	AT91_AT91_SAMA5D2_CHAN(3, 0x5c),
+	AT91_AT91_SAMA5D2_CHAN(4, 0x60),
+	AT91_AT91_SAMA5D2_CHAN(5, 0x64),
+	AT91_AT91_SAMA5D2_CHAN(6, 0x68),
+	AT91_AT91_SAMA5D2_CHAN(7, 0x6c),
+	AT91_AT91_SAMA5D2_CHAN(8, 0x70),
+	AT91_AT91_SAMA5D2_CHAN(9, 0x74),
+	AT91_AT91_SAMA5D2_CHAN(10, 0x78),
+	AT91_AT91_SAMA5D2_CHAN(11, 0x7c),
+};
+
+static unsigned at91_adc_startup_time(unsigned startup_time_min,
+				      unsigned adc_clk_khz)
+{
+	const unsigned startup_lookup[] = {
+		  0,   8,  16,  24,
+		 64,  80,  96, 112,
+		512, 576, 640, 704,
+		768, 832, 896, 960
+		};
+	unsigned ticks_min, i;
+
+	/*
+	 * Since the adc frequency is checked beforehand, there is no
+	 * reason not to meet the startup time constraint.
+	 */
+
+	ticks_min = startup_time_min * adc_clk_khz / 1000;
+	for (i = 0; i < ARRAY_SIZE(startup_lookup); i++)
+		if (startup_lookup[i] > ticks_min)
+			break;
+
+	return i;
+}
+
+static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	unsigned f_per, prescal, startup;
+
+	f_per = clk_get_rate(st->per_clk);
+	prescal = (f_per / (2 * freq)) - 1;
+
+	startup = at91_adc_startup_time(st->soc_info.startup_time,
+					freq / 1000);
+
+	at91_adc_writel(st, AT91_SAMA5D2_MR,
+			AT91_SAMA5D2_MR_TRANSFER(2)
+			| AT91_SAMA5D2_MR_STARTUP(startup)
+			| AT91_SAMA5D2_MR_PRESCAL(prescal));
+
+	dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
+		freq, startup, prescal);
+}
+
+static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
+{
+	unsigned f_adc, f_per = clk_get_rate(st->per_clk);
+	unsigned mr, prescal;
+
+	mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
+	prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET)
+		  & AT91_SAMA5D2_MR_PRESCAL_MAX;
+	f_adc = f_per / (2 * (prescal + 1));
+
+	return f_adc;
+}
+
+static irqreturn_t at91_adc_interrupt(int irq, void *private)
+{
+	struct iio_dev *indio = private;
+	struct at91_adc_state *st = iio_priv(indio);
+	u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
+	u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
+
+	if (status & imr) {
+		st->conversion_value = at91_adc_readl(st, st->chan->address);
+		st->conversion_done = true;
+		wake_up_interruptible(&st->wq_data_available);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int at91_adc_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val, int *val2, long mask)
+{
+	struct at91_adc_state *st = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&st->lock);
+
+		st->chan = chan;
+
+		at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+		at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
+		at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
+		ret = wait_event_interruptible_timeout(st->wq_data_available,
+						       st->conversion_done,
+						       msecs_to_jiffies(1000));
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+
+		if (ret > 0) {
+			*val = st->conversion_value;
+			ret = IIO_VAL_INT;
+			st->conversion_done = false;
+		}
+
+		at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
+		at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
+		mutex_unlock(&st->lock);
+		return ret;
+
+	case IIO_CHAN_INFO_SCALE:
+		*val = st->vref_uv / 1000;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = at91_adc_get_sample_freq(st);
+		return IIO_VAL_INT;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int at91_adc_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val, int val2, long mask)
+{
+	struct at91_adc_state *st = iio_priv(indio_dev);
+
+	if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+		return -EINVAL;
+
+	if (val < st->soc_info.min_sample_rate ||
+	    val > st->soc_info.max_sample_rate)
+		return -EINVAL;
+
+	at91_adc_setup_samp_freq(st, val);
+
+	return 0;
+}
+
+static const struct iio_info at91_adc_info = {
+	.read_raw = &at91_adc_read_raw,
+	.write_raw = &at91_adc_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static int at91_adc_probe(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev;
+	struct at91_adc_state *st;
+	struct resource	*res;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->name = dev_name(&pdev->dev);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &at91_adc_info;
+	indio_dev->channels = at91_adc_channels;
+	indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);
+
+	st = iio_priv(indio_dev);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "atmel,min-sample-rate-hz",
+				   &st->soc_info.min_sample_rate);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"invalid or missing value for atmel,min-sample-rate-hz\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "atmel,max-sample-rate-hz",
+				   &st->soc_info.max_sample_rate);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"invalid or missing value for atmel,max-sample-rate-hz\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms",
+				   &st->soc_info.startup_time);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"invalid or missing value for atmel,startup-time-ms\n");
+		return ret;
+	}
+
+	init_waitqueue_head(&st->wq_data_available);
+	mutex_init(&st->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	st->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(st->base))
+		return PTR_ERR(st->base);
+
+	st->irq = platform_get_irq(pdev, 0);
+	if (st->irq <= 0) {
+		if (!st->irq)
+			st->irq = -ENXIO;
+
+		return st->irq;
+	}
+
+	st->per_clk = devm_clk_get(&pdev->dev, "adc_clk");
+	if (IS_ERR(st->per_clk))
+		return PTR_ERR(st->per_clk);
+
+	st->reg = devm_regulator_get(&pdev->dev, "vddana");
+	if (IS_ERR(st->reg))
+		return PTR_ERR(st->reg);
+
+	st->vref = devm_regulator_get(&pdev->dev, "vref");
+	if (IS_ERR(st->vref))
+		return PTR_ERR(st->vref);
+
+	ret = devm_request_irq(&pdev->dev, st->irq, at91_adc_interrupt, 0,
+			       pdev->dev.driver->name, indio_dev);
+	if (ret)
+		return ret;
+
+	ret = regulator_enable(st->reg);
+	if (ret)
+		return ret;
+
+	ret = regulator_enable(st->vref);
+	if (ret)
+		goto reg_disable;
+
+	st->vref_uv = regulator_get_voltage(st->vref);
+	if (st->vref_uv <= 0) {
+		ret = -EINVAL;
+		goto vref_disable;
+	}
+
+	at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
+	at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+
+	at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+
+	ret = clk_prepare_enable(st->per_clk);
+	if (ret)
+		goto vref_disable;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto per_clk_disable_unprepare;
+
+	dev_info(&pdev->dev, "version: %x\n",
+		 readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
+
+	return 0;
+
+per_clk_disable_unprepare:
+	clk_disable_unprepare(st->per_clk);
+vref_disable:
+	regulator_disable(st->vref);
+reg_disable:
+	regulator_disable(st->reg);
+	return ret;
+}
+
+static int at91_adc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct at91_adc_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	clk_disable_unprepare(st->per_clk);
+
+	regulator_disable(st->vref);
+	regulator_disable(st->reg);
+
+	return 0;
+}
+
+static const struct of_device_id at91_adc_dt_match[] = {
+	{
+		.compatible = "atmel,sama5d2-adc",
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, at91_adc_dt_match);
+
+static struct platform_driver at91_adc_driver = {
+	.probe = at91_adc_probe,
+	.remove = at91_adc_remove,
+	.driver = {
+		.name = "at91-sama5d2_adc",
+		.of_match_table = at91_adc_dt_match,
+	},
+};
+module_platform_driver(at91_adc_driver);
+
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_DESCRIPTION("Atmel AT91 SAMA5D2 ADC");
+MODULE_LICENSE("GPL v2");
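
The SCALE path in at91_adc_read_raw() above returns IIO_VAL_FRACTIONAL_LOG2 with the reference voltage in millivolts and the channel resolution, so the effective step size is vref_mV / 2^realbits. A minimal userspace-style sketch of that conversion, using purely illustrative numbers (a 3.3 V reference and the 12-bit resolution assumed by the channel definition):

#include <stdio.h>

int main(void)
{
	unsigned int vref_mv = 3300;	/* corresponds to st->vref_uv / 1000 */
	unsigned int realbits = 12;	/* corresponds to chan->scan_type.realbits */
	unsigned int raw = 2048;	/* hypothetical conversion result */
	/* IIO_VAL_FRACTIONAL_LOG2 means scale = val / 2^val2 */
	double scale_mv = (double)vref_mv / (1u << realbits);

	printf("raw %u -> %.3f mV\n", raw, raw * scale_mv);	/* 1650.000 mV */
	return 0;
}
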
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 0c904ed..7fd2494 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -46,7 +46,7 @@
 	struct regmap *regmap;
 };
 
-static const struct iio_chan_spec const axp288_adc_channels[] = {
+static const struct iio_chan_spec axp288_adc_channels[] = {
 	{
 		.indexed = 1,
 		.type = IIO_TEMP,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index d1c05f6..a850ca7 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -187,26 +187,27 @@
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
 	}
 
-#define MCP320X_VOLTAGE_CHANNEL_DIFF(num)			\
+#define MCP320X_VOLTAGE_CHANNEL_DIFF(chan1, chan2)		\
 	{							\
 		.type = IIO_VOLTAGE,				\
 		.indexed = 1,					\
-		.channel = (num * 2),				\
-		.channel2 = (num * 2 + 1),			\
-		.address = (num * 2),				\
+		.channel = (chan1),				\
+		.channel2 = (chan2),				\
+		.address = (chan1),				\
 		.differential = 1,				\
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
 	}
 
 static const struct iio_chan_spec mcp3201_channels[] = {
-	MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
 };
 
 static const struct iio_chan_spec mcp3202_channels[] = {
 	MCP320X_VOLTAGE_CHANNEL(0),
 	MCP320X_VOLTAGE_CHANNEL(1),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
 };
 
 static const struct iio_chan_spec mcp3204_channels[] = {
@@ -214,8 +215,10 @@
 	MCP320X_VOLTAGE_CHANNEL(1),
 	MCP320X_VOLTAGE_CHANNEL(2),
 	MCP320X_VOLTAGE_CHANNEL(3),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(0),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(1),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2),
 };
 
 static const struct iio_chan_spec mcp3208_channels[] = {
@@ -227,10 +230,14 @@
 	MCP320X_VOLTAGE_CHANNEL(5),
 	MCP320X_VOLTAGE_CHANNEL(6),
 	MCP320X_VOLTAGE_CHANNEL(7),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(0),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(1),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(2),
-	MCP320X_VOLTAGE_CHANNEL_DIFF(3),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(4, 5),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(5, 4),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(6, 7),
+	MCP320X_VOLTAGE_CHANNEL_DIFF(7, 6),
 };
 
 static const struct iio_info mcp320x_info = {
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 6eca7ae..ebad83e 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -1,11 +1,12 @@
 /*
- * mcp3422.c - driver for the Microchip mcp3422/3/4/6/7/8 chip family
+ * mcp3422.c - driver for the Microchip mcp3421/2/3/4/5/6/7/8 chip family
  *
  * Copyright (C) 2013, Angelo Compagnucci
  * Author: Angelo Compagnucci <angelo.compagnucci@gmail.com>
  *
  * Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf
  *            http://ww1.microchip.com/downloads/en/DeviceDoc/22226a.pdf
+ *            http://ww1.microchip.com/downloads/en/DeviceDoc/22072b.pdf
  *
  * This driver exports the value of analog input voltage to sysfs, the
  * voltage unit is nV.
@@ -357,6 +358,7 @@
 
 	switch (adc->id) {
 	case 1:
+	case 5:
 		indio_dev->channels = mcp3421_channels;
 		indio_dev->num_channels = ARRAY_SIZE(mcp3421_channels);
 		break;
@@ -395,6 +397,7 @@
 	{ "mcp3422", 2 },
 	{ "mcp3423", 3 },
 	{ "mcp3424", 4 },
+	{ "mcp3425", 5 },
 	{ "mcp3426", 6 },
 	{ "mcp3427", 7 },
 	{ "mcp3428", 8 },
@@ -421,5 +424,5 @@
 module_i2c_driver(mcp3422_driver);
 
 MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
-MODULE_DESCRIPTION("Microchip mcp3422/3/4/6/7/8 driver");
+MODULE_DESCRIPTION("Microchip mcp3421/2/3/4/5/6/7/8 driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
similarity index 99%
rename from drivers/staging/iio/adc/mxs-lradc.c
rename to drivers/iio/adc/mxs-lradc.c
index bb1f152..33051b8 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -443,7 +443,8 @@
 			  LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1),
 			  LRADC_CH(ch));
 
-	/* from the datasheet:
+	/*
+	 * from the datasheet:
 	 * "Software must clear this register in preparation for a
 	 * multi-cycle accumulation.
 	 */
@@ -504,7 +505,8 @@
 	mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1));
 	mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2));
 
-	/* from the datasheet:
+	/*
+	 * from the datasheet:
 	 * "Software must clear this register in preparation for a
 	 * multi-cycle accumulation.
 	 */
@@ -914,7 +916,8 @@
 
 	case IIO_CHAN_INFO_SCALE:
 		if (chan->type == IIO_TEMP) {
-			/* From the datasheet, we have to multiply by 1.012 and
+			/*
+			 * From the datasheet, we have to multiply by 1.012 and
 			 * divide by 4
 			 */
 			*val = 0;
@@ -929,7 +932,8 @@
 
 	case IIO_CHAN_INFO_OFFSET:
 		if (chan->type == IIO_TEMP) {
-			/* The calculated value from the ADC is in Kelvin, we
+			/*
+			 * The calculated value from the ADC is in Kelvin, we
 			 * want Celsius for hwmon so the offset is -273.15
 			 * The offset is applied before scaling so it is
 			 * actually -273.15 * 4 / 1.012 = -1079.644268
@@ -1750,6 +1754,7 @@
 	iio_triggered_buffer_cleanup(iio);
 
 	clk_disable_unprepare(lradc->clk);
+
 	return 0;
 }
 
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
new file mode 100644
index 0000000..0afeac0
--- /dev/null
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -0,0 +1,288 @@
+/*
+ * ADC0831/ADC0832/ADC0834/ADC0838 8-bit ADC driver
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.ti.com/lit/ds/symlink/adc0832-n.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+enum {
+	adc0831,
+	adc0832,
+	adc0834,
+	adc0838,
+};
+
+struct adc0832 {
+	struct spi_device *spi;
+	struct regulator *reg;
+	struct mutex lock;
+	u8 mux_bits;
+
+	u8 tx_buf[2] ____cacheline_aligned;
+	u8 rx_buf[2];
+};
+
+#define ADC0832_VOLTAGE_CHANNEL(chan)					\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = chan,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE)	\
+	}
+
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2)			\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = (chan1),					\
+		.channel2 = (chan2),					\
+		.differential = 1,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE)	\
+	}
+
+static const struct iio_chan_spec adc0831_channels[] = {
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+};
+
+static const struct iio_chan_spec adc0832_channels[] = {
+	ADC0832_VOLTAGE_CHANNEL(0),
+	ADC0832_VOLTAGE_CHANNEL(1),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+};
+
+static const struct iio_chan_spec adc0834_channels[] = {
+	ADC0832_VOLTAGE_CHANNEL(0),
+	ADC0832_VOLTAGE_CHANNEL(1),
+	ADC0832_VOLTAGE_CHANNEL(2),
+	ADC0832_VOLTAGE_CHANNEL(3),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+};
+
+static const struct iio_chan_spec adc0838_channels[] = {
+	ADC0832_VOLTAGE_CHANNEL(0),
+	ADC0832_VOLTAGE_CHANNEL(1),
+	ADC0832_VOLTAGE_CHANNEL(2),
+	ADC0832_VOLTAGE_CHANNEL(3),
+	ADC0832_VOLTAGE_CHANNEL(4),
+	ADC0832_VOLTAGE_CHANNEL(5),
+	ADC0832_VOLTAGE_CHANNEL(6),
+	ADC0832_VOLTAGE_CHANNEL(7),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+};
+
+static int adc0831_adc_conversion(struct adc0832 *adc)
+{
+	struct spi_device *spi = adc->spi;
+	int ret;
+
+	ret = spi_read(spi, &adc->rx_buf, 2);
+	if (ret)
+		return ret;
+
+	/*
+	 * Skip TRI-STATE and a leading zero
+	 */
+	return (adc->rx_buf[0] << 2 & 0xff) | (adc->rx_buf[1] >> 6);
+}
+
+static int adc0832_adc_conversion(struct adc0832 *adc, int channel,
+				bool differential)
+{
+	struct spi_device *spi = adc->spi;
+	struct spi_transfer xfer = {
+		.tx_buf = adc->tx_buf,
+		.rx_buf = adc->rx_buf,
+		.len = 2,
+	};
+	int ret;
+
+	if (!adc->mux_bits)
+		return adc0831_adc_conversion(adc);
+
+	/* start bit */
+	adc->tx_buf[0] = 1 << (adc->mux_bits + 1);
+	/* single-ended or differential */
+	adc->tx_buf[0] |= differential ? 0 : (1 << adc->mux_bits);
+	/* odd / sign */
+	adc->tx_buf[0] |= (channel % 2) << (adc->mux_bits - 1);
+	/* select */
+	if (adc->mux_bits > 1)
+		adc->tx_buf[0] |= channel / 2;
+
+	/* align Data output BIT7 (MSB) to 8-bit boundary */
+	adc->tx_buf[0] <<= 1;
+
+	ret = spi_sync_transfer(spi, &xfer, 1);
+	if (ret)
+		return ret;
+
+	return adc->rx_buf[1];
+}
+
+static int adc0832_read_raw(struct iio_dev *iio,
+			struct iio_chan_spec const *channel, int *value,
+			int *shift, long mask)
+{
+	struct adc0832 *adc = iio_priv(iio);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&adc->lock);
+		*value = adc0832_adc_conversion(adc, channel->channel,
+						channel->differential);
+		mutex_unlock(&adc->lock);
+		if (*value < 0)
+			return *value;
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*value = regulator_get_voltage(adc->reg);
+		if (*value < 0)
+			return *value;
+
+		/* convert regulator output voltage to mV */
+		*value /= 1000;
+		*shift = 8;
+
+		return IIO_VAL_FRACTIONAL_LOG2;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info adc0832_info = {
+	.read_raw = adc0832_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static int adc0832_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct adc0832 *adc;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	adc = iio_priv(indio_dev);
+	adc->spi = spi;
+	mutex_init(&adc->lock);
+
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->info = &adc0832_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	switch (spi_get_device_id(spi)->driver_data) {
+	case adc0831:
+		adc->mux_bits = 0;
+		indio_dev->channels = adc0831_channels;
+		indio_dev->num_channels = ARRAY_SIZE(adc0831_channels);
+		break;
+	case adc0832:
+		adc->mux_bits = 1;
+		indio_dev->channels = adc0832_channels;
+		indio_dev->num_channels = ARRAY_SIZE(adc0832_channels);
+		break;
+	case adc0834:
+		adc->mux_bits = 2;
+		indio_dev->channels = adc0834_channels;
+		indio_dev->num_channels = ARRAY_SIZE(adc0834_channels);
+		break;
+	case adc0838:
+		adc->mux_bits = 3;
+		indio_dev->channels = adc0838_channels;
+		indio_dev->num_channels = ARRAY_SIZE(adc0838_channels);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	adc->reg = devm_regulator_get(&spi->dev, "vref");
+	if (IS_ERR(adc->reg))
+		return PTR_ERR(adc->reg);
+
+	ret = regulator_enable(adc->reg);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, indio_dev);
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		regulator_disable(adc->reg);
+
+	return ret;
+}
+
+static int adc0832_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct adc0832 *adc = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	regulator_disable(adc->reg);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id adc0832_dt_ids[] = {
+	{ .compatible = "ti,adc0831", },
+	{ .compatible = "ti,adc0832", },
+	{ .compatible = "ti,adc0834", },
+	{ .compatible = "ti,adc0838", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, adc0832_dt_ids);
+
+#endif
+
+static const struct spi_device_id adc0832_id[] = {
+	{ "adc0831", adc0831 },
+	{ "adc0832", adc0832 },
+	{ "adc0834", adc0834 },
+	{ "adc0838", adc0838 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, adc0832_id);
+
+static struct spi_driver adc0832_driver = {
+	.driver = {
+		.name = "adc0832",
+		.of_match_table = of_match_ptr(adc0832_dt_ids),
+	},
+	.probe = adc0832_probe,
+	.remove = adc0832_remove,
+	.id_table = adc0832_id,
+};
+module_spi_driver(adc0832_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("ADC0831/ADC0832/ADC0834/ADC0838 driver");
+MODULE_LICENSE("GPL v2");
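
adc0832_adc_conversion() above assembles the MUX addressing byte from a start bit, the SGL/DIF bit, the ODD/SIGN bit and the select bits, then shifts it so the reply's MSB lands on a byte boundary. A small standalone sketch of the same packing for a hypothetical ADC0838 (mux_bits = 3) single-ended read of channel 5; the part and channel are illustrative assumptions:

#include <stdio.h>

int main(void)
{
	unsigned int mux_bits = 3;	/* ADC0838 */
	unsigned int channel = 5;	/* single-ended CH5 */
	int differential = 0;
	unsigned char tx;

	tx = 1 << (mux_bits + 1);			/* start bit */
	tx |= differential ? 0 : (1 << mux_bits);	/* SGL/DIF */
	tx |= (channel % 2) << (mux_bits - 1);		/* ODD/SIGN */
	if (mux_bits > 1)
		tx |= channel / 2;			/* SELECT bits */
	tx <<= 1;	/* align the output MSB to an 8-bit boundary */

	printf("tx_buf[0] = 0x%02x\n", tx);	/* prints 0x3c */
	return 0;
}
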
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
new file mode 100644
index 0000000..73cbf0b
--- /dev/null
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -0,0 +1,612 @@
+/*
+ * ADS1015 - Texas Instruments Analog-to-Digital Converter
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for ADS1015 ADC 7-bit I2C slave address:
+ *	* 0x48 - ADDR connected to Ground
+ *	* 0x49 - ADDR connected to Vdd
+ *	* 0x4A - ADDR connected to SDA
+ *	* 0x4B - ADDR connected to SCL
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include <linux/i2c/ads1015.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define ADS1015_DRV_NAME "ads1015"
+
+#define ADS1015_CONV_REG	0x00
+#define ADS1015_CFG_REG		0x01
+
+#define ADS1015_CFG_DR_SHIFT	5
+#define ADS1015_CFG_MOD_SHIFT	8
+#define ADS1015_CFG_PGA_SHIFT	9
+#define ADS1015_CFG_MUX_SHIFT	12
+
+#define ADS1015_CFG_DR_MASK	GENMASK(7, 5)
+#define ADS1015_CFG_MOD_MASK	BIT(8)
+#define ADS1015_CFG_PGA_MASK	GENMASK(11, 9)
+#define ADS1015_CFG_MUX_MASK	GENMASK(14, 12)
+
+/* device operating modes */
+#define ADS1015_CONTINUOUS	0
+#define ADS1015_SINGLESHOT	1
+
+#define ADS1015_SLEEP_DELAY_MS		2000
+#define ADS1015_DEFAULT_PGA		2
+#define ADS1015_DEFAULT_DATA_RATE	4
+#define ADS1015_DEFAULT_CHAN		0
+
+enum ads1015_channels {
+	ADS1015_AIN0_AIN1 = 0,
+	ADS1015_AIN0_AIN3,
+	ADS1015_AIN1_AIN3,
+	ADS1015_AIN2_AIN3,
+	ADS1015_AIN0,
+	ADS1015_AIN1,
+	ADS1015_AIN2,
+	ADS1015_AIN3,
+	ADS1015_TIMESTAMP,
+};
+
+static const unsigned int ads1015_data_rate[] = {
+	128, 250, 490, 920, 1600, 2400, 3300, 3300
+};
+
+static const struct {
+	int scale;
+	int uscale;
+} ads1015_scale[] = {
+	{3, 0},
+	{2, 0},
+	{1, 0},
+	{0, 500000},
+	{0, 250000},
+	{0, 125000},
+	{0, 125000},
+	{0, 125000},
+};
+
+#define ADS1015_V_CHAN(_chan, _addr) {				\
+	.type = IIO_VOLTAGE,					\
+	.indexed = 1,						\
+	.address = _addr,					\
+	.channel = _chan,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
+				BIT(IIO_CHAN_INFO_SCALE) |	\
+				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+	.scan_index = _addr,					\
+	.scan_type = {						\
+		.sign = 's',					\
+		.realbits = 12,					\
+		.storagebits = 16,				\
+		.shift = 4,					\
+		.endianness = IIO_CPU,				\
+	},							\
+}
+
+#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) {		\
+	.type = IIO_VOLTAGE,					\
+	.differential = 1,					\
+	.indexed = 1,						\
+	.address = _addr,					\
+	.channel = _chan,					\
+	.channel2 = _chan2,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
+				BIT(IIO_CHAN_INFO_SCALE) |	\
+				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+	.scan_index = _addr,					\
+	.scan_type = {						\
+		.sign = 's',					\
+		.realbits = 12,					\
+		.storagebits = 16,				\
+		.shift = 4,					\
+		.endianness = IIO_CPU,				\
+	},							\
+}
+
+struct ads1015_data {
+	struct regmap *regmap;
+	/*
+	 * Protects ADC ops, e.g: concurrent sysfs/buffered
+	 * data reads, configuration updates
+	 */
+	struct mutex lock;
+	struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+};
+
+static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+	return (reg == ADS1015_CFG_REG);
+}
+
+static const struct regmap_config ads1015_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = ADS1015_CFG_REG,
+	.writeable_reg = ads1015_is_writeable_reg,
+};
+
+static const struct iio_chan_spec ads1015_channels[] = {
+	ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
+	ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
+	ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
+	ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
+	ADS1015_V_CHAN(0, ADS1015_AIN0),
+	ADS1015_V_CHAN(1, ADS1015_AIN1),
+	ADS1015_V_CHAN(2, ADS1015_AIN2),
+	ADS1015_V_CHAN(3, ADS1015_AIN3),
+	IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
+static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+{
+	int ret;
+	struct device *dev = regmap_get_device(data->regmap);
+
+	if (on) {
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0)
+			pm_runtime_put_noidle(dev);
+	} else {
+		pm_runtime_mark_last_busy(dev);
+		ret = pm_runtime_put_autosuspend(dev);
+	}
+
+	return ret;
+}
+
+static
+int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+{
+	int ret, pga, dr, conv_time;
+	bool change;
+
+	if (chan < 0 || chan >= ADS1015_CHANNELS)
+		return -EINVAL;
+
+	pga = data->channel_data[chan].pga;
+	dr = data->channel_data[chan].data_rate;
+
+	ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
+				       ADS1015_CFG_MUX_MASK |
+				       ADS1015_CFG_PGA_MASK,
+				       chan << ADS1015_CFG_MUX_SHIFT |
+				       pga << ADS1015_CFG_PGA_SHIFT,
+				       &change);
+	if (ret < 0)
+		return ret;
+
+	if (change) {
+		conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]);
+		usleep_range(conv_time, conv_time + 1);
+	}
+
+	return regmap_read(data->regmap, ADS1015_CONV_REG, val);
+}
+
+static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ads1015_data *data = iio_priv(indio_dev);
+	s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding +  4x s16 timestamp */
+	int chan, ret, res;
+
+	memset(buf, 0, sizeof(buf));
+
+	mutex_lock(&data->lock);
+	chan = find_first_bit(indio_dev->active_scan_mask,
+			      indio_dev->masklength);
+	ret = ads1015_get_adc_result(data, chan, &res);
+	if (ret < 0) {
+		mutex_unlock(&data->lock);
+		goto err;
+	}
+
+	buf[0] = res;
+	mutex_unlock(&data->lock);
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int ads1015_set_scale(struct ads1015_data *data, int chan,
+			     int scale, int uscale)
+{
+	int i, ret, rindex = -1;
+
+	for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
+		if (ads1015_scale[i].scale == scale &&
+		    ads1015_scale[i].uscale == uscale) {
+			rindex = i;
+			break;
+		}
+	if (rindex < 0)
+		return -EINVAL;
+
+	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+				 ADS1015_CFG_PGA_MASK,
+				 rindex << ADS1015_CFG_PGA_SHIFT);
+	if (ret < 0)
+		return ret;
+
+	data->channel_data[chan].pga = rindex;
+
+	return 0;
+}
+
+static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
+{
+	int i, ret, rindex = -1;
+
+	for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
+		if (ads1015_data_rate[i] == rate) {
+			rindex = i;
+			break;
+		}
+	if (rindex < 0)
+		return -EINVAL;
+
+	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+				 ADS1015_CFG_DR_MASK,
+				 rindex << ADS1015_CFG_DR_SHIFT);
+	if (ret < 0)
+		return ret;
+
+	data->channel_data[chan].data_rate = rindex;
+
+	return 0;
+}
+
+static int ads1015_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan, int *val,
+			    int *val2, long mask)
+{
+	int ret, idx;
+	struct ads1015_data *data = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&data->lock);
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (iio_buffer_enabled(indio_dev)) {
+			ret = -EBUSY;
+			break;
+		}
+
+		ret = ads1015_set_power_state(data, true);
+		if (ret < 0)
+			break;
+
+		ret = ads1015_get_adc_result(data, chan->address, val);
+		if (ret < 0) {
+			ads1015_set_power_state(data, false);
+			break;
+		}
+
+		/* 12 bit res, D0 is bit 4 in conversion register */
+		*val = sign_extend32(*val >> 4, 11);
+
+		ret = ads1015_set_power_state(data, false);
+		if (ret < 0)
+			break;
+
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		idx = data->channel_data[chan->address].pga;
+		*val = ads1015_scale[idx].scale;
+		*val2 = ads1015_scale[idx].uscale;
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		idx = data->channel_data[chan->address].data_rate;
+		*val = ads1015_data_rate[idx];
+		ret = IIO_VAL_INT;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&data->lock);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static int ads1015_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan, int val,
+			     int val2, long mask)
+{
+	struct ads1015_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->lock);
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		ret = ads1015_set_scale(data, chan->address, val, val2);
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = ads1015_set_data_rate(data, chan->address, val);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&data->lock);
+
+	return ret;
+}
+
+static int ads1015_buffer_preenable(struct iio_dev *indio_dev)
+{
+	return ads1015_set_power_state(iio_priv(indio_dev), true);
+}
+
+static int ads1015_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	return ads1015_set_power_state(iio_priv(indio_dev), false);
+}
+
+static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
+	.preenable	= ads1015_buffer_preenable,
+	.postenable	= iio_triggered_buffer_postenable,
+	.predisable	= iio_triggered_buffer_predisable,
+	.postdisable	= ads1015_buffer_postdisable,
+	.validate_scan_mask = &iio_validate_scan_mask_onehot,
+};
+
+static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR(sampling_frequency_available,
+		      "128 250 490 920 1600 2400 3300");
+
+static struct attribute *ads1015_attributes[] = {
+	&iio_const_attr_scale_available.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ads1015_attribute_group = {
+	.attrs = ads1015_attributes,
+};
+
+static const struct iio_info ads1015_info = {
+	.driver_module	= THIS_MODULE,
+	.read_raw	= ads1015_read_raw,
+	.write_raw	= ads1015_write_raw,
+	.attrs		= &ads1015_attribute_group,
+};
+
+#ifdef CONFIG_OF
+static int ads1015_get_channels_config_of(struct i2c_client *client)
+{
+	struct ads1015_data *data = i2c_get_clientdata(client);
+	struct device_node *node;
+
+	if (!client->dev.of_node ||
+	    !of_get_next_child(client->dev.of_node, NULL))
+		return -EINVAL;
+
+	for_each_child_of_node(client->dev.of_node, node) {
+		u32 pval;
+		unsigned int channel;
+		unsigned int pga = ADS1015_DEFAULT_PGA;
+		unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
+
+		if (of_property_read_u32(node, "reg", &pval)) {
+			dev_err(&client->dev, "invalid reg on %s\n",
+				node->full_name);
+			continue;
+		}
+
+		channel = pval;
+		if (channel >= ADS1015_CHANNELS) {
+			dev_err(&client->dev,
+				"invalid channel index %d on %s\n",
+				channel, node->full_name);
+			continue;
+		}
+
+		if (!of_property_read_u32(node, "ti,gain", &pval)) {
+			pga = pval;
+			if (pga > 6) {
+				dev_err(&client->dev, "invalid gain on %s\n",
+					node->full_name);
+				return -EINVAL;
+			}
+		}
+
+		if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+			data_rate = pval;
+			if (data_rate > 7) {
+				dev_err(&client->dev,
+					"invalid data_rate on %s\n",
+					node->full_name);
+				return -EINVAL;
+			}
+		}
+
+		data->channel_data[channel].pga = pga;
+		data->channel_data[channel].data_rate = data_rate;
+	}
+
+	return 0;
+}
+#endif
+
+static void ads1015_get_channels_config(struct i2c_client *client)
+{
+	unsigned int k;
+
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ads1015_data *data = iio_priv(indio_dev);
+	struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
+
+	/* prefer platform data */
+	if (pdata) {
+		memcpy(data->channel_data, pdata->channel_data,
+		       sizeof(data->channel_data));
+		return;
+	}
+
+#ifdef CONFIG_OF
+	if (!ads1015_get_channels_config_of(client))
+		return;
+#endif
+	/* fallback on default configuration */
+	for (k = 0; k < ADS1015_CHANNELS; ++k) {
+		data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
+		data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
+	}
+}
+
+static int ads1015_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct ads1015_data *data;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &ads1015_info;
+	indio_dev->name = ADS1015_DRV_NAME;
+	indio_dev->channels = ads1015_channels;
+	indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* we need to keep this ABI the same as used by hwmon ADS1015 driver */
+	ads1015_get_channels_config(client);
+
+	data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		dev_err(&client->dev, "Failed to allocate register map\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 ads1015_trigger_handler,
+					 &ads1015_buffer_setup_ops);
+	if (ret < 0) {
+		dev_err(&client->dev, "iio triggered buffer setup failed\n");
+		return ret;
+	}
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret)
+		goto err_buffer_cleanup;
+	pm_runtime_set_autosuspend_delay(&client->dev, ADS1015_SLEEP_DELAY_MS);
+	pm_runtime_use_autosuspend(&client->dev);
+	pm_runtime_enable(&client->dev);
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to register IIO device\n");
+		goto err_buffer_cleanup;
+	}
+
+	return 0;
+
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return ret;
+}
+
+static int ads1015_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ads1015_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	/* power down single shot mode */
+	return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+				  ADS1015_CFG_MOD_MASK,
+				  ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+}
+
+#ifdef CONFIG_PM
+static int ads1015_runtime_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct ads1015_data *data = iio_priv(indio_dev);
+
+	return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+				  ADS1015_CFG_MOD_MASK,
+				  ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+}
+
+static int ads1015_runtime_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct ads1015_data *data = iio_priv(indio_dev);
+
+	return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+				  ADS1015_CFG_MOD_MASK,
+				  ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
+}
+#endif
+
+static const struct dev_pm_ops ads1015_pm_ops = {
+	SET_RUNTIME_PM_OPS(ads1015_runtime_suspend,
+			   ads1015_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id ads1015_id[] = {
+	{"ads1015", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, ads1015_id);
+
+static struct i2c_driver ads1015_driver = {
+	.driver = {
+		.name = ADS1015_DRV_NAME,
+		.pm = &ads1015_pm_ops,
+	},
+	.probe		= ads1015_probe,
+	.remove		= ads1015_remove,
+	.id_table	= ads1015_id,
+};
+
+module_i2c_driver(ads1015_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS1015 ADC driver");
+MODULE_LICENSE("GPL v2");
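
ads1015_get_adc_result() above ORs the channel (MUX), gain (PGA) and data-rate selections into the 16-bit config register and, when the settings change, sleeps for one conversion period taken from the data-rate table. A standalone sketch of that packing and the resulting wait, reusing the shift values defined above and the driver defaults (PGA index 2, data-rate index 4); the single-ended AIN2 channel is an arbitrary choice:

#include <stdio.h>

#define CFG_DR_SHIFT	5	/* ADS1015_CFG_DR_SHIFT */
#define CFG_PGA_SHIFT	9	/* ADS1015_CFG_PGA_SHIFT */
#define CFG_MUX_SHIFT	12	/* ADS1015_CFG_MUX_SHIFT */

static const unsigned int data_rate[] = {
	128, 250, 490, 920, 1600, 2400, 3300, 3300
};

int main(void)
{
	unsigned int chan = 6;	/* ADS1015_AIN2 */
	unsigned int pga = 2;	/* ADS1015_DEFAULT_PGA -> scale {1, 0} above */
	unsigned int dr = 4;	/* ADS1015_DEFAULT_DATA_RATE -> 1600 SPS */

	unsigned int cfg = (chan << CFG_MUX_SHIFT) |
			   (pga << CFG_PGA_SHIFT) |
			   (dr << CFG_DR_SHIFT);	/* 0x6480 */
	/* one conversion period, as in ads1015_get_adc_result() */
	unsigned int conv_us = (1000000 + data_rate[dr] - 1) / data_rate[dr];

	printf("cfg bits = 0x%04x, wait ~%u us\n", cfg, conv_us);	/* 625 us */
	return 0;
}
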
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index ebdb838..9fabed4 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -93,12 +93,7 @@
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 
-	dmaengine_terminate_all(dmaengine_buffer->chan);
-	/* FIXME: There is a slight chance of a race condition here.
-	 * dmaengine_terminate_all() does not guarantee that all transfer
-	 * callbacks have finished running. Need to introduce a
-	 * dmaengine_terminate_all_sync().
-	 */
+	dmaengine_terminate_sync(dmaengine_buffer->chan);
 	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
 }
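
The switch to dmaengine_terminate_sync() above closes the race noted in the removed FIXME: unlike the old terminate_all call, it does not return until no transfer callback can still be running. Conceptually it pairs an asynchronous terminate with a synchronize step; a hedged sketch of that contract (not the kernel's exact implementation):

#include <linux/dmaengine.h>

/*
 * Conceptual equivalent of dmaengine_terminate_sync(): stop all pending
 * descriptors, then wait until no completion callback can still execute.
 */
static inline int example_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;
	dmaengine_synchronize(chan);
	return 0;
}
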
 
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index f16de61..f73290f 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -4,6 +4,20 @@
 
 menu "Chemical Sensors"
 
+config ATLAS_PH_SENSOR
+	tristate "Atlas Scientific OEM pH-SM sensor"
+	depends on I2C
+	select REGMAP_I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	select IRQ_WORK
+	help
+	  Say Y here to build I2C interface support for the Atlas
+	  Scientific OEM pH-SM sensor.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called atlas-ph-sensor.
+
 config IAQCORE
 	tristate "AMS iAQ-Core VOC sensors"
 	depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 167861f..b02202b 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -3,5 +3,6 @@
 #
 
 # When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ATLAS_PH_SENSOR)	+= atlas-ph-sensor.o
 obj-$(CONFIG_IAQCORE)		+= ams-iaq-core.o
 obj-$(CONFIG_VZ89X)		+= vz89x.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
new file mode 100644
index 0000000..06cd49c
--- /dev/null
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -0,0 +1,511 @@
+/*
+ * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
+ *
+ * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pm_runtime.h>
+
+#define ATLAS_REGMAP_NAME	"atlas_ph_regmap"
+#define ATLAS_DRV_NAME		"atlas_ph"
+
+#define ATLAS_REG_DEV_TYPE		0x00
+#define ATLAS_REG_DEV_VERSION		0x01
+
+#define ATLAS_REG_INT_CONTROL		0x04
+#define ATLAS_REG_INT_CONTROL_EN	BIT(3)
+
+#define ATLAS_REG_PWR_CONTROL		0x06
+
+#define ATLAS_REG_CALIB_STATUS		0x0d
+#define ATLAS_REG_CALIB_STATUS_MASK	0x07
+#define ATLAS_REG_CALIB_STATUS_LOW	BIT(0)
+#define ATLAS_REG_CALIB_STATUS_MID	BIT(1)
+#define ATLAS_REG_CALIB_STATUS_HIGH	BIT(2)
+
+#define ATLAS_REG_TEMP_DATA		0x0e
+#define ATLAS_REG_PH_DATA		0x16
+
+#define ATLAS_PH_INT_TIME_IN_US		450000
+
+struct atlas_data {
+	struct i2c_client *client;
+	struct iio_trigger *trig;
+	struct regmap *regmap;
+	struct irq_work work;
+
+	__be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */
+};
+
+static const struct regmap_range atlas_volatile_ranges[] = {
+	regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
+	regmap_reg_range(ATLAS_REG_CALIB_STATUS, ATLAS_REG_CALIB_STATUS),
+	regmap_reg_range(ATLAS_REG_TEMP_DATA, ATLAS_REG_TEMP_DATA + 4),
+	regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
+};
+
+static const struct regmap_access_table atlas_volatile_table = {
+	.yes_ranges	= atlas_volatile_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(atlas_volatile_ranges),
+};
+
+static const struct regmap_config atlas_regmap_config = {
+	.name = ATLAS_REGMAP_NAME,
+
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.volatile_table = &atlas_volatile_table,
+	.max_register = ATLAS_REG_PH_DATA + 4,
+	.cache_type = REGCACHE_FLAT,
+};
+
+static const struct iio_chan_spec atlas_channels[] = {
+	{
+		.type = IIO_PH,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 32,
+			.storagebits = 32,
+			.endianness = IIO_BE,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+	{
+		.type = IIO_TEMP,
+		.address = ATLAS_REG_TEMP_DATA,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.output = 1,
+		.scan_index = -1
+	},
+};
+
+static int atlas_set_powermode(struct atlas_data *data, int on)
+{
+	return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
+}
+
+static int atlas_set_interrupt(struct atlas_data *data, bool state)
+{
+	return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL,
+				  ATLAS_REG_INT_CONTROL_EN,
+				  state ? ATLAS_REG_INT_CONTROL_EN : 0);
+}
+
+static int atlas_buffer_postenable(struct iio_dev *indio_dev)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = iio_triggered_buffer_postenable(indio_dev);
+	if (ret)
+		return ret;
+
+	ret = pm_runtime_get_sync(&data->client->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&data->client->dev);
+		return ret;
+	}
+
+	return atlas_set_interrupt(data, true);
+}
+
+static int atlas_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = iio_triggered_buffer_predisable(indio_dev);
+	if (ret)
+		return ret;
+
+	ret = atlas_set_interrupt(data, false);
+	if (ret)
+		return ret;
+
+	pm_runtime_mark_last_busy(&data->client->dev);
+	return pm_runtime_put_autosuspend(&data->client->dev);
+}
+
+static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
+	.postenable = atlas_buffer_postenable,
+	.predisable = atlas_buffer_predisable,
+};
+
+static void atlas_work_handler(struct irq_work *work)
+{
+	struct atlas_data *data = container_of(work, struct atlas_data, work);
+
+	iio_trigger_poll(data->trig);
+}
+
+static irqreturn_t atlas_trigger_handler(int irq, void *private)
+{
+	struct iio_poll_func *pf = private;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct atlas_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = i2c_smbus_read_i2c_block_data(data->client, ATLAS_REG_PH_DATA,
+				sizeof(data->buffer[0]), (u8 *) &data->buffer);
+
+	if (ret > 0)
+		iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+				iio_get_time_ns());
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t atlas_interrupt_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct atlas_data *data = iio_priv(indio_dev);
+
+	irq_work_queue(&data->work);
+
+	return IRQ_HANDLED;
+}
+
+static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
+{
+	struct device *dev = &data->client->dev;
+	int suspended = pm_runtime_suspended(dev);
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	if (suspended)
+		usleep_range(ATLAS_PH_INT_TIME_IN_US,
+			     ATLAS_PH_INT_TIME_IN_US + 100000);
+
+	ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
+			      (u8 *) val, sizeof(*val));
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return ret;
+}
+
+static int atlas_read_raw(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan,
+			  int *val, int *val2, long mask)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW: {
+		int ret;
+		__be32 reg;
+
+		switch (chan->type) {
+		case IIO_TEMP:
+			ret = regmap_bulk_read(data->regmap, chan->address,
+					      (u8 *) &reg, sizeof(reg));
+			break;
+		case IIO_PH:
+			mutex_lock(&indio_dev->mlock);
+
+			if (iio_buffer_enabled(indio_dev))
+				ret = -EBUSY;
+			else
+				ret = atlas_read_ph_measurement(data, &reg);
+
+			mutex_unlock(&indio_dev->mlock);
+			break;
+		default:
+			ret = -EINVAL;
+		}
+
+		if (!ret) {
+			*val = be32_to_cpu(reg);
+			ret = IIO_VAL_INT;
+		}
+		return ret;
+	}
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_TEMP:
+			*val = 1; /* 0.01 */
+			*val2 = 100;
+			break;
+		case IIO_PH:
+			*val = 1; /* 0.001 */
+			*val2 = 1000;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return IIO_VAL_FRACTIONAL;
+	}
+
+	return -EINVAL;
+}
+
+static int atlas_write_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int val, int val2, long mask)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+	__be32 reg = cpu_to_be32(val);
+
+	if (val2 != 0 || val < 0 || val > 20000)
+		return -EINVAL;
+
+	if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP)
+		return -EINVAL;
+
+	return regmap_bulk_write(data->regmap, chan->address,
+				 &reg, sizeof(reg));
+}
+
+static const struct iio_info atlas_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = atlas_read_raw,
+	.write_raw = atlas_write_raw,
+};
+
+static int atlas_check_calibration(struct atlas_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val);
+	if (ret)
+		return ret;
+
+	if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) {
+		dev_warn(dev, "device has not been calibrated\n");
+		return 0;
+	}
+
+	if (!(val & ATLAS_REG_CALIB_STATUS_LOW))
+		dev_warn(dev, "device missing low point calibration\n");
+
+	if (!(val & ATLAS_REG_CALIB_STATUS_MID))
+		dev_warn(dev, "device missing mid point calibration\n");
+
+	if (!(val & ATLAS_REG_CALIB_STATUS_HIGH))
+		dev_warn(dev, "device missing high point calibration\n");
+
+	return 0;
+};
+
+static int atlas_probe(struct i2c_client *client,
+		       const struct i2c_device_id *id)
+{
+	struct atlas_data *data;
+	struct iio_trigger *trig;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	indio_dev->info = &atlas_info;
+	indio_dev->name = ATLAS_DRV_NAME;
+	indio_dev->channels = atlas_channels;
+	indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+	indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
+	indio_dev->dev.parent = &client->dev;
+
+	trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+				      indio_dev->name, indio_dev->id);
+
+	if (!trig)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	data->trig = trig;
+	trig->dev.parent = indio_dev->dev.parent;
+	trig->ops = &atlas_interrupt_trigger_ops;
+	iio_trigger_set_drvdata(trig, indio_dev);
+
+	i2c_set_clientdata(client, indio_dev);
+
+	data->regmap = devm_regmap_init_i2c(client, &atlas_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		dev_err(&client->dev, "regmap initialization failed\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret)
+		return ret;
+
+	if (client->irq <= 0) {
+		dev_err(&client->dev, "no valid irq defined\n");
+		return -EINVAL;
+	}
+
+	ret = atlas_check_calibration(data);
+	if (ret)
+		return ret;
+
+	ret = iio_trigger_register(trig);
+	if (ret) {
+		dev_err(&client->dev, "failed to register trigger\n");
+		return ret;
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		&atlas_trigger_handler, &atlas_buffer_setup_ops);
+	if (ret) {
+		dev_err(&client->dev, "cannot setup iio trigger\n");
+		goto unregister_trigger;
+	}
+
+	init_irq_work(&data->work, atlas_work_handler);
+
+	/* interrupt pin toggles on new conversion */
+	ret = devm_request_threaded_irq(&client->dev, client->irq,
+					NULL, atlas_interrupt_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"atlas_irq",
+					indio_dev);
+	if (ret) {
+		dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
+		goto unregister_buffer;
+	}
+
+	ret = atlas_set_powermode(data, 1);
+	if (ret) {
+		dev_err(&client->dev, "cannot power device on\n");
+		goto unregister_buffer;
+	}
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev, 2500);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&client->dev, "unable to register device\n");
+		goto unregister_pm;
+	}
+
+	return 0;
+
+unregister_pm:
+	pm_runtime_disable(&client->dev);
+	atlas_set_powermode(data, 0);
+
+unregister_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+unregister_trigger:
+	iio_trigger_unregister(data->trig);
+
+	return ret;
+}
+
+static int atlas_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct atlas_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	iio_trigger_unregister(data->trig);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	return atlas_set_powermode(data, 0);
+}
+
+#ifdef CONFIG_PM
+static int atlas_runtime_suspend(struct device *dev)
+{
+	struct atlas_data *data =
+		     iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+	return atlas_set_powermode(data, 0);
+}
+
+static int atlas_runtime_resume(struct device *dev)
+{
+	struct atlas_data *data =
+		     iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+	return atlas_set_powermode(data, 1);
+}
+#endif
+
+static const struct dev_pm_ops atlas_pm_ops = {
+	SET_RUNTIME_PM_OPS(atlas_runtime_suspend,
+			   atlas_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id atlas_id[] = {
+	{ "atlas-ph-sm", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
+
+static const struct of_device_id atlas_dt_ids[] = {
+	{ .compatible = "atlas,ph-sm" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
+
+static struct i2c_driver atlas_driver = {
+	.driver = {
+		.name	= ATLAS_DRV_NAME,
+		.of_match_table	= of_match_ptr(atlas_dt_ids),
+		.pm	= &atlas_pm_ops,
+	},
+	.probe		= atlas_probe,
+	.remove		= atlas_remove,
+	.id_table	= atlas_id,
+};
+module_i2c_driver(atlas_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
+MODULE_LICENSE("GPL");
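
atlas_read_raw() above reports the pH channel with a scale of 1/1000 and the temperature compensation channel with a scale of 1/100, and atlas_write_raw() caps compensation writes at 20000 (i.e. 200.00). A short sketch of how raw values map through those scales; the readings themselves are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int ph_raw = 7012;	/* hypothetical in_ph_raw reading */
	unsigned int temp_raw = 2500;	/* hypothetical compensation value */

	/* pH channel: scale = 1/1000 */
	printf("pH   = %.3f\n", ph_raw / 1000.0);	/* 7.012 */
	/* temperature channel: scale = 1/100 */
	printf("temp = %.2f\n", temp_raw / 100.0);	/* 25.00 */
	return 0;
}
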
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8447c31..f5a2d44 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -18,13 +18,15 @@
 #include <asm/unaligned.h>
 #include <linux/iio/common/st_sensors.h>
 
+#include "st_sensors_core.h"
+
 static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
 {
 	return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
 }
 
-static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
-						u8 reg_addr, u8 mask, u8 data)
+int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+				    u8 reg_addr, u8 mask, u8 data)
 {
 	int err;
 	u8 new_data;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.h b/drivers/iio/common/st_sensors/st_sensors_core.h
new file mode 100644
index 0000000..cd88098
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.h
@@ -0,0 +1,8 @@
+/*
+ * Local functions in the ST Sensors core
+ */
+#ifndef __ST_SENSORS_CORE_H
+#define __ST_SENSORS_CORE_H
+int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+				    u8 reg_addr, u8 mask, u8 data);
+#endif
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 3e90704..6a8c983 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -14,32 +14,65 @@
 #include <linux/iio/iio.h>
 #include <linux/iio/trigger.h>
 #include <linux/interrupt.h>
-
 #include <linux/iio/common/st_sensors.h>
-
+#include "st_sensors_core.h"
 
 int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 				const struct iio_trigger_ops *trigger_ops)
 {
-	int err;
+	int err, irq;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
+	unsigned long irq_trig;
 
 	sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
 	if (sdata->trig == NULL) {
-		err = -ENOMEM;
 		dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
-		goto iio_trigger_alloc_error;
+		return -ENOMEM;
 	}
 
-	err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+	irq = sdata->get_irq_data_ready(indio_dev);
+	irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+	/*
+	 * If the IRQ is triggered on falling edge, we need to mark the
+	 * interrupt as active low, if the hardware supports this.
+	 */
+	if (irq_trig == IRQF_TRIGGER_FALLING) {
+		if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
+			dev_err(&indio_dev->dev,
+				"falling edge specified for IRQ but hardware "
+				"only supports rising edge, will request "
+				"rising edge\n");
+			irq_trig = IRQF_TRIGGER_RISING;
+		} else {
+			/* Set up INT active low i.e. falling edge */
+			err = st_sensors_write_data_with_mask(indio_dev,
+				sdata->sensor_settings->drdy_irq.addr_ihl,
+				sdata->sensor_settings->drdy_irq.mask_ihl, 1);
+			if (err < 0)
+				goto iio_trigger_free;
+			dev_info(&indio_dev->dev,
+				 "interrupts on the falling edge\n");
+		}
+	} else if (irq_trig == IRQF_TRIGGER_RISING) {
+		dev_info(&indio_dev->dev,
+			 "interrupts on the rising edge\n");
+
+	} else {
+		dev_err(&indio_dev->dev,
+			"unsupported IRQ trigger specified (%lx), only "
+			"rising and falling edges supported, enforcing "
+			"rising edge\n", irq_trig);
+		irq_trig = IRQF_TRIGGER_RISING;
+	}
+	err = request_threaded_irq(irq,
 			iio_trigger_generic_data_rdy_poll,
 			NULL,
-			IRQF_TRIGGER_RISING,
+			irq_trig,
 			sdata->trig->name,
 			sdata->trig);
 	if (err) {
 		dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
-		goto request_irq_error;
+		goto iio_trigger_free;
 	}
 
 	iio_trigger_set_drvdata(sdata->trig, indio_dev);
@@ -57,9 +90,8 @@
 
 iio_trigger_register_error:
 	free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
-request_irq_error:
+iio_trigger_free:
 	iio_trigger_free(sdata->trig);
-iio_trigger_alloc_error:
 	return err;
 }
 EXPORT_SYMBOL(st_sensors_allocate_trigger);
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index e701e28..31a1985 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -10,8 +10,10 @@
 	depends on (SPI_MASTER && I2C!=m) || I2C
 	help
 	  Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
-	  AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
-	  AD5669R Digital to Analog Converter.
+	  AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R, AD5627, AD5627R,
+	  AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R, AD5666,
+	  AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
+	  LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to Analog Converter.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad5064.
@@ -111,6 +113,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad5755.
 
+config AD5761
+	tristate "Analog Devices AD5761/61R/21/21R DAC driver"
+	depends on SPI_MASTER
+	help
+	  Say yes here to build support for Analog Devices AD5761, AD5761R, AD5721,
+	  AD5721R Digital to Analog Converter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad5761.
+
 config AD5764
 	tristate "Analog Devices AD5764/64R/44/44R DAC driver"
 	depends on SPI_MASTER
@@ -176,11 +188,11 @@
 	  10 bits DAC.
 
 config MCP4725
-	tristate "MCP4725 DAC driver"
+	tristate "MCP4725/6 DAC driver"
 	depends on I2C
 	---help---
 	  Say Y here if you want to build a driver for the Microchip
-	  MCP 4725 12-bit digital-to-analog converter (DAC) with I2C
+	  MCP 4725/6 12-bit digital-to-analog converter (DAC) with I2C
 	  interface.
 
 	  To compile this driver as a module, choose M here: the module
@@ -196,4 +208,13 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mcp4922.
 
+config STX104
+	tristate "Apex Embedded Systems STX104 DAC driver"
+	depends on ISA
+	help
+	  Say yes here to build support for the 2-channel DAC on the Apex
+	  Embedded Systems STX104 integrated analog PC/104 card. The base port
+	  addresses for the devices may be configured via the "base" module
+	  parameter array.
+
 endmenu
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 63ae056..e2deda9 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -12,6 +12,7 @@
 obj-$(CONFIG_AD5446) += ad5446.o
 obj-$(CONFIG_AD5449) += ad5449.o
 obj-$(CONFIG_AD5755) += ad5755.o
+obj-$(CONFIG_AD5761) += ad5761.o
 obj-$(CONFIG_AD5764) += ad5764.o
 obj-$(CONFIG_AD5791) += ad5791.o
 obj-$(CONFIG_AD5686) += ad5686.o
@@ -21,3 +22,4 @@
 obj-$(CONFIG_MAX5821) += max5821.o
 obj-$(CONFIG_MCP4725) += mcp4725.o
 obj-$(CONFIG_MCP4922) += mcp4922.o
+obj-$(CONFIG_STX104) += stx104.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 81ca008..6803e4a 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -1,6 +1,9 @@
 /*
- * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R,
- * AD5648, AD5666, AD5668, AD5669R Digital to analog converters driver
+ * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R,
+ * AD5627, AD5627R, AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R,
+ * AD5666, AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
+ * LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to analog converters
+ * driver
  *
  * Copyright 2011 Analog Devices Inc.
  *
@@ -39,6 +42,9 @@
 #define AD5064_CMD_RESET			0x7
 #define AD5064_CMD_CONFIG			0x8
 
+#define AD5064_CMD_RESET_V2			0x5
+#define AD5064_CMD_CONFIG_V2			0x7
+
 #define AD5064_CONFIG_DAISY_CHAIN_ENABLE	BIT(1)
 #define AD5064_CONFIG_INT_VREF_ENABLE		BIT(0)
 
@@ -48,12 +54,25 @@
 #define AD5064_LDAC_PWRDN_3STATE		0x3
 
 /**
+ * enum ad5064_regmap_type - Register layout variant
+ * @AD5064_REGMAP_ADI: Old Analog Devices register map layout
+ * @AD5064_REGMAP_ADI2: New Analog Devices register map layout
+ * @AD5064_REGMAP_LTC: LTC register map layout
+ */
+enum ad5064_regmap_type {
+	AD5064_REGMAP_ADI,
+	AD5064_REGMAP_ADI2,
+	AD5064_REGMAP_LTC,
+};
+
+/**
  * struct ad5064_chip_info - chip specific information
  * @shared_vref:	whether the vref supply is shared between channels
- * @internal_vref:	internal reference voltage. 0 if the chip has no internal
- *			vref.
+ * @internal_vref:	internal reference voltage. 0 if the chip has no
+ *			internal vref.
  * @channel:		channel specification
  * @num_channels:	number of channels
+ * @regmap_type:	register map layout variant
  */
 
 struct ad5064_chip_info {
@@ -61,6 +80,7 @@
 	unsigned long internal_vref;
 	const struct iio_chan_spec *channels;
 	unsigned int num_channels;
+	enum ad5064_regmap_type regmap_type;
 };
 
 struct ad5064_state;
@@ -111,18 +131,43 @@
 	ID_AD5064,
 	ID_AD5064_1,
 	ID_AD5065,
+	ID_AD5625,
+	ID_AD5625R_1V25,
+	ID_AD5625R_2V5,
+	ID_AD5627,
+	ID_AD5627R_1V25,
+	ID_AD5627R_2V5,
 	ID_AD5628_1,
 	ID_AD5628_2,
 	ID_AD5629_1,
 	ID_AD5629_2,
+	ID_AD5645R_1V25,
+	ID_AD5645R_2V5,
+	ID_AD5647R_1V25,
+	ID_AD5647R_2V5,
 	ID_AD5648_1,
 	ID_AD5648_2,
+	ID_AD5665,
+	ID_AD5665R_1V25,
+	ID_AD5665R_2V5,
 	ID_AD5666_1,
 	ID_AD5666_2,
+	ID_AD5667,
+	ID_AD5667R_1V25,
+	ID_AD5667R_2V5,
 	ID_AD5668_1,
 	ID_AD5668_2,
 	ID_AD5669_1,
 	ID_AD5669_2,
+	ID_LTC2606,
+	ID_LTC2607,
+	ID_LTC2609,
+	ID_LTC2616,
+	ID_LTC2617,
+	ID_LTC2619,
+	ID_LTC2626,
+	ID_LTC2627,
+	ID_LTC2629,
 };
 
 static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -136,15 +181,27 @@
 static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
 	const struct iio_chan_spec *chan)
 {
-	unsigned int val;
+	unsigned int val, address;
+	unsigned int shift;
 	int ret;
 
-	val = (0x1 << chan->address);
+	if (st->chip_info->regmap_type == AD5064_REGMAP_LTC) {
+		val = 0;
+		address = chan->address;
+	} else {
+		if (st->chip_info->regmap_type == AD5064_REGMAP_ADI2)
+			shift = 4;
+		else
+			shift = 8;
 
-	if (st->pwr_down[chan->channel])
-		val |= st->pwr_down_mode[chan->channel] << 8;
+		val = (0x1 << chan->address);
+		address = 0;
 
-	ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
+		if (st->pwr_down[chan->channel])
+			val |= st->pwr_down_mode[chan->channel] << shift;
+	}
+
+	ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, address, val, 0);
 
 	return ret;
 }
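
ad5064_sync_powerdown_mode() above encodes the POWERDOWN_DAC payload differently per register-map variant: the LTC parts address the channel directly and carry no mode bits, while the two ADI layouts set a per-channel bit and place the mode at bit 8 (classic map) or bit 4 (newer map). A compact sketch of the three encodings for a hypothetical channel at address 1 with an arbitrary mode field of 1:

#include <stdio.h>

enum regmap_type { REGMAP_ADI, REGMAP_ADI2, REGMAP_LTC };

static void encode(enum regmap_type type, unsigned int chan_addr,
		   unsigned int mode, int powered_down)
{
	unsigned int val, address;

	if (type == REGMAP_LTC) {
		val = 0;		/* mode is implicit: 90kohm_to_gnd only */
		address = chan_addr;
	} else {
		unsigned int shift = (type == REGMAP_ADI2) ? 4 : 8;

		val = 1u << chan_addr;
		address = 0;
		if (powered_down)
			val |= mode << shift;
	}
	printf("type %d: address=%u val=0x%03x\n", type, address, val);
}

int main(void)
{
	encode(REGMAP_ADI, 1, 1, 1);	/* classic ADI layout: val 0x102 */
	encode(REGMAP_ADI2, 1, 1, 1);	/* newer ADI layout:   val 0x012 */
	encode(REGMAP_LTC, 1, 1, 1);	/* LTC layout: address 1, val 0  */
	return 0;
}
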
@@ -155,6 +212,10 @@
 	"three_state",
 };
 
+static const char * const ltc2617_powerdown_modes[] = {
+	"90kohm_to_gnd",
+};
+
 static int ad5064_get_powerdown_mode(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan)
 {
@@ -185,6 +246,13 @@
 	.set = ad5064_set_powerdown_mode,
 };
 
+static const struct iio_enum ltc2617_powerdown_mode_enum = {
+	.items = ltc2617_powerdown_modes,
+	.num_items = ARRAY_SIZE(ltc2617_powerdown_modes),
+	.get = ad5064_get_powerdown_mode,
+	.set = ad5064_set_powerdown_mode,
+};
+
 static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
 	uintptr_t private, const struct iio_chan_spec *chan, char *buf)
 {
@@ -295,7 +363,19 @@
 	{ },
 };
 
-#define AD5064_CHANNEL(chan, addr, bits, _shift) {		\
+static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
+	{
+		.name = "powerdown",
+		.read = ad5064_read_dac_powerdown,
+		.write = ad5064_write_dac_powerdown,
+		.shared = IIO_SEPARATE,
+	},
+	IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum),
+	IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum),
+	{ },
+};
+
+#define AD5064_CHANNEL(chan, addr, bits, _shift, _ext_info) {		\
 	.type = IIO_VOLTAGE,					\
 	.indexed = 1,						\
 	.output = 1,						\
@@ -309,145 +389,340 @@
 		.storagebits = 16,				\
 		.shift = (_shift),				\
 	},							\
-	.ext_info = ad5064_ext_info,				\
+	.ext_info = (_ext_info),				\
 }
 
-#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
+#define DECLARE_AD5064_CHANNELS(name, bits, shift, ext_info) \
 const struct iio_chan_spec name[] = { \
-	AD5064_CHANNEL(0, 0, bits, shift), \
-	AD5064_CHANNEL(1, 1, bits, shift), \
-	AD5064_CHANNEL(2, 2, bits, shift), \
-	AD5064_CHANNEL(3, 3, bits, shift), \
-	AD5064_CHANNEL(4, 4, bits, shift), \
-	AD5064_CHANNEL(5, 5, bits, shift), \
-	AD5064_CHANNEL(6, 6, bits, shift), \
-	AD5064_CHANNEL(7, 7, bits, shift), \
+	AD5064_CHANNEL(0, 0, bits, shift, ext_info), \
+	AD5064_CHANNEL(1, 1, bits, shift, ext_info), \
+	AD5064_CHANNEL(2, 2, bits, shift, ext_info), \
+	AD5064_CHANNEL(3, 3, bits, shift, ext_info), \
+	AD5064_CHANNEL(4, 4, bits, shift, ext_info), \
+	AD5064_CHANNEL(5, 5, bits, shift, ext_info), \
+	AD5064_CHANNEL(6, 6, bits, shift, ext_info), \
+	AD5064_CHANNEL(7, 7, bits, shift, ext_info), \
 }
 
-#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
+#define DECLARE_AD5065_CHANNELS(name, bits, shift, ext_info) \
 const struct iio_chan_spec name[] = { \
-	AD5064_CHANNEL(0, 0, bits, shift), \
-	AD5064_CHANNEL(1, 3, bits, shift), \
+	AD5064_CHANNEL(0, 0, bits, shift, ext_info), \
+	AD5064_CHANNEL(1, 3, bits, shift, ext_info), \
 }
 
-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4, ad5064_ext_info);
 
-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8, ad5064_ext_info);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6, ad5064_ext_info);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4, ad5064_ext_info);
 
-static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
-static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
+static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5645_channels, 14, 2, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0, ad5064_ext_info);
+
+static DECLARE_AD5064_CHANNELS(ltc2607_channels, 16, 0, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2617_channels, 14, 2, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2627_channels, 12, 4, ltc2617_ext_info);
 
 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 	[ID_AD5024] = {
 		.shared_vref = false,
 		.channels = ad5024_channels,
 		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5025] = {
 		.shared_vref = false,
 		.channels = ad5025_channels,
 		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5044] = {
 		.shared_vref = false,
 		.channels = ad5044_channels,
 		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5045] = {
 		.shared_vref = false,
 		.channels = ad5045_channels,
 		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5064] = {
 		.shared_vref = false,
 		.channels = ad5064_channels,
 		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5064_1] = {
 		.shared_vref = true,
 		.channels = ad5064_channels,
 		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5065] = {
 		.shared_vref = false,
 		.channels = ad5065_channels,
 		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI,
+	},
+	[ID_AD5625] = {
+		.shared_vref = true,
+		.channels = ad5629_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5625R_1V25] = {
+		.shared_vref = true,
+		.internal_vref = 1250000,
+		.channels = ad5629_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5625R_2V5] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5629_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5627] = {
+		.shared_vref = true,
+		.channels = ad5629_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5627R_1V25] = {
+		.shared_vref = true,
+		.internal_vref = 1250000,
+		.channels = ad5629_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5627R_2V5] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5629_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
 	},
 	[ID_AD5628_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
 		.channels = ad5024_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5628_2] = {
 		.shared_vref = true,
 		.internal_vref = 5000000,
 		.channels = ad5024_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5629_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
 		.channels = ad5629_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5629_2] = {
 		.shared_vref = true,
 		.internal_vref = 5000000,
 		.channels = ad5629_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
+	},
+	[ID_AD5645R_1V25] = {
+		.shared_vref = true,
+		.internal_vref = 1250000,
+		.channels = ad5645_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5645R_2V5] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5645_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5647R_1V25] = {
+		.shared_vref = true,
+		.internal_vref = 1250000,
+		.channels = ad5645_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5647R_2V5] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5645_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
 	},
 	[ID_AD5648_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
 		.channels = ad5044_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5648_2] = {
 		.shared_vref = true,
 		.internal_vref = 5000000,
 		.channels = ad5044_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
+	},
+	[ID_AD5665] = {
+		.shared_vref = true,
+		.channels = ad5669_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5665R_1V25] = {
+		.shared_vref = true,
+		.internal_vref = 1250000,
+		.channels = ad5669_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5665R_2V5] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5669_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI2
 	},
 	[ID_AD5666_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
 		.channels = ad5064_channels,
 		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5666_2] = {
 		.shared_vref = true,
 		.internal_vref = 5000000,
 		.channels = ad5064_channels,
 		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_ADI,
+	},
+	[ID_AD5667] = {
+		.shared_vref = true,
+		.channels = ad5669_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5667R_1V25] = {
+		.shared_vref = true,
+		.internal_vref = 1250000,
+		.channels = ad5669_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
+	},
+	[ID_AD5667R_2V5] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5669_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_ADI2
 	},
 	[ID_AD5668_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
 		.channels = ad5064_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5668_2] = {
 		.shared_vref = true,
 		.internal_vref = 5000000,
 		.channels = ad5064_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5669_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
 		.channels = ad5669_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
 	},
 	[ID_AD5669_2] = {
 		.shared_vref = true,
 		.internal_vref = 5000000,
 		.channels = ad5669_channels,
 		.num_channels = 8,
+		.regmap_type = AD5064_REGMAP_ADI,
+	},
+	[ID_LTC2606] = {
+		.shared_vref = true,
+		.internal_vref = 0,
+		.channels = ltc2607_channels,
+		.num_channels = 1,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2607] = {
+		.shared_vref = true,
+		.internal_vref = 0,
+		.channels = ltc2607_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2609] = {
+		.shared_vref = false,
+		.internal_vref = 0,
+		.channels = ltc2607_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2616] = {
+		.shared_vref = true,
+		.internal_vref = 0,
+		.channels = ltc2617_channels,
+		.num_channels = 1,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2617] = {
+		.shared_vref = true,
+		.internal_vref = 0,
+		.channels = ltc2617_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2619] = {
+		.shared_vref = false,
+		.internal_vref = 0,
+		.channels = ltc2617_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2626] = {
+		.shared_vref = true,
+		.internal_vref = 0,
+		.channels = ltc2627_channels,
+		.num_channels = 1,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2627] = {
+		.shared_vref = true,
+		.internal_vref = 0,
+		.channels = ltc2627_channels,
+		.num_channels = 2,
+		.regmap_type = AD5064_REGMAP_LTC,
+	},
+	[ID_LTC2629] = {
+		.shared_vref = false,
+		.internal_vref = 0,
+		.channels = ltc2627_channels,
+		.num_channels = 4,
+		.regmap_type = AD5064_REGMAP_LTC,
 	},
 };
 
@@ -469,6 +744,22 @@
 	return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
 }
 
+static int ad5064_set_config(struct ad5064_state *st, unsigned int val)
+{
+	unsigned int cmd;
+
+	switch (st->chip_info->regmap_type) {
+	case AD5064_REGMAP_ADI2:
+		cmd = AD5064_CMD_CONFIG_V2;
+		break;
+	default:
+		cmd = AD5064_CMD_CONFIG;
+		break;
+	}
+
+	return ad5064_write(st, cmd, 0, val, 0);
+}
+
 static int ad5064_probe(struct device *dev, enum ad5064_type type,
 			const char *name, ad5064_write_func write)
 {
@@ -498,8 +789,7 @@
 		if (!st->chip_info->internal_vref)
 			return ret;
 		st->use_internal_vref = true;
-		ret = ad5064_write(st, AD5064_CMD_CONFIG, 0,
-			AD5064_CONFIG_INT_VREF_ENABLE, 0);
+		ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
 		if (ret) {
 			dev_err(dev, "Failed to enable internal vref: %d\n",
 				ret);
@@ -628,9 +918,19 @@
 	unsigned int addr, unsigned int val)
 {
 	struct i2c_client *i2c = to_i2c_client(st->dev);
+	unsigned int cmd_shift;
 	int ret;
 
-	st->data.i2c[0] = (cmd << 4) | addr;
+	switch (st->chip_info->regmap_type) {
+	case AD5064_REGMAP_ADI2:
+		cmd_shift = 3;
+		break;
+	default:
+		cmd_shift = 4;
+		break;
+	}
+
+	st->data.i2c[0] = (cmd << cmd_shift) | addr;
 	put_unaligned_be16(val, &st->data.i2c[1]);
 
 	ret = i2c_master_send(i2c, st->data.i2c, 3);
@@ -653,12 +953,35 @@
 }
 
 static const struct i2c_device_id ad5064_i2c_ids[] = {
+	{"ad5625", ID_AD5625 },
+	{"ad5625r-1v25", ID_AD5625R_1V25 },
+	{"ad5625r-2v5", ID_AD5625R_2V5 },
+	{"ad5627", ID_AD5627 },
+	{"ad5627r-1v25", ID_AD5627R_1V25 },
+	{"ad5627r-2v5", ID_AD5627R_2V5 },
 	{"ad5629-1", ID_AD5629_1},
 	{"ad5629-2", ID_AD5629_2},
 	{"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
+	{"ad5645r-1v25", ID_AD5645R_1V25 },
+	{"ad5645r-2v5", ID_AD5645R_2V5 },
+	{"ad5665", ID_AD5665 },
+	{"ad5665r-1v25", ID_AD5665R_1V25 },
+	{"ad5665r-2v5", ID_AD5665R_2V5 },
+	{"ad5667", ID_AD5667 },
+	{"ad5667r-1v25", ID_AD5667R_1V25 },
+	{"ad5667r-2v5", ID_AD5667R_2V5 },
 	{"ad5669-1", ID_AD5669_1},
 	{"ad5669-2", ID_AD5669_2},
 	{"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
+	{"ltc2606", ID_LTC2606},
+	{"ltc2607", ID_LTC2607},
+	{"ltc2609", ID_LTC2609},
+	{"ltc2616", ID_LTC2616},
+	{"ltc2617", ID_LTC2617},
+	{"ltc2619", ID_LTC2619},
+	{"ltc2626", ID_LTC2626},
+	{"ltc2627", ID_LTC2627},
+	{"ltc2629", ID_LTC2629},
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
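
The new regmap_type field is what separates the three register layouts handled
above: the original ADI parts keep the command in the top nibble of the first
I2C byte and place the power-down mode at bit 8, the ADI2 parts (the
ad5625/ad5665 families) use a 3-bit command shift and a mode shift of 4, and
the LTC parts send the power-down command per channel address with no mode
bits at all (their only mode is 90kohm_to_gnd). A minimal standalone sketch of
the first-byte packing follows; the enum and helper name are illustrative only
and assume the same shift values as ad5064_i2c_write() above.

/* Illustrative only, not part of the driver. */
enum ad5064_layout { LAYOUT_ADI, LAYOUT_ADI2, LAYOUT_LTC };

unsigned char ad5064_first_i2c_byte(enum ad5064_layout layout,
				    unsigned int cmd, unsigned int addr)
{
	/* ad5625/ad5665 style parts shift the command by 3, all others by 4 */
	unsigned int cmd_shift = (layout == LAYOUT_ADI2) ? 3 : 4;

	return (cmd << cmd_shift) | addr;
}
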
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
new file mode 100644
index 0000000..d6510d6
--- /dev/null
+++ b/drivers/iio/dac/ad5761.c
@@ -0,0 +1,430 @@
+/*
+ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
+ *
+ * Copyright 2016 Qtechnology A/S
+ * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/bitops.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_data/ad5761.h>
+
+#define AD5761_ADDR(addr)		((addr & 0xf) << 16)
+#define AD5761_ADDR_NOOP		0x0
+#define AD5761_ADDR_DAC_WRITE		0x3
+#define AD5761_ADDR_CTRL_WRITE_REG	0x4
+#define AD5761_ADDR_SW_DATA_RESET	0x7
+#define AD5761_ADDR_DAC_READ		0xb
+#define AD5761_ADDR_CTRL_READ_REG	0xc
+#define AD5761_ADDR_SW_FULL_RESET	0xf
+
+#define AD5761_CTRL_USE_INTVREF		BIT(5)
+#define AD5761_CTRL_ETS			BIT(6)
+
+/**
+ * struct ad5761_chip_info - chip specific information
+ * @int_vref:	Value of the internal reference voltage in mV - 0 if external
+ *		reference voltage is used
+ * @channel:	channel specification
+ */
+
+struct ad5761_chip_info {
+	unsigned long int_vref;
+	const struct iio_chan_spec channel;
+};
+
+struct ad5761_range_params {
+	int m;
+	int c;
+};
+
+enum ad5761_supported_device_ids {
+	ID_AD5721,
+	ID_AD5721R,
+	ID_AD5761,
+	ID_AD5761R,
+};
+
+/**
+ * struct ad5761_state - driver instance specific data
+ * @spi:		spi_device
+ * @vref_reg:		reference voltage regulator
+ * @use_intref:		true when the internal voltage reference is used
+ * @vref:		actual voltage reference in mVolts
+ * @range:		output range mode used
+ * @data:		cache aligned spi buffer
+ */
+struct ad5761_state {
+	struct spi_device		*spi;
+	struct regulator		*vref_reg;
+
+	bool use_intref;
+	int vref;
+	enum ad5761_voltage_range range;
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	union {
+		__be32 d32;
+		u8 d8[4];
+	} data[3] ____cacheline_aligned;
+};
+
+static const struct ad5761_range_params ad5761_range_params[] = {
+	[AD5761_VOLTAGE_RANGE_M10V_10V] = {
+		.m = 80,
+		.c = 40,
+	},
+	[AD5761_VOLTAGE_RANGE_0V_10V] = {
+		.m = 40,
+		.c = 0,
+	},
+	[AD5761_VOLTAGE_RANGE_M5V_5V] = {
+		.m = 40,
+		.c = 20,
+	},
+	[AD5761_VOLTAGE_RANGE_0V_5V] = {
+		.m = 20,
+		.c = 0,
+	},
+	[AD5761_VOLTAGE_RANGE_M2V5_7V5] = {
+		.m = 40,
+		.c = 10,
+	},
+	[AD5761_VOLTAGE_RANGE_M3V_3V] = {
+		.m = 24,
+		.c = 12,
+	},
+	[AD5761_VOLTAGE_RANGE_0V_16V] = {
+		.m = 64,
+		.c = 0,
+	},
+	[AD5761_VOLTAGE_RANGE_0V_20V] = {
+		.m = 80,
+		.c = 0,
+	},
+};
+
+static int _ad5761_spi_write(struct ad5761_state *st, u8 addr, u16 val)
+{
+	st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr) | val);
+
+	return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val)
+{
+	struct ad5761_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+	ret = _ad5761_spi_write(st, addr, val);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static int _ad5761_spi_read(struct ad5761_state *st, u8 addr, u16 *val)
+{
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = &st->data[0].d8[1],
+			.bits_per_word = 8,
+			.len = 3,
+			.cs_change = true,
+		}, {
+			.tx_buf = &st->data[1].d8[1],
+			.rx_buf = &st->data[2].d8[1],
+			.bits_per_word = 8,
+			.len = 3,
+		},
+	};
+
+	st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr));
+	st->data[1].d32 = cpu_to_be32(AD5761_ADDR(AD5761_ADDR_NOOP));
+
+	ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+
+	*val = be32_to_cpu(st->data[2].d32);
+
+	return ret;
+}
+
+static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val)
+{
+	struct ad5761_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+	ret = _ad5761_spi_read(st, addr, val);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static int ad5761_spi_set_range(struct ad5761_state *st,
+				enum ad5761_voltage_range range)
+{
+	u16 aux;
+	int ret;
+
+	aux = (range & 0x7) | AD5761_CTRL_ETS;
+
+	if (st->use_intref)
+		aux |= AD5761_CTRL_USE_INTVREF;
+
+	ret = _ad5761_spi_write(st, AD5761_ADDR_SW_FULL_RESET, 0);
+	if (ret)
+		return ret;
+
+	ret = _ad5761_spi_write(st, AD5761_ADDR_CTRL_WRITE_REG, aux);
+	if (ret)
+		return ret;
+
+	st->range = range;
+
+	return 0;
+}
+
+static int ad5761_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long mask)
+{
+	struct ad5761_state *st;
+	int ret;
+	u16 aux;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = ad5761_spi_read(indio_dev, AD5761_ADDR_DAC_READ, &aux);
+		if (ret)
+			return ret;
+		*val = aux >> chan->scan_type.shift;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		st = iio_priv(indio_dev);
+		*val = st->vref * ad5761_range_params[st->range].m;
+		*val /= 10;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_OFFSET:
+		st = iio_priv(indio_dev);
+		*val = -(1 << chan->scan_type.realbits);
+		*val *=	ad5761_range_params[st->range].c;
+		*val /=	ad5761_range_params[st->range].m;
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int ad5761_write_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int val,
+			    int val2,
+			    long mask)
+{
+	u16 aux;
+
+	if (mask != IIO_CHAN_INFO_RAW)
+		return -EINVAL;
+
+	if (val2 || (val << chan->scan_type.shift) > 0xffff || val < 0)
+		return -EINVAL;
+
+	aux = val << chan->scan_type.shift;
+
+	return ad5761_spi_write(indio_dev, AD5761_ADDR_DAC_WRITE, aux);
+}
+
+static const struct iio_info ad5761_info = {
+	.read_raw = &ad5761_read_raw,
+	.write_raw = &ad5761_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+#define AD5761_CHAN(_bits) {				\
+	.type = IIO_VOLTAGE,				\
+	.output = 1,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |	\
+		BIT(IIO_CHAN_INFO_OFFSET),		\
+	.scan_type = {					\
+		.sign = 'u',				\
+		.realbits = (_bits),			\
+		.storagebits = 16,			\
+		.shift = 16 - (_bits),			\
+	},						\
+}
+
+static const struct ad5761_chip_info ad5761_chip_infos[] = {
+	[ID_AD5721] = {
+		.int_vref = 0,
+		.channel = AD5761_CHAN(12),
+	},
+	[ID_AD5721R] = {
+		.int_vref = 2500,
+		.channel = AD5761_CHAN(12),
+	},
+	[ID_AD5761] = {
+		.int_vref = 0,
+		.channel = AD5761_CHAN(16),
+	},
+	[ID_AD5761R] = {
+		.int_vref = 2500,
+		.channel = AD5761_CHAN(16),
+	},
+};
+
+static int ad5761_get_vref(struct ad5761_state *st,
+			   const struct ad5761_chip_info *chip_info)
+{
+	int ret;
+
+	st->vref_reg = devm_regulator_get_optional(&st->spi->dev, "vref");
+	if (PTR_ERR(st->vref_reg) == -ENODEV) {
+		/* Use Internal regulator */
+		if (!chip_info->int_vref) {
+			dev_err(&st->spi->dev,
+				"Voltage reference not found\n");
+			return -EIO;
+		}
+
+		st->use_intref = true;
+		st->vref = chip_info->int_vref;
+		return 0;
+	}
+
+	if (IS_ERR(st->vref_reg)) {
+		dev_err(&st->spi->dev,
+			"Error getting voltage reference regulator\n");
+		return PTR_ERR(st->vref_reg);
+	}
+
+	ret = regulator_enable(st->vref_reg);
+	if (ret) {
+		dev_err(&st->spi->dev,
+			 "Failed to enable voltage reference\n");
+		return ret;
+	}
+
+	ret = regulator_get_voltage(st->vref_reg);
+	if (ret < 0) {
+		dev_err(&st->spi->dev,
+			 "Failed to get voltage reference value\n");
+		goto disable_regulator_vref;
+	}
+
+	if (ret < 2000000 || ret > 3000000) {
+		dev_warn(&st->spi->dev,
+			 "Invalid external voltage ref. value %d uV\n", ret);
+		ret = -EIO;
+		goto disable_regulator_vref;
+	}
+
+	st->vref = ret / 1000;
+	st->use_intref = false;
+
+	return 0;
+
+disable_regulator_vref:
+	regulator_disable(st->vref_reg);
+	st->vref_reg = NULL;
+	return ret;
+}
+
+static int ad5761_probe(struct spi_device *spi)
+{
+	struct iio_dev *iio_dev;
+	struct ad5761_state *st;
+	int ret;
+	const struct ad5761_chip_info *chip_info =
+		&ad5761_chip_infos[spi_get_device_id(spi)->driver_data];
+	enum ad5761_voltage_range voltage_range = AD5761_VOLTAGE_RANGE_0V_5V;
+	struct ad5761_platform_data *pdata = dev_get_platdata(&spi->dev);
+
+	iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!iio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(iio_dev);
+
+	st->spi = spi;
+	spi_set_drvdata(spi, iio_dev);
+
+	ret = ad5761_get_vref(st, chip_info);
+	if (ret)
+		return ret;
+
+	if (pdata)
+		voltage_range = pdata->voltage_range;
+
+	ret = ad5761_spi_set_range(st, voltage_range);
+	if (ret)
+		goto disable_regulator_err;
+
+	iio_dev->dev.parent = &spi->dev;
+	iio_dev->info = &ad5761_info;
+	iio_dev->modes = INDIO_DIRECT_MODE;
+	iio_dev->channels = &chip_info->channel;
+	iio_dev->num_channels = 1;
+	iio_dev->name = spi_get_device_id(st->spi)->name;
+	ret = iio_device_register(iio_dev);
+	if (ret)
+		goto disable_regulator_err;
+
+	return 0;
+
+disable_regulator_err:
+	if (!IS_ERR_OR_NULL(st->vref_reg))
+		regulator_disable(st->vref_reg);
+
+	return ret;
+}
+
+static int ad5761_remove(struct spi_device *spi)
+{
+	struct iio_dev *iio_dev = spi_get_drvdata(spi);
+	struct ad5761_state *st = iio_priv(iio_dev);
+
+	iio_device_unregister(iio_dev);
+
+	if (!IS_ERR_OR_NULL(st->vref_reg))
+		regulator_disable(st->vref_reg);
+
+	return 0;
+}
+
+static const struct spi_device_id ad5761_id[] = {
+	{"ad5721", ID_AD5721},
+	{"ad5721r", ID_AD5721R},
+	{"ad5761", ID_AD5761},
+	{"ad5761r", ID_AD5761R},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad5761_id);
+
+static struct spi_driver ad5761_driver = {
+	.driver = {
+		   .name = "ad5761",
+		   },
+	.probe = ad5761_probe,
+	.remove = ad5761_remove,
+	.id_table = ad5761_id,
+};
+module_spi_driver(ad5761_driver);
+
+MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD5721, AD5721R, AD5761, AD5761R driver");
+MODULE_LICENSE("GPL v2");
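
The m and c constants in ad5761_range_params are scaled so that
ad5761_read_raw() can report IIO scale and offset directly: the full-scale
span works out to vref * m / 10 and the bottom of the range to -vref * c / 10.
A small self-contained check of that arithmetic, a sketch rather than driver
code, assuming an AD5761R (16 bits, 2500 mV internal reference) configured for
the +/-10 V range (m = 80, c = 40):

#include <stdio.h>

int main(void)
{
	const int vref_mv = 2500, m = 80, c = 40, realbits = 16;

	/* IIO_VAL_FRACTIONAL_LOG2: scale = (vref * m / 10) / 2^realbits */
	double scale_mv = (double)vref_mv * m / 10 / (1 << realbits);
	/* IIO_CHAN_INFO_OFFSET as returned by ad5761_read_raw() */
	int offset_codes = -(1 << realbits) * c / m;

	/* IIO convention: value = (raw + offset) * scale */
	printf("code 0     -> %.3f V\n", (0 + offset_codes) * scale_mv / 1000);
	printf("code 65535 -> %.3f V\n", (65535 + offset_codes) * scale_mv / 1000);
	return 0;
}

Running this prints roughly -10.000 V and 10.000 V, matching the selected
range, which is a quick way to sanity-check a new entry in the table.
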
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index b4dde83..cca935c 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -1,5 +1,5 @@
 /*
- * mcp4725.c - Support for Microchip MCP4725
+ * mcp4725.c - Support for Microchip MCP4725/6
  *
  * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
  *
@@ -134,6 +134,12 @@
 	"500kohm_to_gnd"
 };
 
+static const char * const mcp4726_powerdown_modes[] = {
+	"1kohm_to_gnd",
+	"125kohm_to_gnd",
+	"640kohm_to_gnd"
+};
+
 static int mcp4725_get_powerdown_mode(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan)
 {
@@ -182,11 +188,24 @@
 	return len;
 }
 
-static const struct iio_enum mcp4725_powerdown_mode_enum = {
-	.items = mcp4725_powerdown_modes,
-	.num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
-	.get = mcp4725_get_powerdown_mode,
-	.set = mcp4725_set_powerdown_mode,
+enum {
+	MCP4725,
+	MCP4726,
+};
+
+static const struct iio_enum mcp472x_powerdown_mode_enum[] = {
+	[MCP4725] = {
+		.items = mcp4725_powerdown_modes,
+		.num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
+		.get = mcp4725_get_powerdown_mode,
+		.set = mcp4725_set_powerdown_mode,
+	},
+	[MCP4726] = {
+		.items = mcp4726_powerdown_modes,
+		.num_items = ARRAY_SIZE(mcp4726_powerdown_modes),
+		.get = mcp4725_get_powerdown_mode,
+		.set = mcp4725_set_powerdown_mode,
+	},
 };
 
 static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
@@ -196,19 +215,46 @@
 		.write = mcp4725_write_powerdown,
 		.shared = IIO_SEPARATE,
 	},
-	IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4725_powerdown_mode_enum),
-	IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum),
+	IIO_ENUM("powerdown_mode", IIO_SEPARATE,
+			&mcp472x_powerdown_mode_enum[MCP4725]),
+	IIO_ENUM_AVAILABLE("powerdown_mode",
+			&mcp472x_powerdown_mode_enum[MCP4725]),
 	{ },
 };
 
-static const struct iio_chan_spec mcp4725_channel = {
-	.type		= IIO_VOLTAGE,
-	.indexed	= 1,
-	.output		= 1,
-	.channel	= 0,
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
-	.ext_info	= mcp4725_ext_info,
+static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
+	{
+		.name = "powerdown",
+		.read = mcp4725_read_powerdown,
+		.write = mcp4725_write_powerdown,
+		.shared = IIO_SEPARATE,
+	},
+	IIO_ENUM("powerdown_mode", IIO_SEPARATE,
+			&mcp472x_powerdown_mode_enum[MCP4726]),
+	IIO_ENUM_AVAILABLE("powerdown_mode",
+			&mcp472x_powerdown_mode_enum[MCP4726]),
+	{ },
+};
+
+static const struct iio_chan_spec mcp472x_channel[] = {
+	[MCP4725] = {
+		.type		= IIO_VOLTAGE,
+		.indexed	= 1,
+		.output		= 1,
+		.channel	= 0,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.ext_info	= mcp4725_ext_info,
+	},
+	[MCP4726] = {
+		.type		= IIO_VOLTAGE,
+		.indexed	= 1,
+		.output		= 1,
+		.channel	= 0,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.ext_info	= mcp4726_ext_info,
+	},
 };
 
 static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
@@ -302,7 +348,7 @@
 	indio_dev->dev.parent = &client->dev;
 	indio_dev->name = id->name;
 	indio_dev->info = &mcp4725_info;
-	indio_dev->channels = &mcp4725_channel;
+	indio_dev->channels = &mcp472x_channel[id->driver_data];
 	indio_dev->num_channels = 1;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
@@ -316,7 +362,7 @@
 	}
 	pd = (inbuf[0] >> 1) & 0x3;
 	data->powerdown = pd > 0 ? true : false;
-	data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */
+	data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
 	data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
 
 	return iio_device_register(indio_dev);
@@ -329,7 +375,8 @@
 }
 
 static const struct i2c_device_id mcp4725_id[] = {
-	{ "mcp4725", 0 },
+	{ "mcp4725", MCP4725 },
+	{ "mcp4726", MCP4726 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, mcp4725_id);
@@ -346,5 +393,5 @@
 module_i2c_driver(mcp4725_driver);
 
 MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
-MODULE_DESCRIPTION("MCP4725 12-bit DAC");
+MODULE_DESCRIPTION("MCP4725/6 12-bit DAC");
 MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
new file mode 100644
index 0000000..174f4b7
--- /dev/null
+++ b/drivers/iio/dac/stx104.c
@@ -0,0 +1,152 @@
+/*
+ * DAC driver for the Apex Embedded Systems STX104
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define STX104_NUM_CHAN 2
+
+#define STX104_CHAN(chan) {				\
+	.type = IIO_VOLTAGE,				\
+	.channel = chan,				\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.indexed = 1,					\
+	.output = 1					\
+}
+
+#define STX104_EXTENT 16
+/**
+ * The highest base address possible for an ISA device is 0x3FF; this results in
+ * 1024 possible base addresses. Dividing the number of possible base addresses
+ * by the address extent taken by each device results in the maximum number of
+ * devices on a system.
+ */
+#define MAX_NUM_STX104 (1024 / STX104_EXTENT)
+
+static unsigned base[MAX_NUM_STX104];
+static unsigned num_stx104;
+module_param_array(base, uint, &num_stx104, 0);
+MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
+
+/**
+ * struct stx104_iio - IIO device private data structure
+ * @chan_out_states:	channels' output states
+ * @base:		base port address of the IIO device
+ */
+struct stx104_iio {
+	unsigned chan_out_states[STX104_NUM_CHAN];
+	unsigned base;
+};
+
+static int stx104_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+	struct stx104_iio *const priv = iio_priv(indio_dev);
+
+	if (mask != IIO_CHAN_INFO_RAW)
+		return -EINVAL;
+
+	*val = priv->chan_out_states[chan->channel];
+
+	return IIO_VAL_INT;
+}
+
+static int stx104_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+	struct stx104_iio *const priv = iio_priv(indio_dev);
+	const unsigned chan_addr_offset = 2 * chan->channel;
+
+	if (mask != IIO_CHAN_INFO_RAW)
+		return -EINVAL;
+
+	priv->chan_out_states[chan->channel] = val;
+	outw(val, priv->base + 4 + chan_addr_offset);
+
+	return 0;
+}
+
+static const struct iio_info stx104_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = stx104_read_raw,
+	.write_raw = stx104_write_raw
+};
+
+static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
+	STX104_CHAN(0),
+	STX104_CHAN(1)
+};
+
+static int stx104_probe(struct device *dev, unsigned int id)
+{
+	struct iio_dev *indio_dev;
+	struct stx104_iio *priv;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	if (!devm_request_region(dev, base[id], STX104_EXTENT,
+		dev_name(dev))) {
+		dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+			base[id], base[id] + STX104_EXTENT);
+		return -EBUSY;
+	}
+
+	indio_dev->info = &stx104_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = stx104_channels;
+	indio_dev->num_channels = STX104_NUM_CHAN;
+	indio_dev->name = dev_name(dev);
+
+	priv = iio_priv(indio_dev);
+	priv->base = base[id];
+
+	/* initialize DAC output to 0V */
+	outw(0, base[id] + 4);
+	outw(0, base[id] + 6);
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver stx104_driver = {
+	.probe = stx104_probe,
+	.driver = {
+		.name = "stx104"
+	}
+};
+
+static void __exit stx104_exit(void)
+{
+	isa_unregister_driver(&stx104_driver);
+}
+
+static int __init stx104_init(void)
+{
+	return isa_register_driver(&stx104_driver, num_stx104);
+}
+
+module_init(stx104_init);
+module_exit(stx104_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Apex Embedded Systems STX104 DAC driver");
+MODULE_LICENSE("GPL v2");
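
The MAX_NUM_STX104 arithmetic in the comment above works out to 1024 / 16 = 64
possible cards, and the probe's two outw() calls zero the same ports that
stx104_write_raw() later drives: channel N lives at base + 4 + 2 * N, so
channel 0 sits at base + 4 and channel 1 at base + 6. A one-line sketch of
that mapping, not driver code:

unsigned int stx104_chan_port(unsigned int base, unsigned int channel)
{
	return base + 4 + 2 * channel;	/* chan 0 -> base + 4, chan 1 -> base + 6 */
}

Assuming the object ends up as stx104.ko, loading it for a card jumpered at
0x300 would look something like "modprobe stx104 base=0x300", with a
comma-separated list for multiple cards.
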
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 02eddce..110f95b 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -185,6 +185,11 @@
 		.drdy_irq = {
 			.addr = ST_GYRO_1_DRDY_IRQ_ADDR,
 			.mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+			/*
+			 * The sensor has IHL (active low) and open
+			 * drain settings, but only for INT1 and not
+			 * for the DRDY line on INT2.
+			 */
 		},
 		.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
 		.bootime = 2,
@@ -248,6 +253,11 @@
 		.drdy_irq = {
 			.addr = ST_GYRO_2_DRDY_IRQ_ADDR,
 			.mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+			/*
+			 * The sensor has IHL (active low) and open
+			 * drain settings, but only for INT1 and not
+			 * for the DRDY line on INT2.
+			 */
 		},
 		.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
 		.bootime = 2,
@@ -307,6 +317,11 @@
 		.drdy_irq = {
 			.addr = ST_GYRO_3_DRDY_IRQ_ADDR,
 			.mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+			/*
+			 * The sensor has IHL (active low) and open
+			 * drain settings, but only for INT1 and not
+			 * for the DRDY line on INT2.
+			 */
 		},
 		.multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
 		.bootime = 2,
diff --git a/drivers/iio/health/Kconfig b/drivers/iio/health/Kconfig
index a647679..f0c1977 100644
--- a/drivers/iio/health/Kconfig
+++ b/drivers/iio/health/Kconfig
@@ -3,7 +3,34 @@
 #
 # When adding new entries keep the list in alphabetical order
 
-menu "Health sensors"
+menu "Health Sensors"
+
+menu "Heart Rate Monitors"
+
+config AFE4403
+	tristate "TI AFE4403 Heart Rate Monitor"
+	depends on SPI_MASTER
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes to choose the Texas Instruments AFE4403
+	  heart rate monitor and low-cost pulse oximeter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called afe4403.
+
+config AFE4404
+	tristate "TI AFE4404 heart rate and pulse oximeter sensor"
+	depends on I2C
+	select REGMAP_I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes to choose the Texas Instruments AFE4404
+	  heart rate monitor and low-cost pulse oximeter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called afe4404.
 
 config MAX30100
 	tristate "MAX30100 heart rate and pulse oximeter sensor"
@@ -19,3 +46,5 @@
 	  module will be called max30100.
 
 endmenu
+
+endmenu
diff --git a/drivers/iio/health/Makefile b/drivers/iio/health/Makefile
index 7c475d7..9955a2a 100644
--- a/drivers/iio/health/Makefile
+++ b/drivers/iio/health/Makefile
@@ -4,4 +4,6 @@
 
 # When adding new entries keep the list in alphabetical order
 
+obj-$(CONFIG_AFE4403)		+= afe4403.o
+obj-$(CONFIG_AFE4404)		+= afe4404.o
 obj-$(CONFIG_MAX30100)		+= max30100.o
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
new file mode 100644
index 0000000..91c046e
--- /dev/null
+++ b/drivers/iio/health/afe4403.c
@@ -0,0 +1,708 @@
+/*
+ * AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "afe440x.h"
+
+#define AFE4403_DRIVER_NAME		"afe4403"
+
+/* AFE4403 Registers */
+#define AFE4403_TIAGAIN			0x20
+#define AFE4403_TIA_AMB_GAIN		0x21
+
+/* AFE4403 GAIN register fields */
+#define AFE4403_TIAGAIN_RES_MASK	GENMASK(2, 0)
+#define AFE4403_TIAGAIN_RES_SHIFT	0
+#define AFE4403_TIAGAIN_CAP_MASK	GENMASK(7, 3)
+#define AFE4403_TIAGAIN_CAP_SHIFT	3
+
+/* AFE4403 LEDCNTRL register fields */
+#define AFE440X_LEDCNTRL_LED1_MASK		GENMASK(15, 8)
+#define AFE440X_LEDCNTRL_LED1_SHIFT		8
+#define AFE440X_LEDCNTRL_LED2_MASK		GENMASK(7, 0)
+#define AFE440X_LEDCNTRL_LED2_SHIFT		0
+#define AFE440X_LEDCNTRL_LED_RANGE_MASK		GENMASK(17, 16)
+#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT	16
+
+/* AFE4403 CONTROL2 register fields */
+#define AFE440X_CONTROL2_PWR_DWN_TX	BIT(2)
+#define AFE440X_CONTROL2_EN_SLOW_DIAG	BIT(8)
+#define AFE440X_CONTROL2_DIAG_OUT_TRI	BIT(10)
+#define AFE440X_CONTROL2_TX_BRDG_MOD	BIT(11)
+#define AFE440X_CONTROL2_TX_REF_MASK	GENMASK(18, 17)
+#define AFE440X_CONTROL2_TX_REF_SHIFT	17
+
+/* AFE4403 NULL fields */
+#define NULL_MASK	0
+#define NULL_SHIFT	0
+
+/* AFE4403 LEDCNTRL values */
+#define AFE440X_LEDCNTRL_RANGE_TX_HALF	0x1
+#define AFE440X_LEDCNTRL_RANGE_TX_FULL	0x2
+#define AFE440X_LEDCNTRL_RANGE_TX_OFF	0x3
+
+/* AFE4403 CONTROL2 values */
+#define AFE440X_CONTROL2_TX_REF_025	0x0
+#define AFE440X_CONTROL2_TX_REF_050	0x1
+#define AFE440X_CONTROL2_TX_REF_100	0x2
+#define AFE440X_CONTROL2_TX_REF_075	0x3
+
+/* AFE4403 CONTROL3 values */
+#define AFE440X_CONTROL3_CLK_DIV_2	0x0
+#define AFE440X_CONTROL3_CLK_DIV_4	0x2
+#define AFE440X_CONTROL3_CLK_DIV_6	0x3
+#define AFE440X_CONTROL3_CLK_DIV_8	0x4
+#define AFE440X_CONTROL3_CLK_DIV_12	0x5
+#define AFE440X_CONTROL3_CLK_DIV_1	0x7
+
+/* AFE4403 TIAGAIN_CAP values */
+#define AFE4403_TIAGAIN_CAP_5_P		0x0
+#define AFE4403_TIAGAIN_CAP_10_P	0x1
+#define AFE4403_TIAGAIN_CAP_20_P	0x2
+#define AFE4403_TIAGAIN_CAP_30_P	0x3
+#define AFE4403_TIAGAIN_CAP_55_P	0x8
+#define AFE4403_TIAGAIN_CAP_155_P	0x10
+
+/* AFE4403 TIAGAIN_RES values */
+#define AFE4403_TIAGAIN_RES_500_K	0x0
+#define AFE4403_TIAGAIN_RES_250_K	0x1
+#define AFE4403_TIAGAIN_RES_100_K	0x2
+#define AFE4403_TIAGAIN_RES_50_K	0x3
+#define AFE4403_TIAGAIN_RES_25_K	0x4
+#define AFE4403_TIAGAIN_RES_10_K	0x5
+#define AFE4403_TIAGAIN_RES_1_M		0x6
+#define AFE4403_TIAGAIN_RES_NONE	0x7
+
+/**
+ * struct afe4403_data
+ * @dev - Device structure
+ * @spi - SPI device handle
+ * @regmap - Register map of the device
+ * @regulator - Pointer to the regulator for the IC
+ * @trig - IIO trigger for this device
+ * @irq - ADC_RDY line interrupt number
+ */
+struct afe4403_data {
+	struct device *dev;
+	struct spi_device *spi;
+	struct regmap *regmap;
+	struct regulator *regulator;
+	struct iio_trigger *trig;
+	int irq;
+};
+
+enum afe4403_chan_id {
+	LED1,
+	ALED1,
+	LED2,
+	ALED2,
+	LED1_ALED1,
+	LED2_ALED2,
+	ILED1,
+	ILED2,
+};
+
+static const struct afe440x_reg_info afe4403_reg_info[] = {
+	[LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
+	[ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
+	[LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
+	[ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
+	[LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
+	[LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
+	[ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
+	[ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+};
+
+static const struct iio_chan_spec afe4403_channels[] = {
+	/* ADC values */
+	AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
+	AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
+	AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
+	AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
+	AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
+	AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+	/* LED current */
+	AFE440X_CURRENT_CHAN(ILED1, "led1"),
+	AFE440X_CURRENT_CHAN(ILED2, "led2"),
+};
+
+static const struct afe440x_val_table afe4403_res_table[] = {
+	{ 500000 }, { 250000 }, { 100000 }, { 50000 },
+	{ 25000 }, { 10000 }, { 1000000 }, { 0 },
+};
+AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+
+static const struct afe440x_val_table afe4403_cap_table[] = {
+	{ 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
+	{ 0, 30000 }, { 0, 35000 }, { 0, 45000 }, { 0, 50000 },
+	{ 0, 55000 }, { 0, 60000 }, { 0, 70000 }, { 0, 75000 },
+	{ 0, 80000 }, { 0, 85000 }, { 0, 95000 }, { 0, 100000 },
+	{ 0, 155000 }, { 0, 160000 }, { 0, 170000 }, { 0, 175000 },
+	{ 0, 180000 }, { 0, 185000 }, { 0, 195000 }, { 0, 200000 },
+	{ 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
+	{ 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
+};
+AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+
+static ssize_t afe440x_show_register(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+	unsigned int reg_val, type;
+	int vals[2];
+	int ret, val_len;
+
+	ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+	if (ret)
+		return ret;
+
+	reg_val &= afe440x_attr->mask;
+	reg_val >>= afe440x_attr->shift;
+
+	switch (afe440x_attr->type) {
+	case SIMPLE:
+		type = IIO_VAL_INT;
+		val_len = 1;
+		vals[0] = reg_val;
+		break;
+	case RESISTANCE:
+	case CAPACITANCE:
+		type = IIO_VAL_INT_PLUS_MICRO;
+		val_len = 2;
+		if (reg_val < afe440x_attr->table_size) {
+			vals[0] = afe440x_attr->val_table[reg_val].integer;
+			vals[1] = afe440x_attr->val_table[reg_val].fract;
+			break;
+		}
+		return -EINVAL;
+	default:
+		return -EINVAL;
+	}
+
+	return iio_format_value(buf, type, val_len, vals);
+}
+
+static ssize_t afe440x_store_register(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+	int val, integer, fract, ret;
+
+	ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+	if (ret)
+		return ret;
+
+	switch (afe440x_attr->type) {
+	case SIMPLE:
+		val = integer;
+		break;
+	case RESISTANCE:
+	case CAPACITANCE:
+		for (val = 0; val < afe440x_attr->table_size; val++)
+			if (afe440x_attr->val_table[val].integer == integer &&
+			    afe440x_attr->val_table[val].fract == fract)
+				break;
+		if (val == afe440x_attr->table_size)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
+				 afe440x_attr->mask,
+				 (val << afe440x_attr->shift));
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+
+static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
+static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+
+static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
+static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+
+static struct attribute *afe440x_attributes[] = {
+	&afe440x_attr_tia_separate_en.dev_attr.attr,
+	&afe440x_attr_tia_resistance1.dev_attr.attr,
+	&afe440x_attr_tia_capacitance1.dev_attr.attr,
+	&afe440x_attr_tia_resistance2.dev_attr.attr,
+	&afe440x_attr_tia_capacitance2.dev_attr.attr,
+	&dev_attr_tia_resistance_available.attr,
+	&dev_attr_tia_capacitance_available.attr,
+	NULL
+};
+
+static const struct attribute_group afe440x_attribute_group = {
+	.attrs = afe440x_attributes
+};
+
+static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val)
+{
+	u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
+	u8 rx[3];
+	int ret;
+
+	/* Enable reading from the device */
+	ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = spi_write_then_read(afe->spi, &reg, 1, rx, 3);
+	if (ret)
+		return ret;
+
+	*val = (rx[0] << 16) |
+		(rx[1] << 8) |
+		(rx[2]);
+
+	/* Disable reading from the device */
+	tx[3] = AFE440X_CONTROL0_WRITE;
+	ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int afe4403_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+	int ret;
+
+	switch (chan->type) {
+	case IIO_INTENSITY:
+		switch (mask) {
+		case IIO_CHAN_INFO_RAW:
+			ret = afe4403_read(afe, reg_info.reg, val);
+			if (ret)
+				return ret;
+			return IIO_VAL_INT;
+		case IIO_CHAN_INFO_OFFSET:
+			ret = regmap_read(afe->regmap, reg_info.offreg,
+					  val);
+			if (ret)
+				return ret;
+			*val &= reg_info.mask;
+			*val >>= reg_info.shift;
+			return IIO_VAL_INT;
+		}
+		break;
+	case IIO_CURRENT:
+		switch (mask) {
+		case IIO_CHAN_INFO_RAW:
+			ret = regmap_read(afe->regmap, reg_info.reg, val);
+			if (ret)
+				return ret;
+			*val &= reg_info.mask;
+			*val >>= reg_info.shift;
+			return IIO_VAL_INT;
+		case IIO_CHAN_INFO_SCALE:
+			*val = 0;
+			*val2 = 800000;
+			return IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int afe4403_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+
+	switch (chan->type) {
+	case IIO_INTENSITY:
+		switch (mask) {
+		case IIO_CHAN_INFO_OFFSET:
+			return regmap_update_bits(afe->regmap,
+				reg_info.offreg,
+				reg_info.mask,
+				(val << reg_info.shift));
+		}
+		break;
+	case IIO_CURRENT:
+		switch (mask) {
+		case IIO_CHAN_INFO_RAW:
+			return regmap_update_bits(afe->regmap,
+				reg_info.reg,
+				reg_info.mask,
+				(val << reg_info.shift));
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info afe4403_iio_info = {
+	.attrs = &afe440x_attribute_group,
+	.read_raw = afe4403_read_raw,
+	.write_raw = afe4403_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static irqreturn_t afe4403_trigger_handler(int irq, void *private)
+{
+	struct iio_poll_func *pf = private;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	int ret, bit, i = 0;
+	s32 buffer[8];
+	u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
+	u8 rx[3];
+
+	/* Enable reading from the device */
+	ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+	if (ret)
+		goto err;
+
+	for_each_set_bit(bit, indio_dev->active_scan_mask,
+			 indio_dev->masklength) {
+		ret = spi_write_then_read(afe->spi,
+					  &afe4403_reg_info[bit].reg, 1,
+					  rx, 3);
+		if (ret)
+			goto err;
+
+		buffer[i++] = (rx[0] << 16) |
+				(rx[1] << 8) |
+				(rx[2]);
+	}
+
+	/* Disable reading from the device */
+	tx[3] = AFE440X_CONTROL0_WRITE;
+	ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+	if (ret)
+		goto err;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static const struct iio_trigger_ops afe4403_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+#define AFE4403_TIMING_PAIRS			\
+	{ AFE440X_LED2STC,	0x000050 },	\
+	{ AFE440X_LED2ENDC,	0x0003e7 },	\
+	{ AFE440X_LED1LEDSTC,	0x0007d0 },	\
+	{ AFE440X_LED1LEDENDC,	0x000bb7 },	\
+	{ AFE440X_ALED2STC,	0x000438 },	\
+	{ AFE440X_ALED2ENDC,	0x0007cf },	\
+	{ AFE440X_LED1STC,	0x000820 },	\
+	{ AFE440X_LED1ENDC,	0x000bb7 },	\
+	{ AFE440X_LED2LEDSTC,	0x000000 },	\
+	{ AFE440X_LED2LEDENDC,	0x0003e7 },	\
+	{ AFE440X_ALED1STC,	0x000c08 },	\
+	{ AFE440X_ALED1ENDC,	0x000f9f },	\
+	{ AFE440X_LED2CONVST,	0x0003ef },	\
+	{ AFE440X_LED2CONVEND,	0x0007cf },	\
+	{ AFE440X_ALED2CONVST,	0x0007d7 },	\
+	{ AFE440X_ALED2CONVEND,	0x000bb7 },	\
+	{ AFE440X_LED1CONVST,	0x000bbf },	\
+	{ AFE440X_LED1CONVEND,	0x009c3f },	\
+	{ AFE440X_ALED1CONVST,	0x000fa7 },	\
+	{ AFE440X_ALED1CONVEND,	0x001387 },	\
+	{ AFE440X_ADCRSTSTCT0,	0x0003e8 },	\
+	{ AFE440X_ADCRSTENDCT0,	0x0003eb },	\
+	{ AFE440X_ADCRSTSTCT1,	0x0007d0 },	\
+	{ AFE440X_ADCRSTENDCT1,	0x0007d3 },	\
+	{ AFE440X_ADCRSTSTCT2,	0x000bb8 },	\
+	{ AFE440X_ADCRSTENDCT2,	0x000bbb },	\
+	{ AFE440X_ADCRSTSTCT3,	0x000fa0 },	\
+	{ AFE440X_ADCRSTENDCT3,	0x000fa3 },	\
+	{ AFE440X_PRPCOUNT,	0x009c3f },	\
+	{ AFE440X_PDNCYCLESTC,	0x001518 },	\
+	{ AFE440X_PDNCYCLEENDC,	0x00991f }
+
+static const struct reg_sequence afe4403_reg_sequences[] = {
+	AFE4403_TIMING_PAIRS,
+	{ AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007},
+	{ AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M },
+	{ AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) |
+			    (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) },
+	{ AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 <<
+			    AFE440X_CONTROL2_TX_REF_SHIFT },
+};
+
+static const struct regmap_range afe4403_yes_ranges[] = {
+	regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL),
+};
+
+static const struct regmap_access_table afe4403_volatile_table = {
+	.yes_ranges = afe4403_yes_ranges,
+	.n_yes_ranges = ARRAY_SIZE(afe4403_yes_ranges),
+};
+
+static const struct regmap_config afe4403_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 24,
+
+	.max_register = AFE440X_PDNCYCLEENDC,
+	.cache_type = REGCACHE_RBTREE,
+	.volatile_table = &afe4403_volatile_table,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id afe4403_of_match[] = {
+	{ .compatible = "ti,afe4403", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, afe4403_of_match);
+#endif
+
+static int afe4403_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	int ret;
+
+	ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+				 AFE440X_CONTROL2_PDN_AFE,
+				 AFE440X_CONTROL2_PDN_AFE);
+	if (ret)
+		return ret;
+
+	ret = regulator_disable(afe->regulator);
+	if (ret) {
+		dev_err(dev, "Unable to disable regulator\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int afe4403_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_enable(afe->regulator);
+	if (ret) {
+		dev_err(dev, "Unable to enable regulator\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+				 AFE440X_CONTROL2_PDN_AFE, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend, afe4403_resume);
+
+static int afe4403_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct afe4403_data *afe;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	afe = iio_priv(indio_dev);
+	spi_set_drvdata(spi, indio_dev);
+
+	afe->dev = &spi->dev;
+	afe->spi = spi;
+	afe->irq = spi->irq;
+
+	afe->regmap = devm_regmap_init_spi(spi, &afe4403_regmap_config);
+	if (IS_ERR(afe->regmap)) {
+		dev_err(afe->dev, "Unable to allocate register map\n");
+		return PTR_ERR(afe->regmap);
+	}
+
+	afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+	if (IS_ERR(afe->regulator)) {
+		dev_err(afe->dev, "Unable to get regulator\n");
+		return PTR_ERR(afe->regulator);
+	}
+	ret = regulator_enable(afe->regulator);
+	if (ret) {
+		dev_err(afe->dev, "Unable to enable regulator\n");
+		return ret;
+	}
+
+	ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
+			   AFE440X_CONTROL0_SW_RESET);
+	if (ret) {
+		dev_err(afe->dev, "Unable to reset device\n");
+		goto err_disable_reg;
+	}
+
+	ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
+				     ARRAY_SIZE(afe4403_reg_sequences));
+	if (ret) {
+		dev_err(afe->dev, "Unable to set register defaults\n");
+		goto err_disable_reg;
+	}
+
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->dev.parent = afe->dev;
+	indio_dev->channels = afe4403_channels;
+	indio_dev->num_channels = ARRAY_SIZE(afe4403_channels);
+	indio_dev->name = AFE4403_DRIVER_NAME;
+	indio_dev->info = &afe4403_iio_info;
+
+	if (afe->irq > 0) {
+		afe->trig = devm_iio_trigger_alloc(afe->dev,
+						   "%s-dev%d",
+						   indio_dev->name,
+						   indio_dev->id);
+		if (!afe->trig) {
+			dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+			ret = -ENOMEM;
+			goto err_disable_reg;
+		}
+
+		iio_trigger_set_drvdata(afe->trig, indio_dev);
+
+		afe->trig->ops = &afe4403_trigger_ops;
+		afe->trig->dev.parent = afe->dev;
+
+		ret = iio_trigger_register(afe->trig);
+		if (ret) {
+			dev_err(afe->dev, "Unable to register IIO trigger\n");
+			goto err_disable_reg;
+		}
+
+		ret = devm_request_threaded_irq(afe->dev, afe->irq,
+						iio_trigger_generic_data_rdy_poll,
+						NULL, IRQF_ONESHOT,
+						AFE4403_DRIVER_NAME,
+						afe->trig);
+		if (ret) {
+			dev_err(afe->dev, "Unable to request IRQ\n");
+			goto err_trig;
+		}
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+					 afe4403_trigger_handler, NULL);
+	if (ret) {
+		dev_err(afe->dev, "Unable to setup buffer\n");
+		goto err_trig;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(afe->dev, "Unable to register IIO device\n");
+		goto err_buff;
+	}
+
+	return 0;
+
+err_buff:
+	iio_triggered_buffer_cleanup(indio_dev);
+err_trig:
+	if (afe->irq > 0)
+		iio_trigger_unregister(afe->trig);
+err_disable_reg:
+	regulator_disable(afe->regulator);
+
+	return ret;
+}
+
+static int afe4403_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct afe4403_data *afe = iio_priv(indio_dev);
+	int ret;
+
+	iio_device_unregister(indio_dev);
+
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	if (afe->irq > 0)
+		iio_trigger_unregister(afe->trig);
+
+	ret = regulator_disable(afe->regulator);
+	if (ret) {
+		dev_err(afe->dev, "Unable to disable regulator\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct spi_device_id afe4403_ids[] = {
+	{ "afe4403", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, afe4403_ids);
+
+static struct spi_driver afe4403_spi_driver = {
+	.driver = {
+		.name = AFE4403_DRIVER_NAME,
+		.of_match_table = of_match_ptr(afe4403_of_match),
+		.pm = &afe4403_pm_ops,
+	},
+	.probe = afe4403_probe,
+	.remove = afe4403_remove,
+	.id_table = afe4403_ids,
+};
+module_spi_driver(afe4403_spi_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
new file mode 100644
index 0000000..0759268
--- /dev/null
+++ b/drivers/iio/health/afe4404.c
@@ -0,0 +1,679 @@
+/*
+ * AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "afe440x.h"
+
+#define AFE4404_DRIVER_NAME		"afe4404"
+
+/* AFE4404 registers */
+#define AFE4404_TIA_GAIN_SEP		0x20
+#define AFE4404_TIA_GAIN		0x21
+#define AFE4404_PROG_TG_STC		0x34
+#define AFE4404_PROG_TG_ENDC		0x35
+#define AFE4404_LED3LEDSTC		0x36
+#define AFE4404_LED3LEDENDC		0x37
+#define AFE4404_CLKDIV_PRF		0x39
+#define AFE4404_OFFDAC			0x3a
+#define AFE4404_DEC			0x3d
+#define AFE4404_AVG_LED2_ALED2VAL	0x3f
+#define AFE4404_AVG_LED1_ALED1VAL	0x40
+
+/* AFE4404 GAIN register fields */
+#define AFE4404_TIA_GAIN_RES_MASK	GENMASK(2, 0)
+#define AFE4404_TIA_GAIN_RES_SHIFT	0
+#define AFE4404_TIA_GAIN_CAP_MASK	GENMASK(5, 3)
+#define AFE4404_TIA_GAIN_CAP_SHIFT	3
+
+/* AFE4404 LEDCNTRL register fields */
+#define AFE4404_LEDCNTRL_ILED1_MASK	GENMASK(5, 0)
+#define AFE4404_LEDCNTRL_ILED1_SHIFT	0
+#define AFE4404_LEDCNTRL_ILED2_MASK	GENMASK(11, 6)
+#define AFE4404_LEDCNTRL_ILED2_SHIFT	6
+#define AFE4404_LEDCNTRL_ILED3_MASK	GENMASK(17, 12)
+#define AFE4404_LEDCNTRL_ILED3_SHIFT	12
+
+/* AFE4404 CONTROL2 register fields */
+#define AFE440X_CONTROL2_ILED_2X_MASK	BIT(17)
+#define AFE440X_CONTROL2_ILED_2X_SHIFT	17
+
+/* AFE4404 CONTROL3 register fields */
+#define AFE440X_CONTROL3_OSC_ENABLE	BIT(9)
+
+/* AFE4404 OFFDAC register current fields */
+#define AFE4404_OFFDAC_CURR_LED1_MASK	GENMASK(9, 5)
+#define AFE4404_OFFDAC_CURR_LED1_SHIFT	5
+#define AFE4404_OFFDAC_CURR_LED2_MASK	GENMASK(19, 15)
+#define AFE4404_OFFDAC_CURR_LED2_SHIFT	15
+#define AFE4404_OFFDAC_CURR_LED3_MASK	GENMASK(4, 0)
+#define AFE4404_OFFDAC_CURR_LED3_SHIFT	0
+#define AFE4404_OFFDAC_CURR_ALED1_MASK	GENMASK(14, 10)
+#define AFE4404_OFFDAC_CURR_ALED1_SHIFT	10
+#define AFE4404_OFFDAC_CURR_ALED2_MASK	GENMASK(4, 0)
+#define AFE4404_OFFDAC_CURR_ALED2_SHIFT	0
+
+/* AFE4404 NULL fields */
+#define NULL_MASK	0
+#define NULL_SHIFT	0
+
+/* AFE4404 TIA_GAIN_CAP values */
+#define AFE4404_TIA_GAIN_CAP_5_P	0x0
+#define AFE4404_TIA_GAIN_CAP_2_5_P	0x1
+#define AFE4404_TIA_GAIN_CAP_10_P	0x2
+#define AFE4404_TIA_GAIN_CAP_7_5_P	0x3
+#define AFE4404_TIA_GAIN_CAP_20_P	0x4
+#define AFE4404_TIA_GAIN_CAP_17_5_P	0x5
+#define AFE4404_TIA_GAIN_CAP_25_P	0x6
+#define AFE4404_TIA_GAIN_CAP_22_5_P	0x7
+
+/* AFE4404 TIA_GAIN_RES values */
+#define AFE4404_TIA_GAIN_RES_500_K	0x0
+#define AFE4404_TIA_GAIN_RES_250_K	0x1
+#define AFE4404_TIA_GAIN_RES_100_K	0x2
+#define AFE4404_TIA_GAIN_RES_50_K	0x3
+#define AFE4404_TIA_GAIN_RES_25_K	0x4
+#define AFE4404_TIA_GAIN_RES_10_K	0x5
+#define AFE4404_TIA_GAIN_RES_1_M	0x6
+#define AFE4404_TIA_GAIN_RES_2_M	0x7
+
+/**
+ * struct afe4404_data
+ * @dev: Device structure
+ * @regmap: Register map of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
+ */
+struct afe4404_data {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regulator *regulator;
+	struct iio_trigger *trig;
+	int irq;
+};
+
+enum afe4404_chan_id {
+	LED1,
+	ALED1,
+	LED2,
+	ALED2,
+	LED3,
+	LED1_ALED1,
+	LED2_ALED2,
+	ILED1,
+	ILED2,
+	ILED3,
+};
+
+static const struct afe440x_reg_info afe4404_reg_info[] = {
+	[LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1),
+	[ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1),
+	[LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2),
+	[ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2),
+	[LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
+	[LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
+	[LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
+	[ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1),
+	[ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2),
+	[ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3),
+};
+
+static const struct iio_chan_spec afe4404_channels[] = {
+	/* ADC values */
+	AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)),
+	AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
+	AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
+	AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
+	AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)),
+	AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
+	AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+	/* LED current */
+	AFE440X_CURRENT_CHAN(ILED1, "led1"),
+	AFE440X_CURRENT_CHAN(ILED2, "led2"),
+	AFE440X_CURRENT_CHAN(ILED3, "led3"),
+};
+
+static const struct afe440x_val_table afe4404_res_table[] = {
+	{ .integer = 500000, .fract = 0 },
+	{ .integer = 250000, .fract = 0 },
+	{ .integer = 100000, .fract = 0 },
+	{ .integer = 50000, .fract = 0 },
+	{ .integer = 25000, .fract = 0 },
+	{ .integer = 10000, .fract = 0 },
+	{ .integer = 1000000, .fract = 0 },
+	{ .integer = 2000000, .fract = 0 },
+};
+AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table);
+
+static const struct afe440x_val_table afe4404_cap_table[] = {
+	{ .integer = 0, .fract = 5000 },
+	{ .integer = 0, .fract = 2500 },
+	{ .integer = 0, .fract = 10000 },
+	{ .integer = 0, .fract = 7500 },
+	{ .integer = 0, .fract = 20000 },
+	{ .integer = 0, .fract = 17500 },
+	{ .integer = 0, .fract = 25000 },
+	{ .integer = 0, .fract = 22500 },
+};
+AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table);
+
+static ssize_t afe440x_show_register(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+	unsigned int reg_val, type;
+	int vals[2];
+	int ret, val_len;
+
+	ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+	if (ret)
+		return ret;
+
+	reg_val &= afe440x_attr->mask;
+	reg_val >>= afe440x_attr->shift;
+
+	switch (afe440x_attr->type) {
+	case SIMPLE:
+		type = IIO_VAL_INT;
+		val_len = 1;
+		vals[0] = reg_val;
+		break;
+	case RESISTANCE:
+	case CAPACITANCE:
+		type = IIO_VAL_INT_PLUS_MICRO;
+		val_len = 2;
+		if (reg_val < afe440x_attr->table_size) {
+			vals[0] = afe440x_attr->val_table[reg_val].integer;
+			vals[1] = afe440x_attr->val_table[reg_val].fract;
+			break;
+		}
+		return -EINVAL;
+	default:
+		return -EINVAL;
+	}
+
+	return iio_format_value(buf, type, val_len, vals);
+}
+
+static ssize_t afe440x_store_register(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+	int val, integer, fract, ret;
+
+	ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+	if (ret)
+		return ret;
+
+	switch (afe440x_attr->type) {
+	case SIMPLE:
+		val = integer;
+		break;
+	case RESISTANCE:
+	case CAPACITANCE:
+		for (val = 0; val < afe440x_attr->table_size; val++)
+			if (afe440x_attr->val_table[val].integer == integer &&
+			    afe440x_attr->val_table[val].fract == fract)
+				break;
+		if (val == afe440x_attr->table_size)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
+				 afe440x_attr->mask,
+				 (val << afe440x_attr->shift));
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+
+static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
+static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+
+static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
+static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+
+static struct attribute *afe440x_attributes[] = {
+	&afe440x_attr_tia_separate_en.dev_attr.attr,
+	&afe440x_attr_tia_resistance1.dev_attr.attr,
+	&afe440x_attr_tia_capacitance1.dev_attr.attr,
+	&afe440x_attr_tia_resistance2.dev_attr.attr,
+	&afe440x_attr_tia_capacitance2.dev_attr.attr,
+	&dev_attr_tia_resistance_available.attr,
+	&dev_attr_tia_capacitance_available.attr,
+	NULL
+};
+
+static const struct attribute_group afe440x_attribute_group = {
+	.attrs = afe440x_attributes
+};
+
+static int afe4404_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+	int ret;
+
+	switch (chan->type) {
+	case IIO_INTENSITY:
+		switch (mask) {
+		case IIO_CHAN_INFO_RAW:
+			ret = regmap_read(afe->regmap, reg_info.reg, val);
+			if (ret)
+				return ret;
+			return IIO_VAL_INT;
+		case IIO_CHAN_INFO_OFFSET:
+			ret = regmap_read(afe->regmap, reg_info.offreg,
+					  val);
+			if (ret)
+				return ret;
+			*val &= reg_info.mask;
+			*val >>= reg_info.shift;
+			return IIO_VAL_INT;
+		}
+		break;
+	case IIO_CURRENT:
+		switch (mask) {
+		case IIO_CHAN_INFO_RAW:
+			ret = regmap_read(afe->regmap, reg_info.reg, val);
+			if (ret)
+				return ret;
+			*val &= reg_info.mask;
+			*val >>= reg_info.shift;
+			return IIO_VAL_INT;
+		case IIO_CHAN_INFO_SCALE:
+			*val = 0;
+			*val2 = 800000;
+			return IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int afe4404_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+
+	switch (chan->type) {
+	case IIO_INTENSITY:
+		switch (mask) {
+		case IIO_CHAN_INFO_OFFSET:
+			return regmap_update_bits(afe->regmap,
+				reg_info.offreg,
+				reg_info.mask,
+				(val << reg_info.shift));
+		}
+		break;
+	case IIO_CURRENT:
+		switch (mask) {
+		case IIO_CHAN_INFO_RAW:
+			return regmap_update_bits(afe->regmap,
+				reg_info.reg,
+				reg_info.mask,
+				(val << reg_info.shift));
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info afe4404_iio_info = {
+	.attrs = &afe440x_attribute_group,
+	.read_raw = afe4404_read_raw,
+	.write_raw = afe4404_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static irqreturn_t afe4404_trigger_handler(int irq, void *private)
+{
+	struct iio_poll_func *pf = private;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	int ret, bit, i = 0;
+	s32 buffer[10];
+
+	for_each_set_bit(bit, indio_dev->active_scan_mask,
+			 indio_dev->masklength) {
+		ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg,
+				  &buffer[i++]);
+		if (ret)
+			goto err;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static const struct iio_trigger_ops afe4404_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+/* Default timings from data-sheet */
+#define AFE4404_TIMING_PAIRS			\
+	{ AFE440X_PRPCOUNT,	39999	},	\
+	{ AFE440X_LED2LEDSTC,	0	},	\
+	{ AFE440X_LED2LEDENDC,	398	},	\
+	{ AFE440X_LED2STC,	80	},	\
+	{ AFE440X_LED2ENDC,	398	},	\
+	{ AFE440X_ADCRSTSTCT0,	5600	},	\
+	{ AFE440X_ADCRSTENDCT0,	5606	},	\
+	{ AFE440X_LED2CONVST,	5607	},	\
+	{ AFE440X_LED2CONVEND,	6066	},	\
+	{ AFE4404_LED3LEDSTC,	400	},	\
+	{ AFE4404_LED3LEDENDC,	798	},	\
+	{ AFE440X_ALED2STC,	480	},	\
+	{ AFE440X_ALED2ENDC,	798	},	\
+	{ AFE440X_ADCRSTSTCT1,	6068	},	\
+	{ AFE440X_ADCRSTENDCT1,	6074	},	\
+	{ AFE440X_ALED2CONVST,	6075	},	\
+	{ AFE440X_ALED2CONVEND,	6534	},	\
+	{ AFE440X_LED1LEDSTC,	800	},	\
+	{ AFE440X_LED1LEDENDC,	1198	},	\
+	{ AFE440X_LED1STC,	880	},	\
+	{ AFE440X_LED1ENDC,	1198	},	\
+	{ AFE440X_ADCRSTSTCT2,	6536	},	\
+	{ AFE440X_ADCRSTENDCT2,	6542	},	\
+	{ AFE440X_LED1CONVST,	6543	},	\
+	{ AFE440X_LED1CONVEND,	7003	},	\
+	{ AFE440X_ALED1STC,	1280	},	\
+	{ AFE440X_ALED1ENDC,	1598	},	\
+	{ AFE440X_ADCRSTSTCT3,	7005	},	\
+	{ AFE440X_ADCRSTENDCT3,	7011	},	\
+	{ AFE440X_ALED1CONVST,	7012	},	\
+	{ AFE440X_ALED1CONVEND,	7471	},	\
+	{ AFE440X_PDNCYCLESTC,	7671	},	\
+	{ AFE440X_PDNCYCLEENDC,	39199	}
+
+static const struct reg_sequence afe4404_reg_sequences[] = {
+	AFE4404_TIMING_PAIRS,
+	{ AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
+	{ AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K },
+	{ AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) |
+			    (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) |
+			    (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) },
+	{ AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE	},
+};
+
+static const struct regmap_range afe4404_yes_ranges[] = {
+	regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL),
+	regmap_reg_range(AFE4404_AVG_LED2_ALED2VAL, AFE4404_AVG_LED1_ALED1VAL),
+};
+
+static const struct regmap_access_table afe4404_volatile_table = {
+	.yes_ranges = afe4404_yes_ranges,
+	.n_yes_ranges = ARRAY_SIZE(afe4404_yes_ranges),
+};
+
+static const struct regmap_config afe4404_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 24,
+
+	.max_register = AFE4404_AVG_LED1_ALED1VAL,
+	.cache_type = REGCACHE_RBTREE,
+	.volatile_table = &afe4404_volatile_table,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id afe4404_of_match[] = {
+	{ .compatible = "ti,afe4404", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, afe4404_of_match);
+#endif
+
+static int afe4404_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	int ret;
+
+	ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+				 AFE440X_CONTROL2_PDN_AFE,
+				 AFE440X_CONTROL2_PDN_AFE);
+	if (ret)
+		return ret;
+
+	ret = regulator_disable(afe->regulator);
+	if (ret) {
+		dev_err(dev, "Unable to disable regulator\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int afe4404_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_enable(afe->regulator);
+	if (ret) {
+		dev_err(dev, "Unable to enable regulator\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+				 AFE440X_CONTROL2_PDN_AFE, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend, afe4404_resume);
+
+static int afe4404_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct afe4404_data *afe;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	afe = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+
+	afe->dev = &client->dev;
+	afe->irq = client->irq;
+
+	afe->regmap = devm_regmap_init_i2c(client, &afe4404_regmap_config);
+	if (IS_ERR(afe->regmap)) {
+		dev_err(afe->dev, "Unable to allocate register map\n");
+		return PTR_ERR(afe->regmap);
+	}
+
+	afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+	if (IS_ERR(afe->regulator)) {
+		dev_err(afe->dev, "Unable to get regulator\n");
+		return PTR_ERR(afe->regulator);
+	}
+	ret = regulator_enable(afe->regulator);
+	if (ret) {
+		dev_err(afe->dev, "Unable to enable regulator\n");
+		return ret;
+	}
+
+	ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
+			   AFE440X_CONTROL0_SW_RESET);
+	if (ret) {
+		dev_err(afe->dev, "Unable to reset device\n");
+		goto disable_reg;
+	}
+
+	ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
+				     ARRAY_SIZE(afe4404_reg_sequences));
+	if (ret) {
+		dev_err(afe->dev, "Unable to set register defaults\n");
+		goto disable_reg;
+	}
+
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->dev.parent = afe->dev;
+	indio_dev->channels = afe4404_channels;
+	indio_dev->num_channels = ARRAY_SIZE(afe4404_channels);
+	indio_dev->name = AFE4404_DRIVER_NAME;
+	indio_dev->info = &afe4404_iio_info;
+
+	if (afe->irq > 0) {
+		afe->trig = devm_iio_trigger_alloc(afe->dev,
+						   "%s-dev%d",
+						   indio_dev->name,
+						   indio_dev->id);
+		if (!afe->trig) {
+			dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+			ret = -ENOMEM;
+			goto disable_reg;
+		}
+
+		iio_trigger_set_drvdata(afe->trig, indio_dev);
+
+		afe->trig->ops = &afe4404_trigger_ops;
+		afe->trig->dev.parent = afe->dev;
+
+		ret = iio_trigger_register(afe->trig);
+		if (ret) {
+			dev_err(afe->dev, "Unable to register IIO trigger\n");
+			goto disable_reg;
+		}
+
+		ret = devm_request_threaded_irq(afe->dev, afe->irq,
+						iio_trigger_generic_data_rdy_poll,
+						NULL, IRQF_ONESHOT,
+						AFE4404_DRIVER_NAME,
+						afe->trig);
+		if (ret) {
+			dev_err(afe->dev, "Unable to request IRQ\n");
+			goto disable_reg;
+		}
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+					 afe4404_trigger_handler, NULL);
+	if (ret) {
+		dev_err(afe->dev, "Unable to setup buffer\n");
+		goto unregister_trigger;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(afe->dev, "Unable to register IIO device\n");
+		goto unregister_triggered_buffer;
+	}
+
+	return 0;
+
+unregister_triggered_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+unregister_trigger:
+	if (afe->irq > 0)
+		iio_trigger_unregister(afe->trig);
+disable_reg:
+	regulator_disable(afe->regulator);
+
+	return ret;
+}
+
+static int afe4404_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct afe4404_data *afe = iio_priv(indio_dev);
+	int ret;
+
+	iio_device_unregister(indio_dev);
+
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	if (afe->irq > 0)
+		iio_trigger_unregister(afe->trig);
+
+	ret = regulator_disable(afe->regulator);
+	if (ret) {
+		dev_err(afe->dev, "Unable to disable regulator\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct i2c_device_id afe4404_ids[] = {
+	{ "afe4404", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, afe4404_ids);
+
+static struct i2c_driver afe4404_i2c_driver = {
+	.driver = {
+		.name = AFE4404_DRIVER_NAME,
+		.of_match_table = of_match_ptr(afe4404_of_match),
+		.pm = &afe4404_pm_ops,
+	},
+	.probe = afe4404_probe,
+	.remove = afe4404_remove,
+	.id_table = afe4404_ids,
+};
+module_i2c_driver(afe4404_i2c_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe440x.h b/drivers/iio/health/afe440x.h
new file mode 100644
index 0000000..c671ab7
--- /dev/null
+++ b/drivers/iio/health/afe440x.h
@@ -0,0 +1,191 @@
+/*
+ * AFE440X Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _AFE440X_H
+#define _AFE440X_H
+
+/* AFE440X registers */
+#define AFE440X_CONTROL0		0x00
+#define AFE440X_LED2STC			0x01
+#define AFE440X_LED2ENDC		0x02
+#define AFE440X_LED1LEDSTC		0x03
+#define AFE440X_LED1LEDENDC		0x04
+#define AFE440X_ALED2STC		0x05
+#define AFE440X_ALED2ENDC		0x06
+#define AFE440X_LED1STC			0x07
+#define AFE440X_LED1ENDC		0x08
+#define AFE440X_LED2LEDSTC		0x09
+#define AFE440X_LED2LEDENDC		0x0a
+#define AFE440X_ALED1STC		0x0b
+#define AFE440X_ALED1ENDC		0x0c
+#define AFE440X_LED2CONVST		0x0d
+#define AFE440X_LED2CONVEND		0x0e
+#define AFE440X_ALED2CONVST		0x0f
+#define AFE440X_ALED2CONVEND		0x10
+#define AFE440X_LED1CONVST		0x11
+#define AFE440X_LED1CONVEND		0x12
+#define AFE440X_ALED1CONVST		0x13
+#define AFE440X_ALED1CONVEND		0x14
+#define AFE440X_ADCRSTSTCT0		0x15
+#define AFE440X_ADCRSTENDCT0		0x16
+#define AFE440X_ADCRSTSTCT1		0x17
+#define AFE440X_ADCRSTENDCT1		0x18
+#define AFE440X_ADCRSTSTCT2		0x19
+#define AFE440X_ADCRSTENDCT2		0x1a
+#define AFE440X_ADCRSTSTCT3		0x1b
+#define AFE440X_ADCRSTENDCT3		0x1c
+#define AFE440X_PRPCOUNT		0x1d
+#define AFE440X_CONTROL1		0x1e
+#define AFE440X_LEDCNTRL		0x22
+#define AFE440X_CONTROL2		0x23
+#define AFE440X_ALARM			0x29
+#define AFE440X_LED2VAL			0x2a
+#define AFE440X_ALED2VAL		0x2b
+#define AFE440X_LED1VAL			0x2c
+#define AFE440X_ALED1VAL		0x2d
+#define AFE440X_LED2_ALED2VAL		0x2e
+#define AFE440X_LED1_ALED1VAL		0x2f
+#define AFE440X_CONTROL3		0x31
+#define AFE440X_PDNCYCLESTC		0x32
+#define AFE440X_PDNCYCLEENDC		0x33
+
+/* CONTROL0 register fields */
+#define AFE440X_CONTROL0_REG_READ	BIT(0)
+#define AFE440X_CONTROL0_TM_COUNT_RST	BIT(1)
+#define AFE440X_CONTROL0_SW_RESET	BIT(3)
+
+/* CONTROL1 register fields */
+#define AFE440X_CONTROL1_TIMEREN	BIT(8)
+
+/* TIAGAIN register fields */
+#define AFE440X_TIAGAIN_ENSEPGAIN_MASK	BIT(15)
+#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT	15
+
+/* CONTROL2 register fields */
+#define AFE440X_CONTROL2_PDN_AFE	BIT(0)
+#define AFE440X_CONTROL2_PDN_RX		BIT(1)
+#define AFE440X_CONTROL2_DYNAMIC4	BIT(3)
+#define AFE440X_CONTROL2_DYNAMIC3	BIT(4)
+#define AFE440X_CONTROL2_DYNAMIC2	BIT(14)
+#define AFE440X_CONTROL2_DYNAMIC1	BIT(20)
+
+/* CONTROL3 register fields */
+#define AFE440X_CONTROL3_CLKDIV		GENMASK(2, 0)
+
+/* CONTROL0 values */
+#define AFE440X_CONTROL0_WRITE		0x0
+#define AFE440X_CONTROL0_READ		0x1
+
+struct afe440x_reg_info {
+	unsigned int reg;
+	unsigned int offreg;
+	unsigned int shift;
+	unsigned int mask;
+};
+
+#define AFE440X_REG_INFO(_reg, _offreg, _sm)			\
+	{							\
+		.reg = _reg,					\
+		.offreg = _offreg,				\
+		.shift = _sm ## _SHIFT,				\
+		.mask = _sm ## _MASK,				\
+	}
+
+#define AFE440X_INTENSITY_CHAN(_index, _name, _mask)		\
+	{							\
+		.type = IIO_INTENSITY,				\
+		.channel = _index,				\
+		.address = _index,				\
+		.scan_index = _index,				\
+		.scan_type = {					\
+				.sign = 's',			\
+				.realbits = 24,			\
+				.storagebits = 32,		\
+				.endianness = IIO_CPU,		\
+		},						\
+		.extend_name = _name,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |	\
+			_mask,					\
+	}
+
+#define AFE440X_CURRENT_CHAN(_index, _name)			\
+	{							\
+		.type = IIO_CURRENT,				\
+		.channel = _index,				\
+		.address = _index,				\
+		.scan_index = _index,				\
+		.extend_name = _name,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |	\
+			BIT(IIO_CHAN_INFO_SCALE),		\
+		.output = true,					\
+	}
+
+enum afe440x_reg_type {
+	SIMPLE,
+	RESISTANCE,
+	CAPACITANCE,
+};
+
+struct afe440x_val_table {
+	int integer;
+	int fract;
+};
+
+#define AFE440X_TABLE_ATTR(_name, _table)				\
+static ssize_t _name ## _show(struct device *dev,			\
+			      struct device_attribute *attr, char *buf)	\
+{									\
+	ssize_t len = 0;						\
+	int i;								\
+									\
+	for (i = 0; i < ARRAY_SIZE(_table); i++)			\
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ", \
+				 _table[i].integer,			\
+				 _table[i].fract);			\
+									\
+	buf[len - 1] = '\n';						\
+									\
+	return len;							\
+}									\
+static DEVICE_ATTR_RO(_name)
+
+struct afe440x_attr {
+	struct device_attribute dev_attr;
+	unsigned int reg;
+	unsigned int shift;
+	unsigned int mask;
+	enum afe440x_reg_type type;
+	const struct afe440x_val_table *val_table;
+	unsigned int table_size;
+};
+
+#define to_afe440x_attr(_dev_attr)				\
+	container_of(_dev_attr, struct afe440x_attr, dev_attr)
+
+#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size)	\
+	struct afe440x_attr afe440x_attr_##_name = {		\
+		.dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR),	\
+				   afe440x_show_register,	\
+				   afe440x_store_register),	\
+		.reg = _reg,					\
+		.shift = _field ## _SHIFT,			\
+		.mask = _field ## _MASK,			\
+		.type = _type,					\
+		.val_table = _table,				\
+		.table_size = _size,				\
+	}
+
+#endif /* _AFE440X_H */
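For readers following the token pasting in AFE440X_REG_INFO(), this is what one entry of afe4404_reg_info[] expands to (an illustrative expansion, not part of the patch; the register and field values are the ones defined in these two files):

/* AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1) */
{
	.reg	= AFE440X_LED1VAL,			/* 0x2c */
	.offreg	= AFE4404_OFFDAC,			/* 0x3a */
	.shift	= AFE4404_OFFDAC_CURR_LED1_SHIFT,	/* 5 */
	.mask	= AFE4404_OFFDAC_CURR_LED1_MASK,	/* GENMASK(9, 5) */
}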
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 9d1c81f..09db893 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -13,7 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  *
- * TODO: allow LED current and pulse length controls via device tree properties
+ * TODO: enable pulse length controls via device tree properties
  */
 
 #include <linux/module.h>
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/i2c.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
@@ -65,6 +66,7 @@
 #define MAX30100_REG_SPO2_CONFIG_1600US		0x3
 
 #define MAX30100_REG_LED_CONFIG			0x09
+#define MAX30100_REG_LED_CONFIG_LED_MASK	0x0f
 #define MAX30100_REG_LED_CONFIG_RED_LED_SHIFT	4
 
 #define MAX30100_REG_LED_CONFIG_24MA		0x07
@@ -111,6 +113,12 @@
 	.volatile_reg = max30100_is_volatile_reg,
 };
 
+static const unsigned int max30100_led_current_mapping[] = {
+	4400, 7600, 11000, 14200, 17400,
+	20800, 24000, 27100, 30600, 33800,
+	37000, 40200, 43600, 46800, 50000
+};
+
 static const unsigned long max30100_scan_masks[] = {0x3, 0};
 
 static const struct iio_chan_spec max30100_channels[] = {
@@ -243,15 +251,76 @@
 	return IRQ_HANDLED;
 }
 
+static int max30100_get_current_idx(unsigned int val, int *reg)
+{
+	int idx;
+
+	/* LED turned off */
+	if (val == 0) {
+		*reg = 0;
+		return 0;
+	}
+
+	for (idx = 0; idx < ARRAY_SIZE(max30100_led_current_mapping); idx++) {
+		if (max30100_led_current_mapping[idx] == val) {
+			*reg = idx + 1;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int max30100_led_init(struct max30100_data *data)
+{
+	struct device *dev = &data->client->dev;
+	struct device_node *np = dev->of_node;
+	unsigned int val[2];
+	int reg, ret;
+
+	ret = of_property_read_u32_array(np, "maxim,led-current-microamp",
+					(unsigned int *) &val, 2);
+	if (ret) {
+		/* Default to 24 mA RED LED, 50 mA IR LED */
+		reg = (MAX30100_REG_LED_CONFIG_24MA <<
+			MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) |
+			MAX30100_REG_LED_CONFIG_50MA;
+		dev_warn(dev, "no led-current-microamp set");
+
+		return regmap_write(data->regmap, MAX30100_REG_LED_CONFIG, reg);
+	}
+
+	/* RED LED current */
+	ret = max30100_get_current_idx(val[0], &reg);
+	if (ret) {
+		dev_err(dev, "invalid RED current setting %d", val[0]);
+		return ret;
+	}
+
+	ret = regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG,
+		MAX30100_REG_LED_CONFIG_LED_MASK <<
+		MAX30100_REG_LED_CONFIG_RED_LED_SHIFT,
+		reg << MAX30100_REG_LED_CONFIG_RED_LED_SHIFT);
+	if (ret)
+		return ret;
+
+	/* IR LED current */
+	ret = max30100_get_current_idx(val[1], &reg);
+	if (ret) {
+		dev_err(dev, "invalid IR current setting %d", val[1]);
+		return ret;
+	}
+
+	return regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG,
+		MAX30100_REG_LED_CONFIG_LED_MASK, reg);
+}
+
 static int max30100_chip_init(struct max30100_data *data)
 {
 	int ret;
 
-	/* RED IR LED = 24mA, IR LED = 50mA */
-	ret = regmap_write(data->regmap, MAX30100_REG_LED_CONFIG,
-				(MAX30100_REG_LED_CONFIG_24MA <<
-				 MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) |
-				 MAX30100_REG_LED_CONFIG_50MA);
+	/* setup LED current settings */
+	ret = max30100_led_init(data);
 	if (ret)
 		return ret;
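A short sketch of how max30100_led_init() resolves the new "maxim,led-current-microamp" property, which carries two values, RED first then IR (illustrative only, assuming the definitions above): a requested 24000 uA matches max30100_led_current_mapping[6], so the register index becomes 7, i.e. MAX30100_REG_LED_CONFIG_24MA.

	int reg;

	/* 24000 uA -> table index 6 -> register code 6 + 1 = 0x07 */
	max30100_get_current_idx(24000, &reg);	/* reg == MAX30100_REG_LED_CONFIG_24MA */

	/* values not in the table are rejected */
	max30100_get_current_idx(12345, &reg);	/* returns -EINVAL, reg untouched */

	/* 0 uA is special-cased and turns the LED off */
	max30100_get_current_idx(0, &reg);	/* reg == 0 */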
 
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 6a23698..866dda1 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -43,14 +43,16 @@
 	  humidity and temperature sensor.
 
 	  To compile this driver as a module, choose M here: the module
-	  will be called si7005.
+	  will be called si7005. This driver also supports the Hoperf
+	  TH02 Humidity and Temperature Sensor.
 
 config SI7020
 	tristate "Si7013/20/21 Relative Humidity and Temperature Sensors"
 	depends on I2C
 	help
 	  Say yes here to build support for the Silicon Labs Si7013/20/21
-	  Relative Humidity and Temperature Sensors.
+	  Relative Humidity and Temperature Sensors. This driver also
+	  supports the Hoperf TH06 Humidity and Temperature Sensor.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called si7020.
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index cfc5a05..20b500d 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -50,12 +50,32 @@
 #define DHT11_EDGES_PER_READ (2 * DHT11_BITS_PER_READ + \
 			      DHT11_EDGES_PREAMBLE + 1)
 
-/* Data transmission timing (nano seconds) */
+/*
+ * Data transmission timing:
+ * Data bits are encoded as pulse length (high time) on the data line.
+ * 0-bit: 22-30uS -- typically 26uS (AM2302)
+ * 1-bit: 68-75uS -- typically 70uS (AM2302)
+ * The actual timings also depend on the properties of the cable, with
+ * longer cables typically making pulses shorter.
+ *
+ * Our decoding depends on the time resolution of the system:
+ * timeres > 34uS ... don't know what a 1-tick pulse is
+ * 34uS > timeres > 30uS ... no problem (30kHz and 32kHz clocks)
+ * 30uS > timeres > 23uS ... don't know what a 2-tick pulse is
+ * timeres < 23uS ... no problem
+ *
+ * Luckily clocks in the 33-44kHz range are quite uncommon, so we can
+ * support most systems if the threshold for decoding a pulse as 1-bit
+ * is chosen carefully. If somebody really wants to support clocks around
+ * 40kHz, where this driver is most unreliable, there are two options.
+ * a) select an implementation using busy loop polling on those systems
+ * b) use the checksum to do some probabilistic decoding
+ */
 #define DHT11_START_TRANSMISSION	18  /* ms */
-#define DHT11_SENSOR_RESPONSE	80000
-#define DHT11_START_BIT		50000
-#define DHT11_DATA_BIT_LOW	27000
-#define DHT11_DATA_BIT_HIGH	70000
+#define DHT11_MIN_TIMERES	34000  /* ns */
+#define DHT11_THRESHOLD		49000  /* ns */
+#define DHT11_AMBIG_LOW		23000  /* ns */
+#define DHT11_AMBIG_HIGH	30000  /* ns */
 
 struct dht11 {
 	struct device			*dev;
@@ -76,43 +96,39 @@
 	struct {s64 ts; int value; }	edges[DHT11_EDGES_PER_READ];
 };
 
-static unsigned char dht11_decode_byte(int *timing, int threshold)
+static unsigned char dht11_decode_byte(char *bits)
 {
 	unsigned char ret = 0;
 	int i;
 
 	for (i = 0; i < 8; ++i) {
 		ret <<= 1;
-		if (timing[i] >= threshold)
+		if (bits[i])
 			++ret;
 	}
 
 	return ret;
 }
 
-static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
+static int dht11_decode(struct dht11 *dht11, int offset)
 {
-	int i, t, timing[DHT11_BITS_PER_READ], threshold;
+	int i, t;
+	char bits[DHT11_BITS_PER_READ];
 	unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
 
-	threshold = DHT11_DATA_BIT_HIGH / timeres;
-	if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold)
-		pr_err("dht11: WARNING: decoding ambiguous\n");
-
-	/* scale down with timeres and check validity */
 	for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
 		t = dht11->edges[offset + 2 * i + 2].ts -
 			dht11->edges[offset + 2 * i + 1].ts;
 		if (!dht11->edges[offset + 2 * i + 1].value)
 			return -EIO;  /* lost synchronisation */
-		timing[i] = t / timeres;
+		bits[i] = t > DHT11_THRESHOLD;
 	}
 
-	hum_int = dht11_decode_byte(timing, threshold);
-	hum_dec = dht11_decode_byte(&timing[8], threshold);
-	temp_int = dht11_decode_byte(&timing[16], threshold);
-	temp_dec = dht11_decode_byte(&timing[24], threshold);
-	checksum = dht11_decode_byte(&timing[32], threshold);
+	hum_int = dht11_decode_byte(bits);
+	hum_dec = dht11_decode_byte(&bits[8]);
+	temp_int = dht11_decode_byte(&bits[16]);
+	temp_dec = dht11_decode_byte(&bits[24]);
+	checksum = dht11_decode_byte(&bits[32]);
 
 	if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
 		return -EIO;
@@ -161,12 +177,12 @@
 			int *val, int *val2, long m)
 {
 	struct dht11 *dht11 = iio_priv(iio_dev);
-	int ret, timeres;
+	int ret, timeres, offset;
 
 	mutex_lock(&dht11->lock);
 	if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
 		timeres = ktime_get_resolution_ns();
-		if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
+		if (timeres > DHT11_MIN_TIMERES) {
 			dev_err(dht11->dev, "timeresolution %dns too low\n",
 				timeres);
 			/* In theory a better clock could become available
@@ -176,6 +192,10 @@
 			ret = -EAGAIN;
 			goto err;
 		}
+		if (timeres > DHT11_AMBIG_LOW && timeres < DHT11_AMBIG_HIGH)
+			dev_warn(dht11->dev,
+				 "timeresolution: %dns - decoding ambiguous\n",
+				 timeres);
 
 		reinit_completion(&dht11->completion);
 
@@ -208,11 +228,14 @@
 		if (ret < 0)
 			goto err;
 
-		ret = dht11_decode(dht11,
-				   dht11->num_edges == DHT11_EDGES_PER_READ ?
-					DHT11_EDGES_PREAMBLE :
-					DHT11_EDGES_PREAMBLE - 2,
-				timeres);
+		offset = DHT11_EDGES_PREAMBLE +
+				dht11->num_edges - DHT11_EDGES_PER_READ;
+		for (; offset >= 0; --offset) {
+			ret = dht11_decode(dht11, offset);
+			if (!ret)
+				break;
+		}
+
 		if (ret)
 			goto err;
 	}
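To make the fixed-threshold decoding above concrete (an illustrative sketch, not part of the patch): once the resolution check against DHT11_MIN_TIMERES has passed, a pulse is classified purely by comparing its width against DHT11_THRESHOLD, independent of the clock resolution.

	s64 t_zero = 26000;	/* typical AM2302 0-bit high time, ns */
	s64 t_one  = 70000;	/* typical AM2302 1-bit high time, ns */

	char bit0 = t_zero > DHT11_THRESHOLD;	/* 26000 > 49000 -> 0 */
	char bit1 = t_one  > DHT11_THRESHOLD;	/* 70000 > 49000 -> 1 */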
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index 91972cc..98a022f 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -170,6 +170,7 @@
 
 static const struct i2c_device_id si7005_id[] = {
 	{ "si7005", 0 },
+	{ "th02", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, si7005_id);
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 71991b5..5ab4e06 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -149,6 +149,7 @@
 
 static const struct i2c_device_id si7020_id[] = {
 	{ "si7020", 0 },
+	{ "th06", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, si7020_id);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 8f8d137..a7f557a 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -3,15 +3,31 @@
 #
 
 config INV_MPU6050_IIO
-	tristate "Invensense MPU6050 devices"
-	depends on I2C && SYSFS
-	depends on I2C_MUX
+	tristate
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
+
+config INV_MPU6050_I2C
+	tristate "Invensense MPU6050 devices (I2C)"
+	depends on I2C
+	select INV_MPU6050_IIO
+	select I2C_MUX
+	select REGMAP_I2C
 	help
 	  This driver supports the Invensense MPU6050 devices.
 	  This driver can also support MPU6500 in MPU6050 compatibility mode
 	  and also in MPU6500 mode with some limitations.
 	  It is a gyroscope/accelerometer combo device.
 	  This driver can be built as a module. The module will be called
-	  inv-mpu6050.
+	  inv-mpu6050-i2c.
+
+config INV_MPU6050_SPI
+	tristate "Invensense MPU6050 devices (SPI)"
+	depends on SPI_MASTER
+	select INV_MPU6050_IIO
+	select REGMAP_SPI
+	help
+	  This driver supports the Invensense MPU6050 devices.
+	  It is a gyroscope/accelerometer combo device.
+	  This driver can be built as a module. The module will be called
+	  inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index f566f6a..734af5e 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -3,4 +3,10 @@
 #
 
 obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
-inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o inv_mpu_acpi.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
+
+obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o
+inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o
+
+obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o
+inv-mpu6050-spi-objs := inv_mpu_spi.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 1c982a5..0bcfa8d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -139,22 +139,23 @@
 	return 0;
 }
 
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
 {
+	struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
 
 	st->mux_client = NULL;
-	if (ACPI_HANDLE(&st->client->dev)) {
+	if (ACPI_HANDLE(&client->dev)) {
 		struct i2c_board_info info;
 		struct acpi_device *adev;
 		int ret = -1;
 
-		adev = ACPI_COMPANION(&st->client->dev);
+		adev = ACPI_COMPANION(&client->dev);
 		memset(&info, 0, sizeof(info));
 
 		dmi_check_system(inv_mpu_dev_list);
 		switch (matched_product_name) {
 		case INV_MPU_ASUS_T100TA:
-			ret = asus_acpi_get_sensor_info(adev, st->client,
+			ret = asus_acpi_get_sensor_info(adev, client,
 							&info);
 			break;
 		/* Add more matched product processing here */
@@ -166,7 +167,7 @@
 			/* No matching DMI, so create device on INV6XX type */
 			unsigned short primary, secondary;
 
-			ret = inv_mpu_process_acpi_config(st->client, &primary,
+			ret = inv_mpu_process_acpi_config(client, &primary,
 							  &secondary);
 			if (!ret && secondary) {
 				char *name;
@@ -191,8 +192,9 @@
 	return 0;
 }
 
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
 {
+	struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
 	if (st->mux_client)
 		i2c_unregister_device(st->mux_client);
 }
@@ -200,12 +202,12 @@
 
 #include "inv_mpu_iio.h"
 
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
 {
 	return 0;
 }
 
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
 {
 }
 #endif
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f0e0609..2258600 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -75,100 +75,17 @@
 	},
 };
 
-int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
-{
-	return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
-}
-
-/*
- * The i2c read/write needs to happen in unlocked mode. As the parent
- * adapter is common. If we use locked versions, it will fail as
- * the mux adapter will lock the parent i2c adapter, while calling
- * select/deselect functions.
- */
-static int inv_mpu6050_write_reg_unlocked(struct inv_mpu6050_state *st,
-					  u8 reg, u8 d)
-{
-	int ret;
-	u8 buf[2];
-	struct i2c_msg msg[1] = {
-		{
-			.addr = st->client->addr,
-			.flags = 0,
-			.len = sizeof(buf),
-			.buf = buf,
-		}
-	};
-
-	buf[0] = reg;
-	buf[1] = d;
-	ret = __i2c_transfer(st->client->adapter, msg, 1);
-	if (ret != 1)
-		return ret;
-
-	return 0;
-}
-
-static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
-				     u32 chan_id)
-{
-	struct iio_dev *indio_dev = mux_priv;
-	struct inv_mpu6050_state *st = iio_priv(indio_dev);
-	int ret = 0;
-
-	/* Use the same mutex which was used everywhere to protect power-op */
-	mutex_lock(&indio_dev->mlock);
-	if (!st->powerup_count) {
-		ret = inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
-						     0);
-		if (ret)
-			goto write_error;
-
-		msleep(INV_MPU6050_REG_UP_TIME);
-	}
-	if (!ret) {
-		st->powerup_count++;
-		ret = inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
-						     st->client->irq |
-						     INV_MPU6050_BIT_BYPASS_EN);
-	}
-write_error:
-	mutex_unlock(&indio_dev->mlock);
-
-	return ret;
-}
-
-static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
-				       void *mux_priv, u32 chan_id)
-{
-	struct iio_dev *indio_dev = mux_priv;
-	struct inv_mpu6050_state *st = iio_priv(indio_dev);
-
-	mutex_lock(&indio_dev->mlock);
-	/* It doesn't really mattter, if any of the calls fails */
-	inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
-				       st->client->irq);
-	st->powerup_count--;
-	if (!st->powerup_count)
-		inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
-					       INV_MPU6050_BIT_SLEEP);
-	mutex_unlock(&indio_dev->mlock);
-
-	return 0;
-}
-
 int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
 {
-	u8 d, mgmt_1;
+	unsigned int d, mgmt_1;
 	int result;
 
 	/* switch clock needs to be careful. Only when gyro is on, can
 	   clock source be switched to gyro. Otherwise, it must be set to
 	   internal clock */
 	if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
-		result = i2c_smbus_read_i2c_block_data(st->client,
-				       st->reg->pwr_mgmt_1, 1, &mgmt_1);
-		if (result != 1)
+		result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1);
+		if (result)
 			return result;
 
 		mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
@@ -178,20 +95,19 @@
 		/* turning off gyro requires switch to internal clock first.
 		   Then turn off gyro engine */
 		mgmt_1 |= INV_CLK_INTERNAL;
-		result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+		result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1);
 		if (result)
 			return result;
 	}
 
-	result = i2c_smbus_read_i2c_block_data(st->client,
-				       st->reg->pwr_mgmt_2, 1, &d);
-	if (result != 1)
+	result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d);
+	if (result)
 		return result;
 	if (en)
 		d &= ~mask;
 	else
 		d |= mask;
-	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+	result = regmap_write(st->map, st->reg->pwr_mgmt_2, d);
 	if (result)
 		return result;
 
@@ -201,7 +117,7 @@
 		if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
 			/* switch internal clock to PLL */
 			mgmt_1 |= INV_CLK_PLL;
-			result = inv_mpu6050_write_reg(st,
+			result = regmap_write(st->map,
 					st->reg->pwr_mgmt_1, mgmt_1);
 			if (result)
 				return result;
@@ -218,15 +134,14 @@
 	if (power_on) {
 		/* Already under indio-dev->mlock mutex */
 		if (!st->powerup_count)
-			result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
-						       0);
+			result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
 		if (!result)
 			st->powerup_count++;
 	} else {
 		st->powerup_count--;
 		if (!st->powerup_count)
-			result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
-						       INV_MPU6050_BIT_SLEEP);
+			result = regmap_write(st->map, st->reg->pwr_mgmt_1,
+					      INV_MPU6050_BIT_SLEEP);
 	}
 
 	if (result)
@@ -237,6 +152,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
 
 /**
  *  inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
@@ -257,22 +173,22 @@
 	if (result)
 		return result;
 	d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
-	result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+	result = regmap_write(st->map, st->reg->gyro_config, d);
 	if (result)
 		return result;
 
 	d = INV_MPU6050_FILTER_20HZ;
-	result = inv_mpu6050_write_reg(st, st->reg->lpf, d);
+	result = regmap_write(st->map, st->reg->lpf, d);
 	if (result)
 		return result;
 
 	d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
-	result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+	result = regmap_write(st->map, st->reg->sample_rate_div, d);
 	if (result)
 		return result;
 
 	d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
-	result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+	result = regmap_write(st->map, st->reg->accl_config, d);
 	if (result)
 		return result;
 
@@ -290,9 +206,8 @@
 	__be16 d;
 
 	ind = (axis - IIO_MOD_X) * 2;
-	result = i2c_smbus_read_i2c_block_data(st->client, reg + ind,  2,
-						(u8 *)&d);
-	if (result != 2)
+	result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2);
+	if (result)
 		return -EINVAL;
 	*val = (short)be16_to_cpup(&d);
 
@@ -418,8 +333,7 @@
 	for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
 		if (gyro_scale_6050[i] == val) {
 			d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
-			result = inv_mpu6050_write_reg(st,
-					st->reg->gyro_config, d);
+			result = regmap_write(st->map, st->reg->gyro_config, d);
 			if (result)
 				return result;
 
@@ -456,8 +370,7 @@
 	for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
 		if (accel_scale[i] == val) {
 			d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
-			result = inv_mpu6050_write_reg(st,
-					st->reg->accl_config, d);
+			result = regmap_write(st->map, st->reg->accl_config, d);
 			if (result)
 				return result;
 
@@ -537,7 +450,7 @@
 	while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
 		i++;
 	data = d[i];
-	result = inv_mpu6050_write_reg(st, st->reg->lpf, data);
+	result = regmap_write(st->map, st->reg->lpf, data);
 	if (result)
 		return result;
 	st->chip_config.lpf = data;
@@ -575,7 +488,7 @@
 		goto fifo_rate_fail;
 
 	d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
-	result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+	result = regmap_write(st->map, st->reg->sample_rate_div, d);
 	if (result)
 		goto fifo_rate_fail;
 	st->chip_config.fifo_rate = fifo_rate;
@@ -727,8 +640,7 @@
 /**
  *  inv_check_and_setup_chip() - check and setup chip.
  */
-static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
-		const struct i2c_device_id *id)
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
 {
 	int result;
 
@@ -737,8 +649,8 @@
 	st->reg = hw_info[st->chip_type].reg;
 
 	/* reset to make sure previous state are not there */
-	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
-					INV_MPU6050_BIT_H_RESET);
+	result = regmap_write(st->map, st->reg->pwr_mgmt_1,
+			      INV_MPU6050_BIT_H_RESET);
 	if (result)
 		return result;
 	msleep(INV_MPU6050_POWER_UP_TIME);
@@ -765,54 +677,47 @@
 	return 0;
 }
 
-/**
- *  inv_mpu_probe() - probe function.
- *  @client:          i2c client.
- *  @id:              i2c device id.
- *
- *  Returns 0 on success, a negative error code otherwise.
- */
-static int inv_mpu_probe(struct i2c_client *client,
-	const struct i2c_device_id *id)
+int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+		       int (*inv_mpu_bus_setup)(struct iio_dev *))
 {
 	struct inv_mpu6050_state *st;
 	struct iio_dev *indio_dev;
 	struct inv_mpu6050_platform_data *pdata;
+	struct device *dev = regmap_get_device(regmap);
 	int result;
 
-	if (!i2c_check_functionality(client->adapter,
-		I2C_FUNC_SMBUS_I2C_BLOCK))
-		return -ENOSYS;
-
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
 	if (!indio_dev)
 		return -ENOMEM;
 
 	st = iio_priv(indio_dev);
-	st->client = client;
 	st->powerup_count = 0;
-	pdata = dev_get_platdata(&client->dev);
+	st->irq = irq;
+	st->map = regmap;
+	pdata = dev_get_platdata(dev);
 	if (pdata)
 		st->plat_data = *pdata;
 	/* power is turned on inside check chip type*/
-	result = inv_check_and_setup_chip(st, id);
+	result = inv_check_and_setup_chip(st);
 	if (result)
 		return result;
 
+	if (inv_mpu_bus_setup)
+		inv_mpu_bus_setup(indio_dev);
+
 	result = inv_mpu6050_init_config(indio_dev);
 	if (result) {
-		dev_err(&client->dev,
-			"Could not initialize device.\n");
+		dev_err(dev, "Could not initialize device.\n");
 		return result;
 	}
 
-	i2c_set_clientdata(client, indio_dev);
-	indio_dev->dev.parent = &client->dev;
-	/* id will be NULL when enumerated via ACPI */
-	if (id)
-		indio_dev->name = (char *)id->name;
+	dev_set_drvdata(dev, indio_dev);
+	indio_dev->dev.parent = dev;
+	/* name will be NULL when enumerated via ACPI */
+	if (name)
+		indio_dev->name = name;
 	else
-		indio_dev->name = (char *)dev_name(&client->dev);
+		indio_dev->name = dev_name(dev);
 	indio_dev->channels = inv_mpu_channels;
 	indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
 
@@ -824,13 +729,12 @@
 					    inv_mpu6050_read_fifo,
 					    NULL);
 	if (result) {
-		dev_err(&st->client->dev, "configure buffer fail %d\n",
-				result);
+		dev_err(dev, "configure buffer fail %d\n", result);
 		return result;
 	}
 	result = inv_mpu6050_probe_trigger(indio_dev);
 	if (result) {
-		dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+		dev_err(dev, "trigger probe fail %d\n", result);
 		goto out_unreg_ring;
 	}
 
@@ -838,102 +742,47 @@
 	spin_lock_init(&st->time_stamp_lock);
 	result = iio_device_register(indio_dev);
 	if (result) {
-		dev_err(&st->client->dev, "IIO register fail %d\n", result);
+		dev_err(dev, "IIO register fail %d\n", result);
 		goto out_remove_trigger;
 	}
 
-	st->mux_adapter = i2c_add_mux_adapter(client->adapter,
-					      &client->dev,
-					      indio_dev,
-					      0, 0, 0,
-					      inv_mpu6050_select_bypass,
-					      inv_mpu6050_deselect_bypass);
-	if (!st->mux_adapter) {
-		result = -ENODEV;
-		goto out_unreg_device;
-	}
-
-	result = inv_mpu_acpi_create_mux_client(st);
-	if (result)
-		goto out_del_mux;
-
 	return 0;
 
-out_del_mux:
-	i2c_del_mux_adapter(st->mux_adapter);
-out_unreg_device:
-	iio_device_unregister(indio_dev);
 out_remove_trigger:
 	inv_mpu6050_remove_trigger(st);
 out_unreg_ring:
 	iio_triggered_buffer_cleanup(indio_dev);
 	return result;
 }
+EXPORT_SYMBOL_GPL(inv_mpu_core_probe);
 
-static int inv_mpu_remove(struct i2c_client *client)
+int inv_mpu_core_remove(struct device *dev)
 {
-	struct iio_dev *indio_dev = i2c_get_clientdata(client);
-	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 
-	inv_mpu_acpi_delete_mux_client(st);
-	i2c_del_mux_adapter(st->mux_adapter);
 	iio_device_unregister(indio_dev);
-	inv_mpu6050_remove_trigger(st);
+	inv_mpu6050_remove_trigger(iio_priv(indio_dev));
 	iio_triggered_buffer_cleanup(indio_dev);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(inv_mpu_core_remove);
+
 #ifdef CONFIG_PM_SLEEP
 
 static int inv_mpu_resume(struct device *dev)
 {
-	return inv_mpu6050_set_power_itg(
-		iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+	return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), true);
 }
 
 static int inv_mpu_suspend(struct device *dev)
 {
-	return inv_mpu6050_set_power_itg(
-		iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+	return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), false);
 }
-static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
-
-#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
-#else
-#define INV_MPU6050_PMOPS NULL
 #endif /* CONFIG_PM_SLEEP */
 
-/*
- * device id table is used to identify what device can be
- * supported by this driver
- */
-static const struct i2c_device_id inv_mpu_id[] = {
-	{"mpu6050", INV_MPU6050},
-	{"mpu6500", INV_MPU6500},
-	{}
-};
-
-MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
-
-static const struct acpi_device_id inv_acpi_match[] = {
-	{"INVN6500", 0},
-	{ },
-};
-
-MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
-
-static struct i2c_driver inv_mpu_driver = {
-	.probe		=	inv_mpu_probe,
-	.remove		=	inv_mpu_remove,
-	.id_table	=	inv_mpu_id,
-	.driver = {
-		.name	=	"inv-mpu6050",
-		.pm     =       INV_MPU6050_PMOPS,
-		.acpi_match_table = ACPI_PTR(inv_acpi_match),
-	},
-};
-
-module_i2c_driver(inv_mpu_driver);
+SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+EXPORT_SYMBOL_GPL(inv_mpu_pmops);
 
 MODULE_AUTHOR("Invensense Corporation");
 MODULE_DESCRIPTION("Invensense device MPU6050 driver");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
new file mode 100644
index 0000000..af400dd
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -0,0 +1,206 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include "inv_mpu_iio.h"
+
+static const struct regmap_config inv_mpu_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+/*
+ * The i2c read/write needs to happen in unlocked mode, as the parent
+ * adapter is shared. If we used the locked versions, the transfer would
+ * fail because the mux adapter locks the parent i2c adapter while
+ * calling the select/deselect functions.
+ */
+static int inv_mpu6050_write_reg_unlocked(struct i2c_client *client,
+					  u8 reg, u8 d)
+{
+	int ret;
+	u8 buf[2] = {reg, d};
+	struct i2c_msg msg[1] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = sizeof(buf),
+			.buf = buf,
+		}
+	};
+
+	ret = __i2c_transfer(client->adapter, msg, 1);
+	if (ret != 1)
+		return ret;
+
+	return 0;
+}
+
+static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
+				     u32 chan_id)
+{
+	struct i2c_client *client = mux_priv;
+	struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int ret = 0;
+
+	/* Use the same mutex which was used everywhere to protect power-op */
+	mutex_lock(&indio_dev->mlock);
+	if (!st->powerup_count) {
+		ret = inv_mpu6050_write_reg_unlocked(client,
+						     st->reg->pwr_mgmt_1, 0);
+		if (ret)
+			goto write_error;
+
+		msleep(INV_MPU6050_REG_UP_TIME);
+	}
+	if (!ret) {
+		st->powerup_count++;
+		ret = inv_mpu6050_write_reg_unlocked(client,
+						     st->reg->int_pin_cfg,
+						     INV_MPU6050_INT_PIN_CFG |
+						     INV_MPU6050_BIT_BYPASS_EN);
+	}
+write_error:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
+				       void *mux_priv, u32 chan_id)
+{
+	struct i2c_client *client = mux_priv;
+	struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	/* It doesn't really matter if any of the calls fail */
+	inv_mpu6050_write_reg_unlocked(client, st->reg->int_pin_cfg,
+				       INV_MPU6050_INT_PIN_CFG);
+	st->powerup_count--;
+	if (!st->powerup_count)
+		inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1,
+					       INV_MPU6050_BIT_SLEEP);
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+/**
+ *  inv_mpu_probe() - probe function.
+ *  @client:          i2c client.
+ *  @id:              i2c device id.
+ *
+ *  Returns 0 on success, a negative error code otherwise.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct inv_mpu6050_state *st;
+	int result;
+	const char *name = id ? id->name : NULL;
+	struct regmap *regmap;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_I2C_BLOCK))
+		return -ENOSYS;
+
+	regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	result = inv_mpu_core_probe(regmap, client->irq, name, NULL);
+	if (result < 0)
+		return result;
+
+	st = iio_priv(dev_get_drvdata(&client->dev));
+	st->mux_adapter = i2c_add_mux_adapter(client->adapter,
+					      &client->dev,
+					      client,
+					      0, 0, 0,
+					      inv_mpu6050_select_bypass,
+					      inv_mpu6050_deselect_bypass);
+	if (!st->mux_adapter) {
+		result = -ENODEV;
+		goto out_unreg_device;
+	}
+
+	result = inv_mpu_acpi_create_mux_client(client);
+	if (result)
+		goto out_del_mux;
+
+	return 0;
+
+out_del_mux:
+	i2c_del_mux_adapter(st->mux_adapter);
+out_unreg_device:
+	inv_mpu_core_remove(&client->dev);
+	return result;
+}
+
+static int inv_mpu_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	inv_mpu_acpi_delete_mux_client(client);
+	i2c_del_mux_adapter(st->mux_adapter);
+
+	return inv_mpu_core_remove(&client->dev);
+}
+
+/*
+ * device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+	{"mpu6050", INV_MPU6050},
+	{"mpu6500", INV_MPU6500},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static const struct acpi_device_id inv_acpi_match[] = {
+	{"INVN6500", 0},
+	{ },
+};
+
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
+static struct i2c_driver inv_mpu_driver = {
+	.probe		=	inv_mpu_probe,
+	.remove		=	inv_mpu_remove,
+	.id_table	=	inv_mpu_id,
+	.driver = {
+		.acpi_match_table = ACPI_PTR(inv_acpi_match),
+		.name	=	"inv-mpu6050-i2c",
+		.pm     =       &inv_mpu_pmops,
+	},
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index db0a4a2..fcc2f3d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
+#include <linux/regmap.h>
 #include <linux/iio/sysfs.h>
 #include <linux/iio/kfifo_buf.h>
 #include <linux/iio/trigger.h>
@@ -61,6 +62,7 @@
 enum inv_devices {
 	INV_MPU6050,
 	INV_MPU6500,
+	INV_MPU6000,
 	INV_NUM_PARTS
 };
 
@@ -107,9 +109,10 @@
  *  @hw:		Other hardware-specific information.
  *  @chip_type:		chip type.
  *  @time_stamp_lock:	spin lock to time stamp.
- *  @client:		i2c client handle.
  *  @plat_data:		platform data.
  *  @timestamps:        kfifo queue to store time stamp.
+ *  @map:		regmap pointer.
+ *  @irq:		interrupt number.
  */
 struct inv_mpu6050_state {
 #define TIMESTAMP_FIFO_SIZE 16
@@ -119,12 +122,13 @@
 	const struct inv_mpu6050_hw *hw;
 	enum   inv_devices chip_type;
 	spinlock_t time_stamp_lock;
-	struct i2c_client *client;
 	struct i2c_adapter *mux_adapter;
 	struct i2c_client *mux_client;
 	unsigned int powerup_count;
 	struct inv_mpu6050_platform_data plat_data;
 	DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+	struct regmap *map;
+	int irq;
 };
 
 /*register and associated bit definition*/
@@ -151,6 +155,7 @@
 #define INV_MPU6050_BIT_I2C_MST_EN          0x20
 #define INV_MPU6050_BIT_FIFO_EN             0x40
 #define INV_MPU6050_BIT_DMP_EN              0x80
+#define INV_MPU6050_BIT_I2C_IF_DIS          0x10
 
 #define INV_MPU6050_REG_PWR_MGMT_1          0x6B
 #define INV_MPU6050_BIT_H_RESET             0x80
@@ -185,6 +190,7 @@
 
 #define INV_MPU6050_REG_INT_PIN_CFG	0x37
 #define INV_MPU6050_BIT_BYPASS_EN	0x2
+#define INV_MPU6050_INT_PIN_CFG		0
 
 /* init parameters */
 #define INV_MPU6050_INIT_FIFO_RATE           50
@@ -252,5 +258,10 @@
 int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
 int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
 int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st);
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st);
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client);
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
+int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+		      int (*inv_mpu_bus_setup)(struct iio_dev *));
+int inv_mpu_core_remove(struct device *dev);
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
+extern const struct dev_pm_ops inv_mpu_pmops;
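The new prototypes above are the point of the refactor: the core now takes a regmap, an IRQ number and an optional bus_setup callback instead of an i2c_client, so each bus glue only has to build a regmap and call inv_mpu_core_probe(). The SPI file added below does exactly that with devm_regmap_init_spi() plus an inv_mpu_i2c_disable() bus_setup hook; the I2C glue, of which only the tail is visible at the top of this section, presumably mirrors it with devm_regmap_init_i2c() and layers the ACPI mux-client handling on top, with inv_mpu_core_remove() as the common teardown.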
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index ba27e27..1fc5fd9 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/i2c.h>
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/sysfs.h>
@@ -41,22 +40,23 @@
 	struct inv_mpu6050_state  *st = iio_priv(indio_dev);
 
 	/* disable interrupt */
-	result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+	result = regmap_write(st->map, st->reg->int_enable, 0);
 	if (result) {
-		dev_err(&st->client->dev, "int_enable failed %d\n", result);
+		dev_err(regmap_get_device(st->map), "int_enable failed %d\n",
+			result);
 		return result;
 	}
 	/* disable the sensor output to FIFO */
-	result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+	result = regmap_write(st->map, st->reg->fifo_en, 0);
 	if (result)
 		goto reset_fifo_fail;
 	/* disable fifo reading */
-	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+	result = regmap_write(st->map, st->reg->user_ctrl, 0);
 	if (result)
 		goto reset_fifo_fail;
 
 	/* reset FIFO*/
-	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+	result = regmap_write(st->map, st->reg->user_ctrl,
 					INV_MPU6050_BIT_FIFO_RST);
 	if (result)
 		goto reset_fifo_fail;
@@ -67,13 +67,13 @@
 	/* enable interrupt */
 	if (st->chip_config.accl_fifo_enable ||
 	    st->chip_config.gyro_fifo_enable) {
-		result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+		result = regmap_write(st->map, st->reg->int_enable,
 					INV_MPU6050_BIT_DATA_RDY_EN);
 		if (result)
 			return result;
 	}
 	/* enable FIFO reading and I2C master interface*/
-	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+	result = regmap_write(st->map, st->reg->user_ctrl,
 					INV_MPU6050_BIT_FIFO_EN);
 	if (result)
 		goto reset_fifo_fail;
@@ -83,15 +83,15 @@
 		d |= INV_MPU6050_BITS_GYRO_OUT;
 	if (st->chip_config.accl_fifo_enable)
 		d |= INV_MPU6050_BIT_ACCEL_OUT;
-	result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+	result = regmap_write(st->map, st->reg->fifo_en, d);
 	if (result)
 		goto reset_fifo_fail;
 
 	return 0;
 
 reset_fifo_fail:
-	dev_err(&st->client->dev, "reset fifo failed %d\n", result);
-	result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+	dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result);
+	result = regmap_write(st->map, st->reg->int_enable,
 					INV_MPU6050_BIT_DATA_RDY_EN);
 
 	return result;
@@ -143,10 +143,10 @@
 	 * read fifo_count register to know how many bytes inside FIFO
 	 * right now
 	 */
-	result = i2c_smbus_read_i2c_block_data(st->client,
+	result = regmap_bulk_read(st->map,
 				       st->reg->fifo_count_h,
-				       INV_MPU6050_FIFO_COUNT_BYTE, data);
-	if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+				       data, INV_MPU6050_FIFO_COUNT_BYTE);
+	if (result)
 		goto end_session;
 	fifo_count = be16_to_cpup((__be16 *)(&data[0]));
 	if (fifo_count < bytes_per_datum)
@@ -161,10 +161,9 @@
 		fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
 			goto flush_fifo;
 	while (fifo_count >= bytes_per_datum) {
-		result = i2c_smbus_read_i2c_block_data(st->client,
-						       st->reg->fifo_r_w,
-						       bytes_per_datum, data);
-		if (result != bytes_per_datum)
+		result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
+					  data, bytes_per_datum);
+		if (result)
 			goto flush_fifo;
 
 		result = kfifo_out(&st->timestamps, &timestamp, 1);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
new file mode 100644
index 0000000..5b552a6
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -0,0 +1,97 @@
+/*
+* Copyright (C) 2015 Intel Corporation Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include "inv_mpu_iio.h"
+
+static const struct regmap_config inv_mpu_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int ret = 0;
+
+	ret = inv_mpu6050_set_power_itg(st, true);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(st->map, INV_MPU6050_REG_USER_CTRL,
+			   INV_MPU6050_BIT_I2C_IF_DIS);
+	if (ret) {
+		inv_mpu6050_set_power_itg(st, false);
+		return ret;
+	}
+
+	return inv_mpu6050_set_power_itg(st, false);
+}
+
+static int inv_mpu_probe(struct spi_device *spi)
+{
+	struct regmap *regmap;
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	const char *name = id ? id->name : NULL;
+
+	regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	return inv_mpu_core_probe(regmap, spi->irq, name, inv_mpu_i2c_disable);
+}
+
+static int inv_mpu_remove(struct spi_device *spi)
+{
+	return inv_mpu_core_remove(&spi->dev);
+}
+
+/*
+ * device id table is used to identify which devices are
+ * supported by this driver
+ */
+static const struct spi_device_id inv_mpu_id[] = {
+	{"mpu6000", INV_MPU6000},
+	{}
+};
+
+MODULE_DEVICE_TABLE(spi, inv_mpu_id);
+
+static const struct acpi_device_id inv_acpi_match[] = {
+	{"INVN6000", 0},
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
+static struct spi_driver inv_mpu_driver = {
+	.probe		=	inv_mpu_probe,
+	.remove		=	inv_mpu_remove,
+	.id_table	=	inv_mpu_id,
+	.driver = {
+		.acpi_match_table = ACPI_PTR(inv_acpi_match),
+		.name	=	"inv-mpu6000-spi",
+		.pm     =       &inv_mpu_pmops,
+	},
+};
+
+module_spi_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Adriana Reus <adriana.reus@intel.com>");
+MODULE_DESCRIPTION("Invensense device MPU6000 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 844610c..72d6aae 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -65,15 +65,15 @@
 		if (result)
 			return result;
 	} else {
-		result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+		result = regmap_write(st->map, st->reg->fifo_en, 0);
 		if (result)
 			return result;
 
-		result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+		result = regmap_write(st->map, st->reg->int_enable, 0);
 		if (result)
 			return result;
 
-		result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+		result = regmap_write(st->map, st->reg->user_ctrl, 0);
 		if (result)
 			return result;
 
@@ -123,7 +123,7 @@
 	if (!st->trig)
 		return -ENOMEM;
 
-	ret = devm_request_irq(&indio_dev->dev, st->client->irq,
+	ret = devm_request_irq(&indio_dev->dev, st->irq,
 			       &iio_trigger_generic_data_rdy_poll,
 			       IRQF_TRIGGER_RISING,
 			       "inv_mpu",
@@ -131,7 +131,7 @@
 	if (ret)
 		return ret;
 
-	st->trig->dev.parent = &st->client->dev;
+	st->trig->dev.parent = regmap_get_device(st->map);
 	st->trig->ops = &inv_mpu_trigger_ops;
 	iio_trigger_set_drvdata(st->trig, indio_dev);
 
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 139ae91..b976332 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -512,33 +512,41 @@
 	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
 }
 
+static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
+					     unsigned int scan_index)
+{
+	const struct iio_chan_spec *ch;
+	unsigned int bytes;
+
+	ch = iio_find_channel_from_si(indio_dev, scan_index);
+	bytes = ch->scan_type.storagebits / 8;
+	if (ch->scan_type.repeat > 1)
+		bytes *= ch->scan_type.repeat;
+	return bytes;
+}
+
+static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
+{
+	return iio_storage_bytes_for_si(indio_dev,
+					indio_dev->scan_index_timestamp);
+}
+
 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 				const unsigned long *mask, bool timestamp)
 {
-	const struct iio_chan_spec *ch;
 	unsigned bytes = 0;
 	int length, i;
 
 	/* How much space will the demuxed element take? */
 	for_each_set_bit(i, mask,
 			 indio_dev->masklength) {
-		ch = iio_find_channel_from_si(indio_dev, i);
-		if (ch->scan_type.repeat > 1)
-			length = ch->scan_type.storagebits / 8 *
-				ch->scan_type.repeat;
-		else
-			length = ch->scan_type.storagebits / 8;
+		length = iio_storage_bytes_for_si(indio_dev, i);
 		bytes = ALIGN(bytes, length);
 		bytes += length;
 	}
+
 	if (timestamp) {
-		ch = iio_find_channel_from_si(indio_dev,
-					      indio_dev->scan_index_timestamp);
-		if (ch->scan_type.repeat > 1)
-			length = ch->scan_type.storagebits / 8 *
-				ch->scan_type.repeat;
-		else
-			length = ch->scan_type.storagebits / 8;
+		length = iio_storage_bytes_for_timestamp(indio_dev);
 		bytes = ALIGN(bytes, length);
 		bytes += length;
 	}
@@ -1288,7 +1296,6 @@
 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
 				   struct iio_buffer *buffer)
 {
-	const struct iio_chan_spec *ch;
 	int ret, in_ind = -1, out_ind, length;
 	unsigned in_loc = 0, out_loc = 0;
 	struct iio_demux_table *p = NULL;
@@ -1315,21 +1322,11 @@
 			in_ind = find_next_bit(indio_dev->active_scan_mask,
 					       indio_dev->masklength,
 					       in_ind + 1);
-			ch = iio_find_channel_from_si(indio_dev, in_ind);
-			if (ch->scan_type.repeat > 1)
-				length = ch->scan_type.storagebits / 8 *
-					ch->scan_type.repeat;
-			else
-				length = ch->scan_type.storagebits / 8;
+			length = iio_storage_bytes_for_si(indio_dev, in_ind);
 			/* Make sure we are aligned */
 			in_loc = roundup(in_loc, length) + length;
 		}
-		ch = iio_find_channel_from_si(indio_dev, in_ind);
-		if (ch->scan_type.repeat > 1)
-			length = ch->scan_type.storagebits / 8 *
-				ch->scan_type.repeat;
-		else
-			length = ch->scan_type.storagebits / 8;
+		length = iio_storage_bytes_for_si(indio_dev, in_ind);
 		out_loc = roundup(out_loc, length);
 		in_loc = roundup(in_loc, length);
 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
@@ -1340,13 +1337,7 @@
 	}
 	/* Relies on scan_timestamp being last */
 	if (buffer->scan_timestamp) {
-		ch = iio_find_channel_from_si(indio_dev,
-			indio_dev->scan_index_timestamp);
-		if (ch->scan_type.repeat > 1)
-			length = ch->scan_type.storagebits / 8 *
-				ch->scan_type.repeat;
-		else
-			length = ch->scan_type.storagebits / 8;
+		length = iio_storage_bytes_for_timestamp(indio_dev);
 		out_loc = roundup(out_loc, length);
 		in_loc = roundup(in_loc, length);
 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
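The three hunks above fold the repeated storagebits/8 (times repeat) arithmetic into iio_storage_bytes_for_si(). To make the alignment rule concrete, here is a small self-contained C sketch of the same scan-size computation for a device with two 32-bit channels and a 64-bit timestamp (the layout the ms5611 gains further down); only the ALIGN-and-accumulate logic mirrors the kernel code, the helper name is made up:

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

	/* storage bytes for one scan element: storagebits / 8, times repeat */
	static unsigned int storage_bytes(unsigned int storagebits, unsigned int repeat)
	{
		unsigned int bytes = storagebits / 8;

		return repeat > 1 ? bytes * repeat : bytes;
	}

	int main(void)
	{
		unsigned int bytes = 0, length;

		length = storage_bytes(32, 1);		/* s32 pressure: 4 bytes */
		bytes = ALIGN(bytes, length) + length;	/* 4 */
		length = storage_bytes(32, 1);		/* s32 temperature: 4 bytes */
		bytes = ALIGN(bytes, length) + length;	/* 8 */
		length = storage_bytes(64, 1);		/* s64 timestamp: 8 bytes */
		bytes = ALIGN(bytes, length) + length;	/* aligned to 8, so 16 */

		printf("scan element set size: %u bytes\n", bytes);	/* prints 16 */
		return 0;
	}

Compiled and run, it prints 16, which is also why the ms5611 trigger handler further down can stage a full scan in an s32 buf[4].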
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index af7cc1e..70cb7eb 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -77,6 +77,7 @@
 	[IIO_VELOCITY] = "velocity",
 	[IIO_CONCENTRATION] = "concentration",
 	[IIO_RESISTANCE] = "resistance",
+	[IIO_PH] = "ph",
 };
 
 static const char * const iio_modifier_names[] = {
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 01e111e7..b776c8e 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -65,19 +65,25 @@
 #define OPT3001_REG_EXPONENT(n)		((n) >> 12)
 #define OPT3001_REG_MANTISSA(n)		((n) & 0xfff)
 
+#define OPT3001_INT_TIME_LONG		800000
+#define OPT3001_INT_TIME_SHORT		100000
+
 /*
  * Time to wait for conversion result to be ready. The device datasheet
- * worst-case max value is 880ms. Add some slack to be on the safe side.
+ * sect. 6.5 states results are ready after the total integration time plus
+ * 3ms: worst-case 113ms for the short and 883ms for the long integration time.
+ * Add some slack to be on the safe side.
  */
-#define OPT3001_RESULT_READY_TIMEOUT	msecs_to_jiffies(1000)
+#define OPT3001_RESULT_READY_SHORT	150
+#define OPT3001_RESULT_READY_LONG	1000
 
 struct opt3001 {
 	struct i2c_client	*client;
 	struct device		*dev;
 
 	struct mutex		lock;
-	u16			ok_to_ignore_lock:1;
-	u16			result_ready:1;
+	bool			ok_to_ignore_lock;
+	bool			result_ready;
 	wait_queue_head_t	result_ready_queue;
 	u16			result;
 
@@ -89,6 +95,8 @@
 
 	u8			high_thresh_exp;
 	u8			low_thresh_exp;
+
+	bool			use_irq;
 };
 
 struct opt3001_scale {
@@ -227,26 +235,30 @@
 	u16 reg;
 	u8 exponent;
 	u16 value;
+	long timeout;
 
-	/*
-	 * Enable the end-of-conversion interrupt mechanism. Note that doing
-	 * so will overwrite the low-level limit value however we will restore
-	 * this value later on.
-	 */
-	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
-			OPT3001_LOW_LIMIT_EOC_ENABLE);
-	if (ret < 0) {
-		dev_err(opt->dev, "failed to write register %02x\n",
-				OPT3001_LOW_LIMIT);
-		return ret;
+	if (opt->use_irq) {
+		/*
+		 * Enable the end-of-conversion interrupt mechanism. Note that
+		 * doing so will overwrite the low-level limit value however we
+		 * will restore this value later on.
+		 */
+		ret = i2c_smbus_write_word_swapped(opt->client,
+					OPT3001_LOW_LIMIT,
+					OPT3001_LOW_LIMIT_EOC_ENABLE);
+		if (ret < 0) {
+			dev_err(opt->dev, "failed to write register %02x\n",
+					OPT3001_LOW_LIMIT);
+			return ret;
+		}
+
+		/* Allow IRQ to access the device despite lock being set */
+		opt->ok_to_ignore_lock = true;
 	}
 
-	/* Reset data-ready indicator flag (will be set in the IRQ routine) */
+	/* Reset data-ready indicator flag */
 	opt->result_ready = false;
 
-	/* Allow IRQ to access the device despite lock being set */
-	opt->ok_to_ignore_lock = true;
-
 	/* Configure for single-conversion mode and start a new conversion */
 	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
 	if (ret < 0) {
@@ -266,32 +278,69 @@
 		goto err;
 	}
 
-	/* Wait for the IRQ to indicate the conversion is complete */
-	ret = wait_event_timeout(opt->result_ready_queue, opt->result_ready,
-			OPT3001_RESULT_READY_TIMEOUT);
+	if (opt->use_irq) {
+		/* Wait for the IRQ to indicate the conversion is complete */
+		ret = wait_event_timeout(opt->result_ready_queue,
+				opt->result_ready,
+				msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+	} else {
+		/* Sleep for result ready time */
+		timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
+			OPT3001_RESULT_READY_SHORT : OPT3001_RESULT_READY_LONG;
+		msleep(timeout);
+
+		/* Check result ready flag */
+		ret = i2c_smbus_read_word_swapped(opt->client,
+						  OPT3001_CONFIGURATION);
+		if (ret < 0) {
+			dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+			goto err;
+		}
+
+		if (!(ret & OPT3001_CONFIGURATION_CRF)) {
+			ret = -ETIMEDOUT;
+			goto err;
+		}
+
+		/* Obtain value */
+		ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
+		if (ret < 0) {
+			dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_RESULT);
+			goto err;
+		}
+		opt->result = ret;
+		opt->result_ready = true;
+	}
 
 err:
-	/* Disallow IRQ to access the device while lock is active */
-	opt->ok_to_ignore_lock = false;
+	if (opt->use_irq)
+		/* Disallow IRQ to access the device while lock is active */
+		opt->ok_to_ignore_lock = false;
 
 	if (ret == 0)
 		return -ETIMEDOUT;
 	else if (ret < 0)
 		return ret;
 
-	/*
-	 * Disable the end-of-conversion interrupt mechanism by restoring the
-	 * low-level limit value (clearing OPT3001_LOW_LIMIT_EOC_ENABLE). Note
-	 * that selectively clearing those enable bits would affect the actual
-	 * limit value due to bit-overlap and therefore can't be done.
-	 */
-	value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
-	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
-			value);
-	if (ret < 0) {
-		dev_err(opt->dev, "failed to write register %02x\n",
-				OPT3001_LOW_LIMIT);
-		return ret;
+	if (opt->use_irq) {
+		/*
+		 * Disable the end-of-conversion interrupt mechanism by
+		 * restoring the low-level limit value (clearing
+		 * OPT3001_LOW_LIMIT_EOC_ENABLE). Note that selectively clearing
+		 * those enable bits would affect the actual limit value due to
+		 * bit-overlap and therefore can't be done.
+		 */
+		value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
+		ret = i2c_smbus_write_word_swapped(opt->client,
+						   OPT3001_LOW_LIMIT,
+						   value);
+		if (ret < 0) {
+			dev_err(opt->dev, "failed to write register %02x\n",
+					OPT3001_LOW_LIMIT);
+			return ret;
+		}
 	}
 
 	exponent = OPT3001_REG_EXPONENT(opt->result);
@@ -325,13 +374,13 @@
 	reg = ret;
 
 	switch (time) {
-	case 100000:
+	case OPT3001_INT_TIME_SHORT:
 		reg &= ~OPT3001_CONFIGURATION_CT;
-		opt->int_time = 100000;
+		opt->int_time = OPT3001_INT_TIME_SHORT;
 		break;
-	case 800000:
+	case OPT3001_INT_TIME_LONG:
 		reg |= OPT3001_CONFIGURATION_CT;
-		opt->int_time = 800000;
+		opt->int_time = OPT3001_INT_TIME_LONG;
 		break;
 	default:
 		return -EINVAL;
@@ -597,9 +646,9 @@
 
 	/* Reflect status of the device's integration time setting */
 	if (reg & OPT3001_CONFIGURATION_CT)
-		opt->int_time = 800000;
+		opt->int_time = OPT3001_INT_TIME_LONG;
 	else
-		opt->int_time = 100000;
+		opt->int_time = OPT3001_INT_TIME_SHORT;
 
 	/* Ensure device is in shutdown initially */
 	opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
@@ -733,12 +782,18 @@
 		return ret;
 	}
 
-	ret = request_threaded_irq(irq, NULL, opt3001_irq,
-			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-			"opt3001", iio);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ #%d\n", irq);
-		return ret;
+	/* Make use of the INT pin only if a valid IRQ number is given */
+	if (irq > 0) {
+		ret = request_threaded_irq(irq, NULL, opt3001_irq,
+				IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				"opt3001", iio);
+		if (ret) {
+			dev_err(dev, "failed to request IRQ #%d\n", irq);
+			return ret;
+		}
+		opt->use_irq = true;
+	} else {
+		dev_dbg(opt->dev, "enabling interrupt-less operation\n");
 	}
 
 	return 0;
@@ -751,7 +806,8 @@
 	int ret;
 	u16 reg;
 
-	free_irq(client->irq, iio);
+	if (opt->use_irq)
+		free_irq(client->irq, iio);
 
 	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
 	if (ret < 0) {
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index b13936d..9c5c9ef 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -252,7 +252,7 @@
 	u8 data_regs[3];
 };
 
-static struct ak_def ak_def_array[AK_MAX_TYPE] = {
+static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
 	{
 		.type = AK8975,
 		.raw_to_gauss = ak8975_raw_to_gauss,
@@ -360,7 +360,7 @@
  */
 struct ak8975_data {
 	struct i2c_client	*client;
-	struct ak_def		*def;
+	const struct ak_def	*def;
 	struct attribute_group	attrs;
 	struct mutex		lock;
 	u8			asa[3];
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index b27f014..501f858 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -175,6 +175,8 @@
 #define ST_MAGN_3_BDU_MASK			0x10
 #define ST_MAGN_3_DRDY_IRQ_ADDR			0x62
 #define ST_MAGN_3_DRDY_INT_MASK			0x01
+#define ST_MAGN_3_IHL_IRQ_ADDR			0x63
+#define ST_MAGN_3_IHL_IRQ_MASK			0x04
 #define ST_MAGN_3_FS_AVL_15000_GAIN		1500
 #define ST_MAGN_3_MULTIREAD_BIT			false
 #define ST_MAGN_3_OUT_X_L_ADDR			0x68
@@ -480,6 +482,8 @@
 		.drdy_irq = {
 			.addr = ST_MAGN_3_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
+			.addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
+			.mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
 		},
 		.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
 		.bootime = 2,
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 6f2e7c9..f15f66d 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -10,11 +10,11 @@
 	depends on I2C
 	select REGMAP_I2C
 	help
-	 Say yes here to build support for Bosch Sensortec BMP280
-	 pressure and temperature sensor.
+	  Say yes here to build support for Bosch Sensortec BMP280
+	  pressure and temperature sensor.
 
-	 To compile this driver as a module, choose M here: the module
-	 will be called bmp280.
+	  To compile this driver as a module, choose M here: the module
+	  will be called bmp280.
 
 config HID_SENSOR_PRESS
 	depends on HID_SENSOR_HUB
@@ -27,18 +27,33 @@
 	  Say yes here to build support for the HID SENSOR
 	  Pressure driver
 
-          To compile this driver as a module, choose M here: the module
-          will be called hid-sensor-press.
+	  To compile this driver as a module, choose M here: the module
+	  will be called hid-sensor-press.
 
 config MPL115
+	tristate
+
+config MPL115_I2C
 	tristate "Freescale MPL115A2 pressure sensor driver"
 	depends on I2C
+	select MPL115
 	help
 	  Say yes here to build support for the Freescale MPL115A2
 	  pressure sensor connected via I2C.
 
-          To compile this driver as a module, choose M here: the module
-          will be called mpl115.
+	  To compile this driver as a module, choose M here: the module
+	  will be called mpl115_i2c.
+
+config MPL115_SPI
+	tristate "Freescale MPL115A1 pressure sensor driver"
+	depends on SPI_MASTER
+	select MPL115
+	help
+	  Say yes here to build support for the Freescale MPL115A1
+	  pressure sensor connected via SPI.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called mpl115_spi.
 
 config MPL3115
 	tristate "Freescale MPL3115A2 pressure sensor driver"
@@ -49,11 +64,12 @@
 	  Say yes here to build support for the Freescale MPL3115A2
 	  pressure sensor / altimeter.
 
-          To compile this driver as a module, choose M here: the module
-          will be called mpl3115.
+	  To compile this driver as a module, choose M here: the module
+	  will be called mpl3115.
 
 config MS5611
 	tristate "Measurement Specialties MS5611 pressure sensor driver"
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say Y here to build support for the Measurement Specialties
 	  MS5611, MS5607 pressure and temperature sensors.
@@ -82,7 +98,7 @@
 config MS5637
 	tristate "Measurement Specialties MS5637 pressure & temperature sensor"
 	depends on I2C
-        select IIO_MS_SENSORS_I2C
+	select IIO_MS_SENSORS_I2C
 	help
 	  If you say yes here you get support for the Measurement Specialties
 	  MS5637 pressure and temperature sensor.
@@ -128,7 +144,7 @@
 	  Say yes here to build support for the EPCOS T5403 pressure sensor
 	  connected via I2C.
 
-          To compile this driver as a module, choose M here: the module
-          will be called t5403.
+	  To compile this driver as a module, choose M here: the module
+	  will be called t5403.
 
 endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 46571c96..d336af1 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -6,6 +6,8 @@
 obj-$(CONFIG_BMP280) += bmp280.o
 obj-$(CONFIG_HID_SENSOR_PRESS)   += hid-sensor-press.o
 obj-$(CONFIG_MPL115) += mpl115.o
+obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
+obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
 obj-$(CONFIG_MPL3115) += mpl3115.o
 obj-$(CONFIG_MS5611) += ms5611_core.o
 obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
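The Kconfig/Makefile split above is worth spelling out: MPL115 is now a hidden core symbol that both bus front ends select, so enabling either front end pulls the shared core in automatically. For example, a .config fragment with only the SPI variant enabled as a module would end up looking like this (assuming modules are enabled; the I2C line is shown only for contrast):

	CONFIG_MPL115=m
	CONFIG_MPL115_SPI=m
	# CONFIG_MPL115_I2C is not set

which, per the Makefile hunk above, builds mpl115.o (the core) together with mpl115_spi.o, while existing I2C users keep getting mpl115_i2c.o from CONFIG_MPL115_I2C.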
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index a0d7dee..73f2f0c 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -1,5 +1,5 @@
 /*
- * mpl115.c - Support for Freescale MPL115A2 pressure/temperature sensor
+ * mpl115.c - Support for Freescale MPL115A pressure/temperature sensor
  *
  * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
  *
@@ -7,17 +7,16 @@
  * the GNU General Public License.  See the file COPYING in the main
  * directory of this archive for more details.
  *
- * (7-bit I2C slave address 0x60)
- *
  * TODO: shutdown pin
  *
  */
 
 #include <linux/module.h>
-#include <linux/i2c.h>
 #include <linux/iio/iio.h>
 #include <linux/delay.h>
 
+#include "mpl115.h"
+
 #define MPL115_PADC 0x00 /* pressure ADC output value, MSB first, 10 bit */
 #define MPL115_TADC 0x02 /* temperature ADC output value, MSB first, 10 bit */
 #define MPL115_A0 0x04 /* 12 bit integer, 3 bit fraction */
@@ -27,16 +26,18 @@
 #define MPL115_CONVERT 0x12 /* convert temperature and pressure */
 
 struct mpl115_data {
-	struct i2c_client *client;
+	struct device *dev;
 	struct mutex lock;
 	s16 a0;
 	s16 b1, b2;
 	s16 c12;
+	const struct mpl115_ops *ops;
 };
 
 static int mpl115_request(struct mpl115_data *data)
 {
-	int ret = i2c_smbus_write_byte_data(data->client, MPL115_CONVERT, 0);
+	int ret = data->ops->write(data->dev, MPL115_CONVERT, 0);
+
 	if (ret < 0)
 		return ret;
 
@@ -57,12 +58,12 @@
 	if (ret < 0)
 		goto done;
 
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_PADC);
+	ret = data->ops->read(data->dev, MPL115_PADC);
 	if (ret < 0)
 		goto done;
 	padc = ret >> 6;
 
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+	ret = data->ops->read(data->dev, MPL115_TADC);
 	if (ret < 0)
 		goto done;
 	tadc = ret >> 6;
@@ -90,7 +91,7 @@
 	ret = mpl115_request(data);
 	if (ret < 0)
 		goto done;
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+	ret = data->ops->read(data->dev, MPL115_TADC);
 done:
 	mutex_unlock(&data->lock);
 	return ret;
@@ -145,66 +146,53 @@
 	.driver_module = THIS_MODULE,
 };
 
-static int mpl115_probe(struct i2c_client *client,
-			 const struct i2c_device_id *id)
+int mpl115_probe(struct device *dev, const char *name,
+			const struct mpl115_ops *ops)
 {
 	struct mpl115_data *data;
 	struct iio_dev *indio_dev;
 	int ret;
 
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
-		return -ENODEV;
-
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
 	if (!indio_dev)
 		return -ENOMEM;
 
 	data = iio_priv(indio_dev);
-	data->client = client;
+	data->dev = dev;
+	data->ops = ops;
 	mutex_init(&data->lock);
 
-	i2c_set_clientdata(client, indio_dev);
 	indio_dev->info = &mpl115_info;
-	indio_dev->name = id->name;
-	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = name;
+	indio_dev->dev.parent = dev;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->channels = mpl115_channels;
 	indio_dev->num_channels = ARRAY_SIZE(mpl115_channels);
 
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_A0);
+	ret = data->ops->init(data->dev);
+	if (ret)
+		return ret;
+
+	ret = data->ops->read(data->dev, MPL115_A0);
 	if (ret < 0)
 		return ret;
 	data->a0 = ret;
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_B1);
+	ret = data->ops->read(data->dev, MPL115_B1);
 	if (ret < 0)
 		return ret;
 	data->b1 = ret;
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_B2);
+	ret = data->ops->read(data->dev, MPL115_B2);
 	if (ret < 0)
 		return ret;
 	data->b2 = ret;
-	ret = i2c_smbus_read_word_swapped(data->client, MPL115_C12);
+	ret = data->ops->read(data->dev, MPL115_C12);
 	if (ret < 0)
 		return ret;
 	data->c12 = ret;
 
-	return devm_iio_device_register(&client->dev, indio_dev);
+	return devm_iio_device_register(dev, indio_dev);
 }
-
-static const struct i2c_device_id mpl115_id[] = {
-	{ "mpl115", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, mpl115_id);
-
-static struct i2c_driver mpl115_driver = {
-	.driver = {
-		.name	= "mpl115",
-	},
-	.probe = mpl115_probe,
-	.id_table = mpl115_id,
-};
-module_i2c_driver(mpl115_driver);
+EXPORT_SYMBOL_GPL(mpl115_probe);
 
 MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
 MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver");
diff --git a/drivers/iio/pressure/mpl115.h b/drivers/iio/pressure/mpl115.h
new file mode 100644
index 0000000..01b6527
--- /dev/null
+++ b/drivers/iio/pressure/mpl115.h
@@ -0,0 +1,24 @@
+/*
+ * Freescale MPL115A pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#ifndef _MPL115_H_
+#define _MPL115_H_
+
+struct mpl115_ops {
+	int (*init)(struct device *);
+	int (*read)(struct device *, u8);
+	int (*write)(struct device *, u8, u8);
+};
+
+int mpl115_probe(struct device *dev, const char *name,
+			const struct mpl115_ops *ops);
+
+#endif
diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c
new file mode 100644
index 0000000..9ea055c
--- /dev/null
+++ b/drivers/iio/pressure/mpl115_i2c.c
@@ -0,0 +1,67 @@
+/*
+ * Freescale MPL115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A2.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "mpl115.h"
+
+static int mpl115_i2c_init(struct device *dev)
+{
+	return 0;
+}
+
+static int mpl115_i2c_read(struct device *dev, u8 address)
+{
+	return i2c_smbus_read_word_swapped(to_i2c_client(dev), address);
+}
+
+static int mpl115_i2c_write(struct device *dev, u8 address, u8 value)
+{
+	return i2c_smbus_write_byte_data(to_i2c_client(dev), address, value);
+}
+
+static const struct mpl115_ops mpl115_i2c_ops = {
+	.init = mpl115_i2c_init,
+	.read = mpl115_i2c_read,
+	.write = mpl115_i2c_write,
+};
+
+static int mpl115_i2c_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+		return -ENODEV;
+
+	return mpl115_probe(&client->dev, id->name, &mpl115_i2c_ops);
+}
+
+static const struct i2c_device_id mpl115_i2c_id[] = {
+	{ "mpl115", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, mpl115_i2c_id);
+
+static struct i2c_driver mpl115_i2c_driver = {
+	.driver = {
+		.name	= "mpl115",
+	},
+	.probe = mpl115_i2c_probe,
+	.id_table = mpl115_i2c_id,
+};
+module_i2c_driver(mpl115_i2c_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL115A2 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl115_spi.c b/drivers/iio/pressure/mpl115_spi.c
new file mode 100644
index 0000000..9ebf55f
--- /dev/null
+++ b/drivers/iio/pressure/mpl115_spi.c
@@ -0,0 +1,106 @@
+/*
+ * Freescale MPL115A1 pressure/temperature sensor
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A1.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "mpl115.h"
+
+#define MPL115_SPI_WRITE(address)	((address) << 1)
+#define MPL115_SPI_READ(address)	(0x80 | (address) << 1)
+
+struct mpl115_spi_buf {
+	u8 tx[4];
+	u8 rx[4];
+};
+
+static int mpl115_spi_init(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct mpl115_spi_buf *buf;
+
+	buf = devm_kzalloc(dev, sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, buf);
+
+	return 0;
+}
+
+static int mpl115_spi_read(struct device *dev, u8 address)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct mpl115_spi_buf *buf = spi_get_drvdata(spi);
+	struct spi_transfer xfer = {
+		.tx_buf = buf->tx,
+		.rx_buf = buf->rx,
+		.len = 4,
+	};
+	int ret;
+
+	buf->tx[0] = MPL115_SPI_READ(address);
+	buf->tx[2] = MPL115_SPI_READ(address + 1);
+
+	ret = spi_sync_transfer(spi, &xfer, 1);
+	if (ret)
+		return ret;
+
+	return (buf->rx[1] << 8) | buf->rx[3];
+}
+
+static int mpl115_spi_write(struct device *dev, u8 address, u8 value)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct mpl115_spi_buf *buf = spi_get_drvdata(spi);
+	struct spi_transfer xfer = {
+		.tx_buf = buf->tx,
+		.len = 2,
+	};
+
+	buf->tx[0] = MPL115_SPI_WRITE(address);
+	buf->tx[1] = value;
+
+	return spi_sync_transfer(spi, &xfer, 1);
+}
+
+static const struct mpl115_ops mpl115_spi_ops = {
+	.init = mpl115_spi_init,
+	.read = mpl115_spi_read,
+	.write = mpl115_spi_write,
+};
+
+static int mpl115_spi_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+
+	return mpl115_probe(&spi->dev, id->name, &mpl115_spi_ops);
+}
+
+static const struct spi_device_id mpl115_spi_ids[] = {
+	{ "mpl115", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, mpl115_spi_ids);
+
+static struct spi_driver mpl115_spi_driver = {
+	.driver = {
+		.name   = "mpl115",
+	},
+	.probe = mpl115_spi_probe,
+	.id_table = mpl115_spi_ids,
+};
+module_spi_driver(mpl115_spi_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("Freescale MPL115A1 pressure/temperature driver");
+MODULE_LICENSE("GPL");
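For reference, the frame mpl115_spi_read() builds follows directly from the macros above: reading MPL115_PADC (address 0x00) loads tx[] = { 0x80, 0x00, 0x82, 0x00 }, i.e. 0x80 | 0<<1 for the MSB register and 0x80 | 1<<1 for the LSB register, each followed by a dummy byte, and the reply lands with the MSB in rx[1] and the LSB in rx[3], so the function returns (rx[1] << 8) | rx[3], the same MSB-first word that i2c_smbus_read_word_swapped() hands the I2C variant.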
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 23b93c7..2d70dd6 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -52,5 +52,6 @@
 };
 
 int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type);
+int ms5611_remove(struct iio_dev *indio_dev);
 
 #endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 2f3d9b4..c7885f0c 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -17,6 +17,9 @@
 #include <linux/iio/iio.h>
 #include <linux/delay.h>
 
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
 #include "ms5611.h"
 
 static bool ms5611_prom_is_valid(u16 *prom, size_t len)
@@ -173,6 +176,28 @@
 	return 0;
 }
 
+static irqreturn_t ms5611_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ms5611_state *st = iio_priv(indio_dev);
+	s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */
+	int ret;
+
+	mutex_lock(&st->lock);
+	ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]);
+	mutex_unlock(&st->lock);
+	if (ret < 0)
+		goto err;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
 static int ms5611_read_raw(struct iio_dev *indio_dev,
 			   struct iio_chan_spec const *chan,
 			   int *val, int *val2, long mask)
@@ -201,11 +226,25 @@
 		default:
 			return -EINVAL;
 		}
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_TEMP:
+			*val = 10;
+			return IIO_VAL_INT;
+		case IIO_PRESSURE:
+			*val = 0;
+			*val2 = 1000;
+			return IIO_VAL_INT_PLUS_MICRO;
+		default:
+			return -EINVAL;
+		}
 	}
 
 	return -EINVAL;
 }
 
+static const unsigned long ms5611_scan_masks[] = {0x3, 0};
+
 static struct ms5611_chip_info chip_info_tbl[] = {
 	[MS5611] = {
 		.temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
@@ -218,12 +257,29 @@
 static const struct iio_chan_spec ms5611_channels[] = {
 	{
 		.type = IIO_PRESSURE,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+			BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 32,
+			.storagebits = 32,
+			.endianness = IIO_CPU,
+		},
 	},
 	{
 		.type = IIO_TEMP,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
-	}
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+			BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 1,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 32,
+			.storagebits = 32,
+			.endianness = IIO_CPU,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(2),
 };
 
 static const struct iio_info ms5611_info = {
@@ -255,15 +311,43 @@
 	indio_dev->channels = ms5611_channels;
 	indio_dev->num_channels = ARRAY_SIZE(ms5611_channels);
 	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->available_scan_masks = ms5611_scan_masks;
 
 	ret = ms5611_init(indio_dev);
 	if (ret < 0)
 		return ret;
 
-	return devm_iio_device_register(dev, indio_dev);
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 ms5611_trigger_handler, NULL);
+	if (ret < 0) {
+		dev_err(dev, "iio triggered buffer setup failed\n");
+		return ret;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(dev, "unable to register iio device\n");
+		goto err_buffer_cleanup;
+	}
+
+	return 0;
+
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return ret;
 }
 EXPORT_SYMBOL(ms5611_probe);
 
+int ms5611_remove(struct iio_dev *indio_dev)
+{
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(ms5611_remove);
+
 MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
 MODULE_DESCRIPTION("MS5611 core driver");
 MODULE_LICENSE("GPL v2");
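With the triggered buffer wired up above, each scan pushed by ms5611_trigger_handler() holds the two signed 32-bit channel values (pressure at scan index 0, temperature at scan index 1) followed by the 64-bit timestamp that iio_push_to_buffers_with_timestamp() places at the next 8-byte-aligned offset, i.e. 16 bytes per scan with both channels enabled. A minimal userspace sketch of consuming that layout; the device node number is illustrative and it assumes the scan elements and the buffer have already been enabled through sysfs:

	#include <stdint.h>
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* Layout with both channels and the timestamp enabled:
	 * offset 0: s32 pressure, offset 4: s32 temperature, offset 8: s64 timestamp.
	 */
	struct ms5611_scan {
		int32_t pressure;
		int32_t temperature;
		int64_t timestamp;
	};

	int main(void)
	{
		struct ms5611_scan scan;
		int fd = open("/dev/iio:device0", O_RDONLY);	/* device number is illustrative */

		if (fd < 0)
			return 1;

		while (read(fd, &scan, sizeof(scan)) == (ssize_t)sizeof(scan))
			printf("p=%d t=%d ts=%lld\n", (int)scan.pressure,
			       (int)scan.temperature, (long long)scan.timestamp);

		close(fd);
		return 0;
	}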
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 245797d..42706a8 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -99,6 +99,7 @@
 		return -ENOMEM;
 
 	st = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
 	st->reset = ms5611_i2c_reset;
 	st->read_prom_word = ms5611_i2c_read_prom_word;
 	st->read_adc_temp_and_pressure = ms5611_i2c_read_adc_temp_and_pressure;
@@ -107,6 +108,11 @@
 	return ms5611_probe(indio_dev, &client->dev, id->driver_data);
 }
 
+static int ms5611_i2c_remove(struct i2c_client *client)
+{
+	return ms5611_remove(i2c_get_clientdata(client));
+}
+
 static const struct i2c_device_id ms5611_id[] = {
 	{ "ms5611", MS5611 },
 	{ "ms5607", MS5607 },
@@ -120,6 +126,7 @@
 	},
 	.id_table = ms5611_id,
 	.probe = ms5611_i2c_probe,
+	.remove = ms5611_i2c_remove,
 };
 module_i2c_driver(ms5611_driver);
 
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index aaa0c4b..c4bf4e8 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -90,6 +90,8 @@
 	if (!indio_dev)
 		return -ENOMEM;
 
+	spi_set_drvdata(spi, indio_dev);
+
 	spi->mode = SPI_MODE_0;
 	spi->max_speed_hz = 20000000;
 	spi->bits_per_word = 8;
@@ -107,6 +109,11 @@
 			    spi_get_device_id(spi)->driver_data);
 }
 
+static int ms5611_spi_remove(struct spi_device *spi)
+{
+	return ms5611_remove(spi_get_drvdata(spi));
+}
+
 static const struct spi_device_id ms5611_id[] = {
 	{ "ms5611", MS5611 },
 	{ "ms5607", MS5607 },
@@ -120,6 +127,7 @@
 	},
 	.id_table = ms5611_id,
 	.probe = ms5611_spi_probe,
+	.remove = ms5611_spi_remove,
 };
 module_spi_driver(ms5611_driver);
 
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index b39a2fb..172393a 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -62,6 +62,8 @@
 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR		0x22
 #define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK	0x04
 #define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK	0x20
+#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR		0x22
+#define ST_PRESS_LPS331AP_IHL_IRQ_MASK		0x80
 #define ST_PRESS_LPS331AP_MULTIREAD_BIT		true
 #define ST_PRESS_LPS331AP_TEMP_OFFSET		42500
 
@@ -100,6 +102,8 @@
 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR		0x23
 #define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK	0x01
 #define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK	0x10
+#define ST_PRESS_LPS25H_IHL_IRQ_ADDR		0x22
+#define ST_PRESS_LPS25H_IHL_IRQ_MASK		0x80
 #define ST_PRESS_LPS25H_MULTIREAD_BIT		true
 #define ST_PRESS_LPS25H_TEMP_OFFSET		42500
 #define ST_PRESS_LPS25H_OUT_XL_ADDR		0x28
@@ -220,6 +224,8 @@
 			.addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
 			.mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
+			.addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
+			.mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
 		},
 		.multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
 		.bootime = 2,
@@ -304,6 +310,8 @@
 			.addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
 			.mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
 			.mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
+			.addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
+			.mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
 		},
 		.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
 		.bootime = 2,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 054fc10..a216b46 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -440,7 +440,7 @@
 	  still useful.
 
 config BMP085
-	bool
+	tristate
 	depends on SYSFS
 
 config BMP085_I2C
@@ -470,7 +470,7 @@
 config PCH_PHUB
 	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
 	select GENERIC_NET_UTILS
-	depends on PCI && (X86_32 || COMPILE_TEST)
+	depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
 	help
 	  This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
 	  Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded
@@ -525,6 +525,284 @@
 	  ARM Ltd. Versatile Express uses specialised platform configuration
 	  bus. System Configuration interface is one of the possible means
 	  of generating transactions on this bus.
+config PANEL
+	tristate "Parallel port LCD/Keypad Panel support"
+	depends on PARPORT
+	---help---
+	  Say Y here if you have an HD44780 or KS-0074 LCD connected to your
+	  parallel port. This driver also features 4- and 6-key keypads. The LCD
+	  is accessible through the /dev/lcd char device (10, 156), and the
+	  keypad through /dev/keypad (10, 185). Both require misc device support
+	  to be enabled. This code can either be compiled as a module or linked
+	  into the kernel and started at boot. If you don't understand what all
+	  this is about, say N.
+
+config PANEL_PARPORT
+	int "Default parallel port number (0=LPT1)"
+	depends on PANEL
+	range 0 255
+	default "0"
+	---help---
+	  This is the index of the parallel port the panel is connected to. One
+	  driver instance only supports one parallel port, so if your keypad
+	  and LCD are connected to two separate ports, you have to start two
+	  modules with different arguments. Numbering starts with '0' for LPT1,
+	  and so on.
+
+config PANEL_PROFILE
+	int "Default panel profile (0-5, 0=custom)"
+	depends on PANEL
+	range 0 5
+	default "5"
+	---help---
+	  To ease configuration, the driver supports different configuration
+	  profiles for past and recent wirings. These profiles can also be
+	  used to define an approximate configuration, completed by a few
+	  other options. Here are the profiles:
+
+	    0 = custom (see below)
+	    1 = 2x16 parallel LCD, old keypad
+	    2 = 2x16 serial LCD (KS-0074), new keypad
+	    3 = 2x16 parallel LCD (Hantronix), no keypad
+	    4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
+	    5 = 2x40 parallel LCD (old one), with old keypad
+
+	  Custom configurations allow you to define how your display is
+	  wired to the parallel port, and how it works. This is only intended
+	  for experts.
+
+config PANEL_KEYPAD
+	depends on PANEL && PANEL_PROFILE="0"
+	int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
+	range 0 3
+	default 0
+	---help---
+	  This enables and configures a keypad connected to the parallel port.
+	  The keys will be read from character device 10,185. Valid values are:
+
+	    0 : do not enable this driver
+	    1 : old 6-key keypad
+	    2 : new 6-key keypad, as used on the server at www.ant-computing.com
+	    3 : Nexcom NSA1045's 4-key keypad
+
+	  New profiles can be described in the driver source. The driver also
+	  supports simultaneous key presses when the keypad supports them.
+
+config PANEL_LCD
+	depends on PANEL && PANEL_PROFILE="0"
+	int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
+	range 0 5
+	default 0
+	---help---
+	   This enables and configures an LCD connected to the parallel port.
+	   The driver includes an interpreter for escape codes starting with
+	   '\e[L' which are specific to the LCD, and a few ANSI codes. The
+	   driver will be registered as character device 10,156, usually
+	   under the name '/dev/lcd'. There are a total of 6 supported types:
+
+	     0 : do not enable the driver
+	     1 : custom configuration and wiring (see below)
+	     2 : 2x16 & 2x40 parallel LCD (old wiring)
+	     3 : 2x16 serial LCD (KS-0074 based)
+	     4 : 2x16 parallel LCD (Hantronix wiring)
+	     5 : 2x16 parallel LCD (Nexcom wiring)
+
+	   When type '1' is specified, other options will appear to configure
+	   more precise aspects (wiring, dimensions, protocol, ...). Please note
+	   that those values changed from the 2.4 driver for better consistency.
+
+config PANEL_LCD_HEIGHT
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+	int "Number of lines on the LCD (1-2)"
+	range 1 2
+	default 2
+	---help---
+	  This is the number of visible character lines on the LCD in the custom
+	  profile. It can either be 1 or 2.
+
+config PANEL_LCD_WIDTH
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+	int "Number of characters per line on the LCD (1-40)"
+	range 1 40
+	default 40
+	---help---
+	  This is the number of characters per line on the LCD in the custom
+	  profile. Common values are 16, 20, 24 and 40.
+
+config PANEL_LCD_BWIDTH
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+	int "Internal LCD line width (1-40, 40 by default)"
+	range 1 40
+	default 40
+	---help---
+	  Most LCDs use a standard controller which supports hardware lines of 40
+	  characters, although sometimes only 16, 20 or 24 of them are really wired
+	  to the terminal. This results in some non-visible but addressable characters,
+	  and is the case for most parallel LCDs. Other LCDs, and some serial ones,
+	  however, use the same line width internally as what is visible. The KS0074,
+	  for example, uses 16 characters per line for 16 visible characters per line.
+
+	  This option lets you configure the value used by your LCD in the 'custom'
+	  profile. If you don't know, put '40' here.
+
+config PANEL_LCD_HWIDTH
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+	int "Hardware LCD line width (1-64, 64 by default)"
+	range 1 64
+	default 64
+	---help---
+	  Most LCDs use a single address bit to differentiate line 0 and line 1. Since
+	  some of them need to be able to address 40 chars with the lower bits, they
+	  often use the next power of 2, which is 64, to address the
+	  next line.
+
+	  If you don't know what your LCD uses, when in doubt use 16 here for a
+	  2x16 and 64 for a 2x40.
+
+config PANEL_LCD_CHARSET
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+	int "LCD character set (0=normal, 1=KS0074)"
+	range 0 1
+	default 0
+	---help---
+	  Some controllers such as the KS0074 use a somewhat strange character set
+	  where many symbols are at unusual places. The driver knows how to map
+	  'standard' ASCII characters to the character sets used by these controllers.
+	  Valid values are:
+
+	     0 : normal (untranslated) character set
+	     1 : KS0074 character set
+
+	  If you don't know, use the normal one (0).
+
+config PANEL_LCD_PROTO
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+	int "LCD communication mode (0=parallel 8 bits, 1=serial)"
+	range 0 1
+	default 0
+	---help---
+	  This driver now supports any serial or parallel LCD wired to a parallel
+	  port. But before assigning signals, the driver needs to know if it will
+	  be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
+	  (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
+	  (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for an 8-bit
+	  parallel LCD, and 1 for a serial LCD.
+
+config PANEL_LCD_PIN_E
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+        int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+	range -17 17
+	default 14
+	---help---
+	  This describes the number of the parallel port pin to which the LCD 'E'
+	  signal has been connected. It can be :
+
+	          0 : no connection (eg: connected to ground)
+	      1..17 : directly connected to any of these pins on the DB25 plug
+	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+	  Default for the 'E' pin in custom profile is '14' (AUTOFEED).
+
+config PANEL_LCD_PIN_RS
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+        int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+	range -17 17
+	default 17
+	---help---
+	  This describes the number of the parallel port pin to which the LCD 'RS'
+	  signal has been connected. It can be :
+
+	          0 : no connection (eg: connected to ground)
+	      1..17 : directly connected to any of these pins on the DB25 plug
+	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+	  Default for the 'RS' pin in custom profile is '17' (SELECT IN).
+
+config PANEL_LCD_PIN_RW
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+        int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+	range -17 17
+	default 16
+	---help---
+	  This describes the number of the parallel port pin to which the LCD 'RW'
+	  signal has been connected. It can be :
+
+	          0 : no connection (eg: connected to ground)
+	      1..17 : directly connected to any of these pins on the DB25 plug
+	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+	  Default for the 'RW' pin in custom profile is '16' (INIT).
+
+config PANEL_LCD_PIN_SCL
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+        int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+	range -17 17
+	default 1
+	---help---
+	  This describes the number of the parallel port pin to which the serial
+	  LCD 'SCL' signal has been connected. It can be :
+
+	          0 : no connection (eg: connected to ground)
+	      1..17 : directly connected to any of these pins on the DB25 plug
+	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+	  Default for the 'SCL' pin in custom profile is '1' (STROBE).
+
+config PANEL_LCD_PIN_SDA
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+        int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+	range -17 17
+	default 2
+	---help---
+	  This describes the number of the parallel port pin to which the serial
+	  LCD 'SDA' signal has been connected. It can be :
+
+	          0 : no connection (eg: connected to ground)
+	      1..17 : directly connected to any of these pins on the DB25 plug
+	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+	  Default for the 'SDA' pin in custom profile is '2' (D0).
+
+config PANEL_LCD_PIN_BL
+	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+        int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+	range -17 17
+	default 0
+	---help---
+	  This describes the number of the parallel port pin to which the LCD 'BL' signal
+          has been connected. It can be :
+
+	          0 : no connection (eg: connected to ground)
+	      1..17 : directly connected to any of these pins on the DB25 plug
+	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+	  Default for the 'BL' pin in custom profile is '0' (uncontrolled).
+
+config PANEL_CHANGE_MESSAGE
+	depends on PANEL
+	bool "Change LCD initialization message ?"
+	default "n"
+	---help---
+	  This allows you to replace the boot message indicating the kernel version
+	  and the driver version with a custom message. This is useful on appliances
+	  where a simple 'Starting system' message can be enough to stop a customer
+	  from worrying.
+
+	  If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
+	  say 'N' and keep the default message with the version.
+
+config PANEL_BOOT_MESSAGE
+	depends on PANEL && PANEL_CHANGE_MESSAGE="y"
+	string "New initialization message"
+	default ""
+	---help---
+	  This allows you to replace the boot message indicating the kernel version
+	  and the driver version with a custom message. This is useful on appliances
+	  where a simple 'Starting system' message can be enough to stop a customer
+	  from worrying.
+
+	  An empty message will only clear the display at driver init time. Any other
+	  printf()-formatted message is valid and may include newlines and escape codes.
 
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 537d7f3..b2fb6dbf 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,3 +56,4 @@
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
+obj-$(CONFIG_PANEL)             += panel.o
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index a3e789b..dfb72ec 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1215,7 +1215,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int apds990x_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	apds990x_chip_off(chip);
@@ -1224,7 +1224,7 @@
 
 static int apds990x_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	/*
@@ -1240,7 +1240,7 @@
 #ifdef CONFIG_PM
 static int apds990x_runtime_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	apds990x_chip_off(chip);
@@ -1249,7 +1249,7 @@
 
 static int apds990x_runtime_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	apds990x_chip_on(chip);
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index c65b5ea..b3176ee 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -8,7 +8,6 @@
  * Author: Linus Walleij <triad@df.lth.se>
  */
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
@@ -328,20 +327,6 @@
 	return ret;
 }
 
-static int __exit charlcd_remove(struct platform_device *pdev)
-{
-	struct charlcd *lcd = platform_get_drvdata(pdev);
-
-	if (lcd) {
-		free_irq(lcd->irq, lcd);
-		iounmap(lcd->virtbase);
-		release_mem_region(lcd->phybase, lcd->physize);
-		kfree(lcd);
-	}
-
-	return 0;
-}
-
 static int charlcd_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -376,13 +361,8 @@
 	.driver = {
 		.name = DRIVERNAME,
 		.pm = &charlcd_pm_ops,
+		.suppress_bind_attrs = true,
 		.of_match_table = of_match_ptr(charlcd_match),
 	},
-	.remove = __exit_p(charlcd_remove),
 };
-
-module_platform_driver_probe(charlcd_driver, charlcd_probe);
-
-MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
-MODULE_DESCRIPTION("ARM Character LCD Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(charlcd_driver, charlcd_probe);
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 753d7ec..845466e 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1323,7 +1323,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int bh1770_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 
 	bh1770_chip_off(chip);
@@ -1333,7 +1333,7 @@
 
 static int bh1770_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 	int ret = 0;
 
@@ -1361,7 +1361,7 @@
 #ifdef CONFIG_PM
 static int bh1770_runtime_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 
 	bh1770_chip_off(chip);
@@ -1371,7 +1371,7 @@
 
 static int bh1770_runtime_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 
 	bh1770_chip_on(chip);
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index cc8645b..1922cb8 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -721,9 +721,7 @@
 				struct bin_attribute *attr,
 				char *buffer, loff_t offset, size_t count)
 {
-	struct c2port_device *c2dev =
-			dev_get_drvdata(container_of(kobj,
-						struct device, kobj));
+	struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
 	ssize_t ret;
 
 	/* Check the device and flash access status */
@@ -838,9 +836,7 @@
 				struct bin_attribute *attr,
 				char *buffer, loff_t offset, size_t count)
 {
-	struct c2port_device *c2dev =
-			dev_get_drvdata(container_of(kobj,
-						struct device, kobj));
+	struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
 	int ret;
 
 	/* Check the device access status */
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 02006f71..038af5d 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -386,8 +386,7 @@
 			       struct bin_attribute *bin_attr, char *buf,
 			       loff_t off, size_t count)
 {
-	struct cxl_afu *afu = to_cxl_afu(container_of(kobj,
-						      struct device, kobj));
+	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
 
 	return cxl_afu_read_err_buffer(afu, buf, off, count);
 }
@@ -467,7 +466,7 @@
 			       loff_t off, size_t count)
 {
 	struct afu_config_record *cr = to_cr(kobj);
-	struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
 
 	u64 i, j, val;
 
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 5d7c090..d105c25 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -289,7 +289,7 @@
 {
 	struct at24_data *at24;
 
-	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
+	at24 = dev_get_drvdata(kobj_to_dev(kobj));
 	return at24_read(at24, buf, off, count);
 }
 
@@ -420,7 +420,7 @@
 {
 	struct at24_data *at24;
 
-	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
+	at24 = dev_get_drvdata(kobj_to_dev(kobj));
 	return at24_write(at24, buf, off, count);
 }
 
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index f850ef5..3e9e5a28 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -139,7 +139,7 @@
 	struct device		*dev;
 	struct at25_data	*at25;
 
-	dev = container_of(kobj, struct device, kobj);
+	dev = kobj_to_dev(kobj);
 	at25 = dev_get_drvdata(dev);
 
 	return at25_ee_read(at25, buf, off, count);
@@ -273,7 +273,7 @@
 	struct device		*dev;
 	struct at25_data	*at25;
 
-	dev = container_of(kobj, struct device, kobj);
+	dev = kobj_to_dev(kobj);
 	at25 = dev_get_drvdata(dev);
 
 	return at25_ee_write(at25, buf, off, count);
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 7342fd6..3d1d551 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -84,7 +84,7 @@
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
+	struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
 	struct eeprom_data *data = i2c_get_clientdata(client);
 	u8 slice;
 
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index ff63f05..f62ab29 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -10,9 +10,13 @@
 
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
 #include <linux/sysfs.h>
@@ -25,6 +29,15 @@
 #define ADDR_ERAL	0x20
 #define ADDR_EWEN	0x30
 
+struct eeprom_93xx46_devtype_data {
+	unsigned int quirks;
+};
+
+static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+	.quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
+		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+};
+
 struct eeprom_93xx46_dev {
 	struct spi_device *spi;
 	struct eeprom_93xx46_platform_data *pdata;
@@ -33,6 +46,16 @@
 	int addrlen;
 };
 
+static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
+}
+
+static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+}
+
 static ssize_t
 eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
@@ -40,58 +63,73 @@
 {
 	struct eeprom_93xx46_dev *edev;
 	struct device *dev;
-	struct spi_message m;
-	struct spi_transfer t[2];
-	int bits, ret;
-	u16 cmd_addr;
+	ssize_t ret = 0;
 
-	dev = container_of(kobj, struct device, kobj);
+	dev = kobj_to_dev(kobj);
 	edev = dev_get_drvdata(dev);
 
-	cmd_addr = OP_READ << edev->addrlen;
-
-	if (edev->addrlen == 7) {
-		cmd_addr |= off & 0x7f;
-		bits = 10;
-	} else {
-		cmd_addr |= off & 0x3f;
-		bits = 9;
-	}
-
-	dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
-		cmd_addr, edev->spi->max_speed_hz);
-
-	spi_message_init(&m);
-	memset(t, 0, sizeof(t));
-
-	t[0].tx_buf = (char *)&cmd_addr;
-	t[0].len = 2;
-	t[0].bits_per_word = bits;
-	spi_message_add_tail(&t[0], &m);
-
-	t[1].rx_buf = buf;
-	t[1].len = count;
-	t[1].bits_per_word = 8;
-	spi_message_add_tail(&t[1], &m);
-
 	mutex_lock(&edev->lock);
 
 	if (edev->pdata->prepare)
 		edev->pdata->prepare(edev);
 
-	ret = spi_sync(edev->spi, &m);
-	/* have to wait at least Tcsl ns */
-	ndelay(250);
-	if (ret) {
-		dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
-			count, (int)off, ret);
+	while (count) {
+		struct spi_message m;
+		struct spi_transfer t[2] = { { 0 } };
+		u16 cmd_addr = OP_READ << edev->addrlen;
+		size_t nbytes = count;
+		int bits;
+		int err;
+
+		if (edev->addrlen == 7) {
+			cmd_addr |= off & 0x7f;
+			bits = 10;
+			if (has_quirk_single_word_read(edev))
+				nbytes = 1;
+		} else {
+			cmd_addr |= (off >> 1) & 0x3f;
+			bits = 9;
+			if (has_quirk_single_word_read(edev))
+				nbytes = 2;
+		}
+
+		dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+			cmd_addr, edev->spi->max_speed_hz);
+
+		spi_message_init(&m);
+
+		t[0].tx_buf = (char *)&cmd_addr;
+		t[0].len = 2;
+		t[0].bits_per_word = bits;
+		spi_message_add_tail(&t[0], &m);
+
+		t[1].rx_buf = buf;
+		t[1].len = count;
+		t[1].bits_per_word = 8;
+		spi_message_add_tail(&t[1], &m);
+
+		err = spi_sync(edev->spi, &m);
+		/* have to wait at least Tcsl ns */
+		ndelay(250);
+
+		if (err) {
+			dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+				nbytes, (int)off, err);
+			ret = err;
+			break;
+		}
+
+		buf += nbytes;
+		off += nbytes;
+		count -= nbytes;
+		ret += nbytes;
 	}
 
 	if (edev->pdata->finish)
 		edev->pdata->finish(edev);
 
 	mutex_unlock(&edev->lock);
-	return ret ? : count;
+	return ret;
 }
 
 static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -110,7 +148,13 @@
 		bits = 9;
 	}
 
-	dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+	if (has_quirk_instruction_length(edev)) {
+		cmd_addr <<= 2;
+		bits += 2;
+	}
+
+	dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
+			is_on ? "en" : "ds", cmd_addr, bits);
 
 	spi_message_init(&m);
 	memset(&t, 0, sizeof(t));
@@ -155,7 +199,7 @@
 		bits = 10;
 		data_len = 1;
 	} else {
-		cmd_addr |= off & 0x3f;
+		cmd_addr |= (off >> 1) & 0x3f;
 		bits = 9;
 		data_len = 2;
 	}
@@ -190,7 +234,7 @@
 	struct device *dev;
 	int i, ret, step = 1;
 
-	dev = container_of(kobj, struct device, kobj);
+	dev = kobj_to_dev(kobj);
 	edev = dev_get_drvdata(dev);
 
 	/* only write even number of bytes on 16-bit devices */
@@ -245,6 +289,13 @@
 		bits = 9;
 	}
 
+	if (has_quirk_instruction_length(edev)) {
+		cmd_addr <<= 2;
+		bits += 2;
+	}
+
+	dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
+
 	spi_message_init(&m);
 	memset(&t, 0, sizeof(t));
 
@@ -294,12 +345,100 @@
 }
 static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
 
+static void select_assert(void *context)
+{
+	struct eeprom_93xx46_dev *edev = context;
+
+	gpiod_set_value_cansleep(edev->pdata->select, 1);
+}
+
+static void select_deassert(void *context)
+{
+	struct eeprom_93xx46_dev *edev = context;
+
+	gpiod_set_value_cansleep(edev->pdata->select, 0);
+}
+
+static const struct of_device_id eeprom_93xx46_of_table[] = {
+	{ .compatible = "eeprom-93xx46", },
+	{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+	{}
+};
+MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+
+static int eeprom_93xx46_probe_dt(struct spi_device *spi)
+{
+	const struct of_device_id *of_id =
+		of_match_device(eeprom_93xx46_of_table, &spi->dev);
+	struct device_node *np = spi->dev.of_node;
+	struct eeprom_93xx46_platform_data *pd;
+	u32 tmp;
+	int gpio;
+	enum of_gpio_flags of_flags;
+	int ret;
+
+	pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "data-size", &tmp);
+	if (ret < 0) {
+		dev_err(&spi->dev, "data-size property not found\n");
+		return ret;
+	}
+
+	if (tmp == 8) {
+		pd->flags |= EE_ADDR8;
+	} else if (tmp == 16) {
+		pd->flags |= EE_ADDR16;
+	} else {
+		dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
+		return -EINVAL;
+	}
+
+	if (of_property_read_bool(np, "read-only"))
+		pd->flags |= EE_READONLY;
+
+	gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags);
+	if (gpio_is_valid(gpio)) {
+		unsigned long flags =
+			of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0;
+
+		ret = devm_gpio_request_one(&spi->dev, gpio, flags,
+					    "eeprom_93xx46_select");
+		if (ret)
+			return ret;
+
+		pd->select = gpio_to_desc(gpio);
+		pd->prepare = select_assert;
+		pd->finish = select_deassert;
+
+		gpiod_direction_output(pd->select, 0);
+	}
+
+	if (of_id->data) {
+		const struct eeprom_93xx46_devtype_data *data = of_id->data;
+
+		pd->quirks = data->quirks;
+	}
+
+	spi->dev.platform_data = pd;
+
+	return 0;
+}
+
 static int eeprom_93xx46_probe(struct spi_device *spi)
 {
 	struct eeprom_93xx46_platform_data *pd;
 	struct eeprom_93xx46_dev *edev;
 	int err;
 
+	if (spi->dev.of_node) {
+		err = eeprom_93xx46_probe_dt(spi);
+		if (err < 0)
+			return err;
+	}
+
 	pd = spi->dev.platform_data;
 	if (!pd) {
 		dev_err(&spi->dev, "missing platform data\n");
@@ -370,6 +509,7 @@
 static struct spi_driver eeprom_93xx46_driver = {
 	.driver = {
 		.name	= "93xx46",
+		.of_match_table = of_match_ptr(eeprom_93xx46_of_table),
 	},
 	.probe		= eeprom_93xx46_probe,
 	.remove		= eeprom_93xx46_remove,
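
The new DT path fills the same eeprom_93xx46_platform_data that board files already pass in, so the quirk flags introduced above can be supplied either way. A hedged sketch of the legacy board-file variant (bus number, chip select and array names are made up; only the structures and flag macros come from the driver header):

  #include <linux/spi/spi.h>
  #include <linux/eeprom_93xx46.h>

  static struct eeprom_93xx46_platform_data example_93xx46_pdata = {
  	/* 16-bit organisation, equivalent to data-size = <16> in DT */
  	.flags	= EE_ADDR16,
  	.quirks	= EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
  		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
  };

  static struct spi_board_info example_spi_devices[] __initdata = {
  	{
  		.modalias	= "93xx46",
  		.max_speed_hz	= 1000000,
  		.bus_num	= 0,
  		.chip_select	= 0,
  		.platform_data	= &example_93xx46_pdata,
  	},
  };

The array would then be handed to spi_register_board_info() from the board's init code, exactly as for any other SPI slave.
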
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 6ab31ef..c24c9b7 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -278,7 +278,7 @@
 				 struct attribute *attr, int n)
 {
 	unsigned int j;
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct genwqe_dev *cd = dev_get_drvdata(dev);
 	umode_t mode = attr->mode;
 
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 5bd1277..9fea49d 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -34,6 +34,7 @@
 #include <linux/kref.h>
 #include <linux/device.h>
 #include <linux/input.h>
+#include <linux/time64.h>
 
 /* Driver identification */
 #define DRIVER_NAME	"ibmasm"
@@ -53,9 +54,11 @@
 
 static inline char *get_timestamp(char *buf)
 {
-	struct timeval now;
-	do_gettimeofday(&now);
-	sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec);
+	struct timespec64 now;
+
+	ktime_get_real_ts64(&now);
+	sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec,
+				now.tv_nsec / NSEC_PER_USEC);
 	return buf;
 }
 
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 0c3bb7e..14b7d53 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -209,7 +209,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int lis3lv02d_i2c_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -219,7 +219,7 @@
 
 static int lis3lv02d_i2c_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	/*
@@ -238,7 +238,7 @@
 #ifdef CONFIG_PM
 static int lis3_i2c_runtime_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	lis3lv02d_poweroff(lis3);
@@ -247,7 +247,7 @@
 
 static int lis3_i2c_runtime_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	lis3lv02d_poweron(lis3);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 11fdadc..5c1351b 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -335,7 +335,7 @@
 	memset((void *)data, 0, 64);
 }
 
-static void execute_location(void *dst)
+static noinline void execute_location(void *dst)
 {
 	void (*func)(void) = dst;
 
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index d23384d..c49e1d2 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,6 +1,6 @@
 config INTEL_MEI
 	tristate "Intel Management Engine Interface"
-	depends on X86 && PCI && WATCHDOG_CORE
+	depends on X86 && PCI
 	help
 	  The Intel Management Engine (Intel ME) provides Manageability,
 	  Security and Media services for system containing Intel chipsets.
@@ -12,7 +12,7 @@
 config INTEL_MEI_ME
 	tristate "ME Enabled Intel Chipsets"
 	select INTEL_MEI
-	depends on X86 && PCI && WATCHDOG_CORE
+	depends on X86 && PCI
 	help
 	  MEI support for ME Enabled Intel chipsets.
 
@@ -37,7 +37,7 @@
 config INTEL_MEI_TXE
 	tristate "Intel Trusted Execution Environment with ME Interface"
 	select INTEL_MEI
-	depends on X86 && PCI && WATCHDOG_CORE
+	depends on X86 && PCI
 	help
 	  MEI Support for Trusted Execution Environment device on Intel SoCs
 
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 01447ca..59e6b0a 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -9,7 +9,6 @@
 mei-objs += client.o
 mei-objs += main.o
 mei-objs += amthif.o
-mei-objs += wd.o
 mei-objs += bus.o
 mei-objs += bus-fixup.o
 mei-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index cd0403f..194360a 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -50,7 +50,6 @@
 	dev->iamthif_current_cb = NULL;
 	dev->iamthif_canceled = false;
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
-	dev->iamthif_timer = 0;
 	dev->iamthif_stall_timer = 0;
 	dev->iamthif_open_count = 0;
 }
@@ -68,11 +67,14 @@
 	struct mei_cl *cl = &dev->iamthif_cl;
 	int ret;
 
+	if (mei_cl_is_connected(cl))
+		return 0;
+
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
 
 	mei_cl_init(cl, dev);
 
-	ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+	ret = mei_cl_link(cl);
 	if (ret < 0) {
 		dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
 		return ret;
@@ -80,32 +82,10 @@
 
 	ret = mei_cl_connect(cl, me_cl, NULL);
 
-	dev->iamthif_state = MEI_IAMTHIF_IDLE;
-
 	return ret;
 }
 
 /**
- * mei_amthif_find_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * Return:   returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
-						struct file *file)
-{
-	struct mei_cl_cb *cb;
-
-	list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list)
-		if (cb->file_object == file)
-			return cb;
-	return NULL;
-}
-
-
-/**
  * mei_amthif_read - read data from AMTHIF client
  *
  * @dev: the device structure
@@ -126,18 +106,11 @@
 {
 	struct mei_cl *cl = file->private_data;
 	struct mei_cl_cb *cb;
-	unsigned long timeout;
 	int rets;
 	int wait_ret;
 
-	/* Only possible if we are in timeout */
-	if (!cl) {
-		dev_err(dev->dev, "bad file ext.\n");
-		return -ETIME;
-	}
-
 	dev_dbg(dev->dev, "checking amthif data\n");
-	cb = mei_amthif_find_read_list_entry(dev, file);
+	cb = mei_cl_read_cb(cl, file);
 
 	/* Check for if we can block or not*/
 	if (cb == NULL && file->f_flags & O_NONBLOCK)
@@ -149,8 +122,9 @@
 		/* unlock the Mutex */
 		mutex_unlock(&dev->device_lock);
 
-		wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
-			(cb = mei_amthif_find_read_list_entry(dev, file)));
+		wait_ret = wait_event_interruptible(cl->rx_wait,
+					!list_empty(&cl->rd_completed) ||
+					!mei_cl_is_connected(cl));
 
 		/* Locking again the Mutex */
 		mutex_lock(&dev->device_lock);
@@ -158,7 +132,12 @@
 		if (wait_ret)
 			return -ERESTARTSYS;
 
-		dev_dbg(dev->dev, "woke up from sleep\n");
+		if (!mei_cl_is_connected(cl)) {
+			rets = -EBUSY;
+			goto out;
+		}
+
+		cb = mei_cl_read_cb(cl, file);
 	}
 
 	if (cb->status) {
@@ -168,24 +147,10 @@
 	}
 
 	dev_dbg(dev->dev, "Got amthif data\n");
-	dev->iamthif_timer = 0;
-
-	timeout = cb->read_time +
-		mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-	dev_dbg(dev->dev, "amthif timeout = %lud\n",
-			timeout);
-
-	if  (time_after(jiffies, timeout)) {
-		dev_dbg(dev->dev, "amthif Time out\n");
-		/* 15 sec for the message has expired */
-		list_del_init(&cb->list);
-		rets = -ETIME;
-		goto free;
-	}
 	/* if the whole message will fit remove it from the list */
 	if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
 		list_del_init(&cb->list);
-	else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
+	else if (cb->buf_idx <= *offset) {
 		/* end of the message has been reached */
 		list_del_init(&cb->list);
 		rets = 0;
@@ -195,9 +160,8 @@
 		 * remove message from deletion list
 		 */
 
-	dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
-	    cb->buf.size);
-	dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
+	dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n",
+		cb->buf.size, cb->buf_idx);
 
 	/* length is being truncated to PAGE_SIZE, however,
 	 * the buf_idx may point beyond */
@@ -229,7 +193,7 @@
  *
  * Return: 0 on success, <0 on failure.
  */
-static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
 {
 	struct mei_device *dev = cl->dev;
 	struct mei_cl_cb *cb;
@@ -248,7 +212,7 @@
 	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 
 	dev->iamthif_state = MEI_IAMTHIF_READING;
-	dev->iamthif_file_object = cb->file_object;
+	dev->iamthif_fp = cb->fp;
 	dev->iamthif_current_cb = cb;
 
 	return 0;
@@ -277,7 +241,7 @@
 
 	dev->iamthif_state = MEI_IAMTHIF_WRITING;
 	dev->iamthif_current_cb = cb;
-	dev->iamthif_file_object = cb->file_object;
+	dev->iamthif_fp = cb->fp;
 	dev->iamthif_canceled = false;
 
 	ret = mei_cl_write(cl, cb, false);
@@ -285,7 +249,7 @@
 		return ret;
 
 	if (cb->completed)
-		cb->status = mei_amthif_read_start(cl, cb->file_object);
+		cb->status = mei_amthif_read_start(cl, cb->fp);
 
 	return 0;
 }
@@ -304,8 +268,7 @@
 
 	dev->iamthif_canceled = false;
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
-	dev->iamthif_timer = 0;
-	dev->iamthif_file_object = NULL;
+	dev->iamthif_fp = NULL;
 
 	dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
 
@@ -329,17 +292,17 @@
 int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
 
-	struct mei_device *dev;
-
-	if (WARN_ON(!cl || !cl->dev))
-		return -ENODEV;
-
-	if (WARN_ON(!cb))
-		return -EINVAL;
-
-	dev = cl->dev;
+	struct mei_device *dev = cl->dev;
 
 	list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
+
+	/*
+	 * The previous request is still being processed; this one stays queued.
+	 */
+	if (dev->iamthif_state > MEI_IAMTHIF_IDLE &&
+	    dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE)
+		return 0;
+
 	return mei_amthif_run_next_cmd(dev);
 }
 
@@ -360,10 +323,10 @@
 {
 	unsigned int mask = 0;
 
-	poll_wait(file, &dev->iamthif_cl.wait, wait);
+	poll_wait(file, &dev->iamthif_cl.rx_wait, wait);
 
 	if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
-	    dev->iamthif_file_object == file) {
+	    dev->iamthif_fp == file) {
 
 		mask |= POLLIN | POLLRDNORM;
 		mei_amthif_run_next_cmd(dev);
@@ -393,7 +356,7 @@
 		return ret;
 
 	if (cb->completed)
-		cb->status = mei_amthif_read_start(cl, cb->file_object);
+		cb->status = mei_amthif_read_start(cl, cb->fp);
 
 	return 0;
 }
@@ -437,11 +400,12 @@
 /**
  * mei_amthif_complete - complete amthif callback.
  *
- * @dev: the device structure.
+ * @cl: host client
  * @cb: callback block.
  */
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
+	struct mei_device *dev = cl->dev;
 
 	if (cb->fop_type == MEI_FOP_WRITE) {
 		if (!cb->status) {
@@ -453,25 +417,22 @@
 		 * in case of error enqueue the write cb to complete read list
 		 * so it can be propagated to the reader
 		 */
-		list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
-		wake_up_interruptible(&dev->iamthif_cl.wait);
+		list_add_tail(&cb->list, &cl->rd_completed);
+		wake_up_interruptible(&cl->rx_wait);
 		return;
 	}
 
 	if (!dev->iamthif_canceled) {
 		dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
 		dev->iamthif_stall_timer = 0;
-		list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+		list_add_tail(&cb->list, &cl->rd_completed);
 		dev_dbg(dev->dev, "amthif read completed\n");
-		dev->iamthif_timer = jiffies;
-		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
-			dev->iamthif_timer);
 	} else {
 		mei_amthif_run_next_cmd(dev);
 	}
 
 	dev_dbg(dev->dev, "completing amthif call back.\n");
-	wake_up_interruptible(&dev->iamthif_cl.wait);
+	wake_up_interruptible(&cl->rx_wait);
 }
 
 /**
@@ -497,7 +458,7 @@
 	/* list all list member */
 	list_for_each_entry_safe(cb, next, mei_cb_list, list) {
 		/* check if list member associated with a file */
-		if (file == cb->file_object) {
+		if (file == cb->fp) {
 			/* check if cb equal to current iamthif cb */
 			if (dev->iamthif_current_cb == cb) {
 				dev->iamthif_current_cb = NULL;
@@ -523,13 +484,14 @@
  *
  * Return: true if callback removed from the list, false otherwise
  */
-static bool mei_clear_lists(struct mei_device *dev, struct file *file)
+static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
 {
 	bool removed = false;
+	struct mei_cl *cl = &dev->iamthif_cl;
 
 	/* remove callbacks associated with a file */
 	mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
-	if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list))
+	if (mei_clear_list(dev, file, &cl->rd_completed))
 		removed = true;
 
 	mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
@@ -546,7 +508,7 @@
 	/* check if iamthif_current_cb not NULL */
 	if (dev->iamthif_current_cb && !removed) {
 		/* check file and iamthif current cb association */
-		if (dev->iamthif_current_cb->file_object == file) {
+		if (dev->iamthif_current_cb->fp == file) {
 			/* remove cb */
 			mei_io_cb_free(dev->iamthif_current_cb);
 			dev->iamthif_current_cb = NULL;
@@ -569,7 +531,7 @@
 	if (dev->iamthif_open_count > 0)
 		dev->iamthif_open_count--;
 
-	if (dev->iamthif_file_object == file &&
+	if (dev->iamthif_fp == file &&
 	    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
 
 		dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 020de59..e9e6ea3 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -35,6 +35,9 @@
 #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
 			0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
 
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+			    0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
 #define MEI_UUID_ANY NULL_UUID_LE
 
 /**
@@ -48,8 +51,7 @@
  */
 static void number_of_connections(struct mei_cl_device *cldev)
 {
-	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
-			__func__, mei_me_cl_uuid(cldev->me_cl));
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
 
 	if (cldev->me_cl->props.max_number_of_connections > 1)
 		cldev->do_match = 0;
@@ -62,11 +64,36 @@
  */
 static void blacklist(struct mei_cl_device *cldev)
 {
-	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
-			__func__, mei_me_cl_uuid(cldev->me_cl));
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
 	cldev->do_match = 0;
 }
 
+/**
+ * mei_wd - wd client on the bus, change protocol version
+ *   as the API has changed.
+ *
+ * @cldev: me clients device
+ */
+#if IS_ENABLED(CONFIG_INTEL_MEI_ME)
+#include <linux/pci.h>
+#include "hw-me-regs.h"
+static void mei_wd(struct mei_cl_device *cldev)
+{
+	struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
+
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+	if (pdev->device == MEI_DEV_ID_WPT_LP ||
+	    pdev->device == MEI_DEV_ID_SPT ||
+	    pdev->device == MEI_DEV_ID_SPT_H)
+		cldev->me_cl->props.protocol_version = 0x2;
+
+	cldev->do_match = 1;
+}
+#else
+static inline void mei_wd(struct mei_cl_device *cldev) {}
+#endif /* CONFIG_INTEL_MEI_ME */
+
 struct mei_nfc_cmd {
 	u8 command;
 	u8 status;
@@ -208,12 +235,11 @@
 
 	bus = cldev->bus;
 
-	dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
-		__func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
 
 	mutex_lock(&bus->device_lock);
 	/* we need to connect to INFO GUID */
-	cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+	cl = mei_cl_alloc_linked(bus);
 	if (IS_ERR(cl)) {
 		ret = PTR_ERR(cl);
 		cl = NULL;
@@ -282,6 +308,7 @@
 	MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
 	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
 	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+	MEI_FIXUP(MEI_UUID_WD, mei_wd),
 };
 
 /**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0b05aa9..5d5996e 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -44,7 +44,7 @@
 			bool blocking)
 {
 	struct mei_device *bus;
-	struct mei_cl_cb *cb = NULL;
+	struct mei_cl_cb *cb;
 	ssize_t rets;
 
 	if (WARN_ON(!cl || !cl->dev))
@@ -53,6 +53,11 @@
 	bus = cl->dev;
 
 	mutex_lock(&bus->device_lock);
+	if (bus->dev_state != MEI_DEV_ENABLED) {
+		rets = -ENODEV;
+		goto out;
+	}
+
 	if (!mei_cl_is_connected(cl)) {
 		rets = -ENODEV;
 		goto out;
@@ -81,8 +86,6 @@
 
 out:
 	mutex_unlock(&bus->device_lock);
-	if (rets < 0)
-		mei_io_cb_free(cb);
 
 	return rets;
 }
@@ -109,6 +112,10 @@
 	bus = cl->dev;
 
 	mutex_lock(&bus->device_lock);
+	if (bus->dev_state != MEI_DEV_ENABLED) {
+		rets = -ENODEV;
+		goto out;
+	}
 
 	cb = mei_cl_read_cb(cl, NULL);
 	if (cb)
@@ -230,45 +237,55 @@
  * mei_cl_bus_notify_event - schedule notify cb on bus client
  *
  * @cl: host client
+ *
+ * Return: true if event was scheduled
+ *         false if the client is not waiting for event
  */
-void mei_cl_bus_notify_event(struct mei_cl *cl)
+bool mei_cl_bus_notify_event(struct mei_cl *cl)
 {
 	struct mei_cl_device *cldev = cl->cldev;
 
 	if (!cldev || !cldev->event_cb)
-		return;
+		return false;
 
 	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
-		return;
+		return false;
 
 	if (!cl->notify_ev)
-		return;
+		return false;
 
 	set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
 
 	schedule_work(&cldev->event_work);
 
 	cl->notify_ev = false;
+
+	return true;
 }
 
 /**
- * mei_cl_bus_rx_event  - schedule rx evenet
+ * mei_cl_bus_rx_event  - schedule rx event
  *
  * @cl: host client
+ *
+ * Return: true if event was scheduled
+ *         false if the client is not waiting for event
  */
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+bool mei_cl_bus_rx_event(struct mei_cl *cl)
 {
 	struct mei_cl_device *cldev = cl->cldev;
 
 	if (!cldev || !cldev->event_cb)
-		return;
+		return false;
 
 	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
-		return;
+		return false;
 
 	set_bit(MEI_CL_EVENT_RX, &cldev->events);
 
 	schedule_work(&cldev->event_work);
+
+	return true;
 }
 
 /**
@@ -398,7 +415,7 @@
 
 	if (!cl) {
 		mutex_lock(&bus->device_lock);
-		cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+		cl = mei_cl_alloc_linked(bus);
 		mutex_unlock(&bus->device_lock);
 		if (IS_ERR(cl))
 			return PTR_ERR(cl);
@@ -958,6 +975,22 @@
 	dev_dbg(bus->dev, "rescan end");
 }
 
+void mei_cl_bus_rescan_work(struct work_struct *work)
+{
+	struct mei_device *bus =
+		container_of(work, struct mei_device, bus_rescan_work);
+	struct mei_me_client *me_cl;
+
+	mutex_lock(&bus->device_lock);
+	me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
+	if (me_cl)
+		mei_amthif_host_init(bus, me_cl);
+	mei_me_cl_put(me_cl);
+	mutex_unlock(&bus->device_lock);
+
+	mei_cl_bus_rescan(bus);
+}
+
 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
 				struct module *owner)
 {
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index a6c87c7..bab17e4 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -359,7 +359,7 @@
  * Return: mei_cl_cb pointer or NULL;
  */
 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-				 struct file *fp)
+				 const struct file *fp)
 {
 	struct mei_cl_cb *cb;
 
@@ -368,7 +368,7 @@
 		return NULL;
 
 	INIT_LIST_HEAD(&cb->list);
-	cb->file_object = fp;
+	cb->fp = fp;
 	cb->cl = cl;
 	cb->buf_idx = 0;
 	cb->fop_type = type;
@@ -455,7 +455,8 @@
  * Return: cb on success and NULL on failure
  */
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-				  enum mei_cb_file_ops type, struct file *fp)
+				  enum mei_cb_file_ops type,
+				  const struct file *fp)
 {
 	struct mei_cl_cb *cb;
 
@@ -485,7 +486,7 @@
 	struct mei_cl_cb *cb;
 
 	list_for_each_entry(cb, &cl->rd_completed, list)
-		if (!fp || fp == cb->file_object)
+		if (!fp || fp == cb->fp)
 			return cb;
 
 	return NULL;
@@ -503,12 +504,12 @@
 	struct mei_cl_cb *cb, *next;
 
 	list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
-		if (!fp || fp == cb->file_object)
+		if (!fp || fp == cb->fp)
 			mei_io_cb_free(cb);
 
 
 	list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
-		if (!fp || fp == cb->file_object)
+		if (!fp || fp == cb->fp)
 			mei_io_cb_free(cb);
 }
 
@@ -535,7 +536,6 @@
 	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
 	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
 	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
 
 	mei_cl_read_cb_flush(cl, fp);
 
@@ -587,27 +587,23 @@
  * mei_cl_link - allocate host id in the host map
  *
  * @cl: host client
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  *
  * Return: 0 on success
  *	-EINVAL on incorrect values
  *	-EMFILE if open count exceeded.
  */
-int mei_cl_link(struct mei_cl *cl, int id)
+int mei_cl_link(struct mei_cl *cl)
 {
 	struct mei_device *dev;
 	long open_handle_count;
+	int id;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -EINVAL;
 
 	dev = cl->dev;
 
-	/* If Id is not assigned get one*/
-	if (id == MEI_HOST_CLIENT_ID_ANY)
-		id = find_first_zero_bit(dev->host_clients_map,
-					MEI_CLIENTS_MAX);
-
+	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
 	if (id >= MEI_CLIENTS_MAX) {
 		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
 		return -EMFILE;
@@ -648,7 +644,7 @@
 	if (!cl)
 		return 0;
 
-	/* wd and amthif might not be initialized */
+	/* amthif might not be initialized */
 	if (!cl->dev)
 		return 0;
 
@@ -670,31 +666,12 @@
 	return 0;
 }
 
-
-void mei_host_client_init(struct work_struct *work)
+void mei_host_client_init(struct mei_device *dev)
 {
-	struct mei_device *dev =
-		container_of(work, struct mei_device, init_work);
-	struct mei_me_client *me_cl;
-
-	mutex_lock(&dev->device_lock);
-
-
-	me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
-	if (me_cl)
-		mei_amthif_host_init(dev, me_cl);
-	mei_me_cl_put(me_cl);
-
-	me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
-	if (me_cl)
-		mei_wd_host_init(dev, me_cl);
-	mei_me_cl_put(me_cl);
-
 	dev->dev_state = MEI_DEV_ENABLED;
 	dev->reset_count = 0;
-	mutex_unlock(&dev->device_lock);
 
-	mei_cl_bus_rescan(dev);
+	schedule_work(&dev->bus_rescan_work);
 
 	pm_runtime_mark_last_busy(dev->dev);
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
@@ -726,6 +703,33 @@
 }
 
 /**
+ * mei_cl_wake_all - wake up readers, writers and event waiters so
+ *                 they can be interrupted
+ *
+ * @cl: host client
+ */
+static void mei_cl_wake_all(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+
+	/* synchronized under device mutex */
+	if (waitqueue_active(&cl->rx_wait)) {
+		cl_dbg(dev, cl, "Waking up reading client!\n");
+		wake_up_interruptible(&cl->rx_wait);
+	}
+	/* synchronized under device mutex */
+	if (waitqueue_active(&cl->tx_wait)) {
+		cl_dbg(dev, cl, "Waking up writing client!\n");
+		wake_up_interruptible(&cl->tx_wait);
+	}
+	/* synchronized under device mutex */
+	if (waitqueue_active(&cl->ev_wait)) {
+		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+		wake_up_interruptible(&cl->ev_wait);
+	}
+}
+
+/**
  * mei_cl_set_disconnected - set disconnected state and clear
  *   associated states and resources
  *
@@ -740,8 +744,11 @@
 		return;
 
 	cl->state = MEI_FILE_DISCONNECTED;
+	mei_io_list_free(&dev->write_list, cl);
+	mei_io_list_free(&dev->write_waiting_list, cl);
 	mei_io_list_flush(&dev->ctrl_rd_list, cl);
 	mei_io_list_flush(&dev->ctrl_wr_list, cl);
+	mei_cl_wake_all(cl);
 	cl->mei_flow_ctrl_creds = 0;
 	cl->timer_count = 0;
 
@@ -1034,7 +1041,7 @@
  * Return: 0 on success, <0 on failure.
  */
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-		   struct file *file)
+		  const struct file *file)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1119,11 +1126,10 @@
  * mei_cl_alloc_linked - allocate and link host client
  *
  * @dev: the device structure
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  *
  * Return: cl on success ERR_PTR on failure
  */
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
 {
 	struct mei_cl *cl;
 	int ret;
@@ -1134,7 +1140,7 @@
 		goto err;
 	}
 
-	ret = mei_cl_link(cl, id);
+	ret = mei_cl_link(cl);
 	if (ret)
 		goto err;
 
@@ -1149,11 +1155,12 @@
 /**
  * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
  *
- * @cl: private data of the file object
+ * @cl: host client
+ * @fp: the file pointer associated with the host client
  *
  * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
  */
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
 {
 	int rets;
 
@@ -1164,7 +1171,7 @@
 		return 1;
 
 	if (mei_cl_is_fixed_address(cl)) {
-		rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+		rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 		if (rets && rets != -EBUSY)
 			return rets;
 		return 1;
@@ -1186,7 +1193,7 @@
  *	0 on success
  *	-EINVAL when ctrl credits are <= 0
  */
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
 {
 	if (WARN_ON(!cl || !cl->me_cl))
 		return -EINVAL;
@@ -1283,7 +1290,8 @@
  *
 * Return: 0 on success and error otherwise.
  */
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+int mei_cl_notify_request(struct mei_cl *cl,
+			  const struct file *file, u8 request)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1368,12 +1376,12 @@
 
 	cl_dbg(dev, cl, "notify event");
 	cl->notify_ev = true;
-	wake_up_interruptible_all(&cl->ev_wait);
+	if (!mei_cl_bus_notify_event(cl))
+		wake_up_interruptible(&cl->ev_wait);
 
 	if (cl->ev_async)
 		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
 
-	mei_cl_bus_notify_event(cl);
 }
 
 /**
@@ -1422,6 +1430,25 @@
 }
 
 /**
+ * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
+ *                        for given host client
+ *
+ * @cl: host client
+ *
+ * Return: true, if found at least one cb.
+ */
+static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+	struct mei_cl_cb *cb;
+
+	list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
+		if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
+			return true;
+	return false;
+}
+
+/**
  * mei_cl_read_start - the start read client message function.
  *
  * @cl: host client
@@ -1430,7 +1457,7 @@
  *
  * Return: 0 on success, <0 on failure.
  */
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1445,7 +1472,7 @@
 		return -ENODEV;
 
 	/* HW currently supports only one pending read */
-	if (!list_empty(&cl->rd_pending))
+	if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
 		return -EBUSY;
 
 	if (!mei_me_cl_is_active(cl->me_cl)) {
@@ -1524,7 +1551,7 @@
 
 	first_chunk = cb->buf_idx == 0;
 
-	rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
+	rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
 	if (rets < 0)
 		return rets;
 
@@ -1556,7 +1583,7 @@
 		return 0;
 	}
 
-	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
+	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
 			cb->buf.size, cb->buf_idx);
 
 	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
@@ -1618,7 +1645,7 @@
 	if (rets < 0 && rets != -EINPROGRESS) {
 		pm_runtime_put_noidle(dev->dev);
 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
-		return rets;
+		goto free;
 	}
 
 	cb->buf_idx = 0;
@@ -1630,7 +1657,7 @@
 	mei_hdr.msg_complete = 0;
 	mei_hdr.internal = cb->internal;
 
-	rets = mei_cl_flow_ctrl_creds(cl);
+	rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
 	if (rets < 0)
 		goto err;
 
@@ -1677,7 +1704,8 @@
 
 		mutex_unlock(&dev->device_lock);
 		rets = wait_event_interruptible(cl->tx_wait,
-				cl->writing_state == MEI_WRITE_COMPLETE);
+				cl->writing_state == MEI_WRITE_COMPLETE ||
+				(!mei_cl_is_connected(cl)));
 		mutex_lock(&dev->device_lock);
 		/* wait_event_interruptible returns -ERESTARTSYS */
 		if (rets) {
@@ -1685,6 +1713,10 @@
 				rets = -EINTR;
 			goto err;
 		}
+		if (cl->writing_state != MEI_WRITE_COMPLETE) {
+			rets = -EFAULT;
+			goto err;
+		}
 	}
 
 	rets = size;
@@ -1692,6 +1724,8 @@
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
+free:
+	mei_io_cb_free(cb);
 
 	return rets;
 }
@@ -1721,10 +1755,8 @@
 
 	case MEI_FOP_READ:
 		list_add_tail(&cb->list, &cl->rd_completed);
-		if (waitqueue_active(&cl->rx_wait))
-			wake_up_interruptible_all(&cl->rx_wait);
-		else
-			mei_cl_bus_rx_event(cl);
+		if (!mei_cl_bus_rx_event(cl))
+			wake_up_interruptible(&cl->rx_wait);
 		break;
 
 	case MEI_FOP_CONNECT:
@@ -1753,44 +1785,3 @@
 	list_for_each_entry(cl, &dev->file_list, link)
 		mei_cl_set_disconnected(cl);
 }
-
-
-/**
- * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted
- *
- * @dev: mei device
- */
-void mei_cl_all_wakeup(struct mei_device *dev)
-{
-	struct mei_cl *cl;
-
-	list_for_each_entry(cl, &dev->file_list, link) {
-		if (waitqueue_active(&cl->rx_wait)) {
-			cl_dbg(dev, cl, "Waking up reading client!\n");
-			wake_up_interruptible(&cl->rx_wait);
-		}
-		if (waitqueue_active(&cl->tx_wait)) {
-			cl_dbg(dev, cl, "Waking up writing client!\n");
-			wake_up_interruptible(&cl->tx_wait);
-		}
-
-		/* synchronized under device mutex */
-		if (waitqueue_active(&cl->ev_wait)) {
-			cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
-			wake_up_interruptible(&cl->ev_wait);
-		}
-	}
-}
-
-/**
- * mei_cl_all_write_clear - clear all pending writes
- *
- * @dev: mei device
- */
-void mei_cl_all_write_clear(struct mei_device *dev)
-{
-	mei_io_list_free(&dev->write_list, NULL);
-	mei_io_list_free(&dev->write_waiting_list, NULL);
-}
-
-
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 04e1aa3..0d7a3a1 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -18,7 +18,6 @@
 #define _MEI_CLIENT_H_
 
 #include <linux/types.h>
-#include <linux/watchdog.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
 
@@ -84,7 +83,7 @@
  * MEI IO Functions
  */
 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-				 struct file *fp);
+				 const struct file *fp);
 void mei_io_cb_free(struct mei_cl_cb *priv_cb);
 int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
 
@@ -108,21 +107,19 @@
 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
 
 
-int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_link(struct mei_cl *cl);
 int mei_cl_unlink(struct mei_cl *cl);
 
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
 
 struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
 				 const struct file *fp);
 void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-				  enum mei_cb_file_ops type, struct file *fp);
+				  enum mei_cb_file_ops type,
+				  const struct file *fp);
 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
 
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
-
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
 /*
  *  MEI input output function prototype
  */
@@ -217,10 +214,10 @@
 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
 			  struct mei_cl_cb *cmpl_list);
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-		   struct file *file);
+		   const struct file *file);
 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
 			      struct mei_cl_cb *cmpl_list);
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
 int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
 			struct mei_cl_cb *cmpl_list);
 int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
@@ -229,19 +226,18 @@
 
 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 
-void mei_host_client_init(struct work_struct *work);
+void mei_host_client_init(struct mei_device *dev);
 
 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_notify_request(struct mei_cl *cl,
+			  const struct file *file, u8 request);
 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
 		      struct mei_cl_cb *cmpl_list);
 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
 void mei_cl_notify(struct mei_cl *cl);
 
 void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_wakeup(struct mei_device *dev);
-void mei_cl_all_write_clear(struct mei_device *dev);
 
 #define MEI_CL_FMT "cl:host=%02d me=%02d "
 #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
@@ -249,6 +245,9 @@
 #define cl_dbg(dev, cl, format, arg...) \
 	dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
 
+#define cl_warn(dev, cl, format, arg...) \
+	dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
 #define cl_err(dev, cl, format, arg...) \
 	dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
 
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a138d8a..c6c051b 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -50,6 +50,7 @@
 	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
 
 	/*  if the driver is not enabled the list won't be consistent */
 	if (dev->dev_state != MEI_DEV_ENABLED)
@@ -90,24 +91,38 @@
 {
 	struct mei_device *dev = fp->private_data;
 	struct mei_cl *cl;
-	const size_t bufsz = 1024;
+	size_t bufsz = 1;
 	char *buf;
 	int i = 0;
 	int pos = 0;
 	int ret;
 
+#define HDR "   |me|host|state|rd|wr|\n"
+
 	if (!dev)
 		return -ENODEV;
 
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if  (!buf)
-		return -ENOMEM;
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"  |me|host|state|rd|wr|\n");
-
 	mutex_lock(&dev->device_lock);
 
+	/*
+	 * if the driver is not enabled the list won't be consistent,
+	 * so we output an empty table
+	 */
+	if (dev->dev_state == MEI_DEV_ENABLED)
+		list_for_each_entry(cl, &dev->file_list, link)
+			bufsz++;
+
+	bufsz *= sizeof(HDR) + 1;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if  (!buf) {
+		mutex_unlock(&dev->device_lock);
+		return -ENOMEM;
+	}
+
+	pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
+
 	/*  if the driver is not enabled the list won't be consistent */
 	if (dev->dev_state != MEI_DEV_ENABLED)
 		goto out;
@@ -115,7 +130,7 @@
 	list_for_each_entry(cl, &dev->file_list, link) {
 
 		pos += scnprintf(buf + pos, bufsz - pos,
-			"%2d|%2d|%4d|%5d|%2d|%2d|\n",
+			"%3d|%2d|%4d|%5d|%2d|%2d|\n",
 			i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
 			!list_empty(&cl->rd_completed), cl->writing_state);
 		i++;
@@ -150,16 +165,21 @@
 	pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
 			mei_hbm_state_str(dev->hbm_state));
 
-	if (dev->hbm_state == MEI_HBM_STARTED) {
+	if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
+	    dev->hbm_state <= MEI_HBM_STARTED) {
 		pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
 		pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
 				 dev->hbm_f_pg_supported);
 		pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
 				 dev->hbm_f_dc_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n",
+				 dev->hbm_f_ie_supported);
 		pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
 				 dev->hbm_f_dot_supported);
 		pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
 				 dev->hbm_f_ev_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
+				 dev->hbm_f_fa_supported);
 	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",
@@ -175,6 +195,30 @@
 	.llseek = generic_file_llseek,
 };
 
+static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct mei_device *dev;
+	int ret;
+
+	dev = container_of(file->private_data,
+			   struct mei_device, allow_fixed_address);
+
+	ret = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (ret < 0)
+		return ret;
+	dev->override_fixed_address = true;
+	return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_allow_fa = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = mei_dbgfs_write_allow_fa,
+	.llseek = generic_file_llseek,
+};
+
 /**
  * mei_dbgfs_deregister - Remove the debugfs files and directories
  *
@@ -224,8 +268,9 @@
 		dev_err(dev->dev, "devstate: registration failed\n");
 		goto err;
 	}
-	f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
-				&dev->allow_fixed_address);
+	f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+				&dev->allow_fixed_address,
+				&mei_dbgfs_fops_allow_fa);
 	if (!f) {
 		dev_err(dev->dev, "allow_fixed_address: registration failed\n");
 		goto err;
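
allow_fixed_address moves from debugfs_create_bool() to a custom file so that any write, whatever the value, also records that userspace overrode the built-in default. The same pattern in isolation, with made-up names (debugfs_read_file_bool(), debugfs_write_file_bool() and simple_open() are the stock helpers the hunk relies on):

  #include <linux/debugfs.h>
  #include <linux/fs.h>

  struct example_dev {
  	bool allow_feature;		/* the exported bool */
  	bool feature_overridden;	/* latched on any write */
  };

  static ssize_t example_allow_write(struct file *file, const char __user *ubuf,
  				   size_t count, loff_t *ppos)
  {
  	/* simple_open() stored &example_dev.allow_feature in private_data */
  	struct example_dev *edev = container_of(file->private_data,
  						struct example_dev,
  						allow_feature);
  	ssize_t ret;

  	ret = debugfs_write_file_bool(file, ubuf, count, ppos);
  	if (ret < 0)
  		return ret;
  	edev->feature_overridden = true;
  	return ret;
  }

  static const struct file_operations example_allow_fops = {
  	.open	= simple_open,
  	.read	= debugfs_read_file_bool,
  	.write	= example_allow_write,
  	.llseek	= generic_file_llseek,
  };

The file is then created with debugfs_create_file(), passing &edev->allow_feature as the data pointer, just as the hunk above does for allow_fixed_address.
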
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e7b7aad..5e305d2 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -301,7 +301,10 @@
 	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
 	memset(enum_req, 0, len);
 	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-	enum_req->allow_add = dev->hbm_f_dc_supported;
+	enum_req->flags |= dev->hbm_f_dc_supported ?
+			   MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+	enum_req->flags |= dev->hbm_f_ie_supported ?
+			   MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
 
 	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
 	if (ret) {
@@ -401,6 +404,9 @@
 	if (ret)
 		status = !MEI_HBMS_SUCCESS;
 
+	if (dev->dev_state == MEI_DEV_ENABLED)
+		schedule_work(&dev->bus_rescan_work);
+
 	return mei_hbm_add_cl_resp(dev, req->me_addr, status);
 }
 
@@ -543,7 +549,7 @@
 	/* We got all client properties */
 	if (next_client_index == MEI_CLIENTS_MAX) {
 		dev->hbm_state = MEI_HBM_STARTED;
-		schedule_work(&dev->init_work);
+		mei_host_client_init(dev);
 
 		return 0;
 	}
@@ -789,8 +795,11 @@
 		cl->state = MEI_FILE_CONNECTED;
 	else {
 		cl->state = MEI_FILE_DISCONNECT_REPLY;
-		if (rs->status == MEI_CL_CONN_NOT_FOUND)
+		if (rs->status == MEI_CL_CONN_NOT_FOUND) {
 			mei_me_cl_del(dev, cl->me_cl);
+			if (dev->dev_state == MEI_DEV_ENABLED)
+				schedule_work(&dev->bus_rescan_work);
+		}
 	}
 	cl->status = mei_cl_conn_status_to_errno(rs->status);
 }
@@ -866,7 +875,7 @@
 
 	cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
 	if (cl) {
-		cl_dbg(dev, cl, "fw disconnect request received\n");
+		cl_warn(dev, cl, "fw disconnect request received\n");
 		cl->state = MEI_FILE_DISCONNECTING;
 		cl->timer_count = 0;
 
@@ -972,6 +981,9 @@
 	if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
 		dev->hbm_f_dc_supported = 1;
 
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
+		dev->hbm_f_ie_supported = 1;
+
 	/* disconnect on connect timeout instead of link reset */
 	if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
 		dev->hbm_f_dot_supported = 1;
@@ -979,6 +991,10 @@
 	/* Notification Event Support */
 	if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
 		dev->hbm_f_ev_supported = 1;
+
+	/* Fixed Address Client Support */
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
+		dev->hbm_f_fa_supported = 1;
 }
 
 /**
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 25b1997..e2fb44c 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -189,8 +189,11 @@
 
 	fw_status->count = fw_src->count;
 	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
-		ret = pci_read_config_dword(pdev,
-			fw_src->status[i], &fw_status->status[i]);
+		ret = pci_read_config_dword(pdev, fw_src->status[i],
+					    &fw_status->status[i]);
+		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+				       fw_src->status[i],
+				       fw_status->status[i]);
 		if (ret)
 			return ret;
 	}
@@ -215,6 +218,7 @@
 
 	reg = 0;
 	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
 	hw->d0i3_supported =
 		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
 
@@ -1248,6 +1252,7 @@
 	u32 reg;
 
 	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
 	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
 	return (reg & 0x600) == 0x200;
 }
@@ -1260,6 +1265,7 @@
 	u32 reg;
 	/* Read ME FW Status check for SPS Firmware */
 	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
 	/* if bits [19:16] = 15, running SPS Firmware */
 	return (reg & 0xf0000) == 0xf0000;
 }
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index bae680c..4a6c1b8 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -28,6 +28,9 @@
 #include "client.h"
 #include "hbm.h"
 
+#include "mei-trace.h"
+
+
 /**
  * mei_txe_reg_read - Reads 32bit data from the txe device
  *
@@ -640,8 +643,11 @@
 
 	fw_status->count = fw_src->count;
 	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
-		ret = pci_read_config_dword(pdev,
-			fw_src->status[i], &fw_status->status[i]);
+		ret = pci_read_config_dword(pdev, fw_src->status[i],
+					    &fw_status->status[i]);
+		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+				       fw_src->status[i],
+				       fw_status->status[i]);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 4cebde8..9daf3f9 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -29,7 +29,6 @@
 #define MEI_CLIENTS_INIT_TIMEOUT   15  /* HPS: Clients Enumeration Timeout */
 
 #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */
-#define MEI_IAMTHIF_READ_TIMER     10  /* HPS */
 
 #define MEI_PGI_TIMEOUT             1  /* PG Isolation time response 1 sec */
 #define MEI_D0I3_TIMEOUT            5  /* D0i3 set/unset max response time */
@@ -54,6 +53,12 @@
 #define HBM_MAJOR_VERSION_DC               2
 
 /*
+ * MEI version with immediate reply to enum request support
+ */
+#define HBM_MINOR_VERSION_IE               0
+#define HBM_MAJOR_VERSION_IE               2
+
+/*
  * MEI version with disconnect on connection timeout support
  */
 #define HBM_MINOR_VERSION_DOT              0
@@ -65,6 +70,12 @@
 #define HBM_MINOR_VERSION_EV               0
 #define HBM_MAJOR_VERSION_EV               2
 
+/*
+ * MEI version with fixed address client support
+ */
+#define HBM_MINOR_VERSION_FA               0
+#define HBM_MAJOR_VERSION_FA               2
+
 /* Host bus message command opcode */
 #define MEI_HBM_CMD_OP_MSK                  0x7f
 /* Host bus message command RESPONSE */
@@ -241,15 +252,26 @@
 } __packed;
 
 /**
- * struct hbm_host_enum_request -  enumeration request from host to fw
+ * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0)
  *
- * @hbm_cmd: bus message command header
- * @allow_add: allow dynamic clients add HBM version >= 2.0
+ * @MEI_HBM_ENUM_F_ALLOW_ADD: allow dynamic clients add
+ * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send answer immediately
+ */
+enum hbm_host_enum_flags {
+	MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0),
+	MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1),
+};
+
+/**
+ * struct hbm_host_enum_request - enumeration request from host to fw
+ *
+ * @hbm_cmd : bus message command header
+ * @flags   : request flags
  * @reserved: reserved
  */
 struct hbm_host_enum_request {
 	u8 hbm_cmd;
-	u8 allow_add;
+	u8 flags;
 	u8 reserved[2];
 } __packed;
 
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 3edafc8..f7c8dfd 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -91,8 +91,8 @@
  */
 void mei_cancel_work(struct mei_device *dev)
 {
-	cancel_work_sync(&dev->init_work);
 	cancel_work_sync(&dev->reset_work);
+	cancel_work_sync(&dev->bus_rescan_work);
 
 	cancel_delayed_work(&dev->timer_work);
 }
@@ -148,16 +148,10 @@
 	    state != MEI_DEV_POWER_UP) {
 
 		/* remove all waiting requests */
-		mei_cl_all_write_clear(dev);
-
 		mei_cl_all_disconnect(dev);
 
-		/* wake up all readers and writers so they can be interrupted */
-		mei_cl_all_wakeup(dev);
-
 		/* remove entry if already in list */
-		dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n");
-		mei_cl_unlink(&dev->wd_cl);
+		dev_dbg(dev->dev, "remove iamthif from the file list.\n");
 		mei_cl_unlink(&dev->iamthif_cl);
 		mei_amthif_reset_params(dev);
 	}
@@ -165,7 +159,6 @@
 	mei_hbm_reset(dev);
 
 	dev->rd_msg_hdr = 0;
-	dev->wd_pending = false;
 
 	if (ret) {
 		dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
@@ -335,16 +328,12 @@
 
 	mutex_lock(&dev->device_lock);
 
-	mei_wd_stop(dev);
-
 	dev->dev_state = MEI_DEV_POWER_DOWN;
 	mei_reset(dev);
 	/* move device to disabled state unconditionally */
 	dev->dev_state = MEI_DEV_DISABLED;
 
 	mutex_unlock(&dev->device_lock);
-
-	mei_watchdog_unregister(dev);
 }
 EXPORT_SYMBOL_GPL(mei_stop);
 
@@ -394,7 +383,6 @@
 	init_waitqueue_head(&dev->wait_hw_ready);
 	init_waitqueue_head(&dev->wait_pg);
 	init_waitqueue_head(&dev->wait_hbm_start);
-	init_waitqueue_head(&dev->wait_stop_wd);
 	dev->dev_state = MEI_DEV_INITIALIZING;
 	dev->reset_count = 0;
 
@@ -404,13 +392,11 @@
 	mei_io_list_init(&dev->ctrl_rd_list);
 
 	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
-	INIT_WORK(&dev->init_work, mei_host_client_init);
 	INIT_WORK(&dev->reset_work, mei_reset_work);
+	INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
 
-	INIT_LIST_HEAD(&dev->wd_cl.link);
 	INIT_LIST_HEAD(&dev->iamthif_cl.link);
 	mei_io_list_init(&dev->amthif_cmd_list);
-	mei_io_list_init(&dev->amthif_rd_complete_list);
 
 	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
 	dev->open_handle_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 64b568a..1e5cb1f 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -48,7 +48,7 @@
 
 		dev_dbg(dev->dev, "completing call back.\n");
 		if (cl == &dev->iamthif_cl)
-			mei_amthif_complete(dev, cb);
+			mei_amthif_complete(cl, cb);
 		else
 			mei_cl_complete(cl, cb);
 	}
@@ -104,6 +104,7 @@
 	struct mei_device *dev = cl->dev;
 	struct mei_cl_cb *cb;
 	unsigned char *buffer = NULL;
+	size_t buf_sz;
 
 	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
 	if (!cb) {
@@ -124,11 +125,21 @@
 		goto out;
 	}
 
-	if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
-		cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
+	buf_sz = mei_hdr->length + cb->buf_idx;
+	/* catch for integer overflow */
+	if (buf_sz < cb->buf_idx) {
+		cl_err(dev, cl, "message is too big len %d idx %zu\n",
+		       mei_hdr->length, cb->buf_idx);
+
+		list_move_tail(&cb->list, &complete_list->list);
+		cb->status = -EMSGSIZE;
+		goto out;
+	}
+
+	if (cb->buf.size < buf_sz) {
+		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
 			cb->buf.size, mei_hdr->length, cb->buf_idx);
-		buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
-				  GFP_KERNEL);
+		buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);
 
 		if (!buffer) {
 			cb->status = -ENOMEM;
@@ -136,7 +147,7 @@
 			goto out;
 		}
 		cb->buf.data = buffer;
-		cb->buf.size = mei_hdr->length + cb->buf_idx;
+		cb->buf.size = buf_sz;
 	}
 
 	buffer = cb->buf.data + cb->buf_idx;
@@ -145,8 +156,7 @@
 	cb->buf_idx += mei_hdr->length;
 
 	if (mei_hdr->msg_complete) {
-		cb->read_time = jiffies;
-		cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
+		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
 		list_move_tail(&cb->list, &complete_list->list);
 	} else {
 		pm_runtime_mark_last_busy(dev->dev);
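(Editorial note, illustration only; not part of this patch.) The new check above is the standard unsigned-wraparound guard; as a standalone sketch:

	/* Sketch only: adding the header length to the current read index can
	 * wrap past SIZE_MAX; when it does, the sum is smaller than the index,
	 * and the message is rejected with -EMSGSIZE as in the hunk above.
	 */
	static inline bool sketch_read_len_overflows(size_t buf_idx, u32 hdr_len)
	{
		size_t buf_sz = buf_idx + hdr_len;

		return buf_sz < buf_idx;
	}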
@@ -229,6 +239,16 @@
 	return 0;
 }
 
+static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
+{
+	return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
+}
+
+static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
+{
+	return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
+}
+
 /**
  * mei_irq_read_handler - bottom half read routine after ISR to
  * handle the read processing.
@@ -270,7 +290,7 @@
 	}
 
 	/*  HBM message */
-	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
+	if (hdr_is_hbm(mei_hdr)) {
 		ret = mei_hbm_dispatch(dev, mei_hdr);
 		if (ret) {
 			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
@@ -290,6 +310,14 @@
 
 	/* if no recipient cl was found we assume corrupted header */
 	if (&cl->link == &dev->file_list) {
+		/* A message for a fixed address client that is not
+		 * connected should be silently discarded
+		 */
+		if (hdr_is_fixed(mei_hdr)) {
+			mei_irq_discard_msg(dev, mei_hdr);
+			ret = 0;
+			goto reset_slots;
+		}
 		dev_err(dev->dev, "no destination client found 0x%08X\n",
 				dev->rd_msg_hdr);
 		ret = -EBADMSG;
@@ -360,21 +388,6 @@
 		list_move_tail(&cb->list, &cmpl_list->list);
 	}
 
-	if (dev->wd_state == MEI_WD_STOPPING) {
-		dev->wd_state = MEI_WD_IDLE;
-		wake_up(&dev->wait_stop_wd);
-	}
-
-	if (mei_cl_is_connected(&dev->wd_cl)) {
-		if (dev->wd_pending &&
-		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
-			ret = mei_wd_send(dev);
-			if (ret)
-				return ret;
-			dev->wd_pending = false;
-		}
-	}
-
 	/* complete control write list CB */
 	dev_dbg(dev->dev, "complete control write list cb.\n");
 	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
@@ -462,7 +475,6 @@
  */
 void mei_timer(struct work_struct *work)
 {
-	unsigned long timeout;
 	struct mei_cl *cl;
 
 	struct mei_device *dev = container_of(work,
@@ -508,45 +520,15 @@
 			mei_reset(dev);
 			dev->iamthif_canceled = false;
 			dev->iamthif_state = MEI_IAMTHIF_IDLE;
-			dev->iamthif_timer = 0;
 
 			mei_io_cb_free(dev->iamthif_current_cb);
 			dev->iamthif_current_cb = NULL;
 
-			dev->iamthif_file_object = NULL;
+			dev->iamthif_fp = NULL;
 			mei_amthif_run_next_cmd(dev);
 		}
 	}
 
-	if (dev->iamthif_timer) {
-
-		timeout = dev->iamthif_timer +
-			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
-		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
-				dev->iamthif_timer);
-		dev_dbg(dev->dev, "timeout = %ld\n", timeout);
-		dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
-		if (time_after(jiffies, timeout)) {
-			/*
-			 * User didn't read the AMTHI data on time (15sec)
-			 * freeing AMTHI for other requests
-			 */
-
-			dev_dbg(dev->dev, "freeing AMTHI for other requests\n");
-
-			mei_io_list_flush(&dev->amthif_rd_complete_list,
-				&dev->iamthif_cl);
-			mei_io_cb_free(dev->iamthif_current_cb);
-			dev->iamthif_current_cb = NULL;
-
-			dev->iamthif_file_object->private_data = NULL;
-			dev->iamthif_file_object = NULL;
-			dev->iamthif_timer = 0;
-			mei_amthif_run_next_cmd(dev);
-
-		}
-	}
 out:
 	if (dev->dev_state != MEI_DEV_DISABLED)
 		schedule_delayed_work(&dev->timer_work, 2 * HZ);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 80f9afc..52635b0 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -65,7 +65,7 @@
 		goto err_unlock;
 	}
 
-	cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+	cl = mei_cl_alloc_linked(dev);
 	if (IS_ERR(cl)) {
 		err = PTR_ERR(cl);
 		goto err_unlock;
@@ -159,27 +159,22 @@
 		goto out;
 	}
 
+	if (ubuf == NULL) {
+		rets = -EMSGSIZE;
+		goto out;
+	}
+
 	if (cl == &dev->iamthif_cl) {
 		rets = mei_amthif_read(dev, file, ubuf, length, offset);
 		goto out;
 	}
 
 	cb = mei_cl_read_cb(cl, file);
-	if (cb) {
-		/* read what left */
-		if (cb->buf_idx > *offset)
-			goto copy_buffer;
-		/* offset is beyond buf_idx we have no more data return 0 */
-		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
-			rets = 0;
-			goto free;
-		}
-		/* Offset needs to be cleaned for contiguous reads*/
-		if (cb->buf_idx == 0 && *offset > 0)
-			*offset = 0;
-	} else if (*offset > 0) {
+	if (cb)
+		goto copy_buffer;
+
+	if (*offset > 0)
 		*offset = 0;
-	}
 
 	err = mei_cl_read_start(cl, length, file);
 	if (err && err != -EBUSY) {
@@ -214,11 +209,6 @@
 
 	cb = mei_cl_read_cb(cl, file);
 	if (!cb) {
-		if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
-			cb = mei_cl_read_cb(cl, NULL);
-			if (cb)
-				goto copy_buffer;
-		}
 		rets = 0;
 		goto out;
 	}
@@ -231,10 +221,10 @@
 		goto free;
 	}
 
-	cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
-	    cb->buf.size, cb->buf_idx);
-	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
-		rets = -EMSGSIZE;
+	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
+	       cb->buf.size, cb->buf_idx, *offset);
+	if (*offset >= cb->buf_idx) {
+		rets = 0;
 		goto free;
 	}
 
@@ -250,11 +240,13 @@
 
 	rets = length;
 	*offset += length;
-	if ((unsigned long)*offset < cb->buf_idx)
+	/* not all data was read, keep the cb */
+	if (*offset < cb->buf_idx)
 		goto out;
 
 free:
 	mei_io_cb_free(cb);
+	*offset = 0;
 
 out:
 	cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
@@ -275,9 +267,8 @@
 			 size_t length, loff_t *offset)
 {
 	struct mei_cl *cl = file->private_data;
-	struct mei_cl_cb *write_cb = NULL;
+	struct mei_cl_cb *cb;
 	struct mei_device *dev;
-	unsigned long timeout = 0;
 	int rets;
 
 	if (WARN_ON(!cl || !cl->dev))
@@ -313,52 +304,31 @@
 		goto out;
 	}
 
-	if (cl == &dev->iamthif_cl) {
-		write_cb = mei_amthif_find_read_list_entry(dev, file);
-
-		if (write_cb) {
-			timeout = write_cb->read_time +
-				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
-			if (time_after(jiffies, timeout)) {
-				*offset = 0;
-				mei_io_cb_free(write_cb);
-				write_cb = NULL;
-			}
-		}
-	}
-
 	*offset = 0;
-	write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
-	if (!write_cb) {
+	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
+	if (!cb) {
 		rets = -ENOMEM;
 		goto out;
 	}
 
-	rets = copy_from_user(write_cb->buf.data, ubuf, length);
+	rets = copy_from_user(cb->buf.data, ubuf, length);
 	if (rets) {
 		dev_dbg(dev->dev, "failed to copy data from userland\n");
 		rets = -EFAULT;
+		mei_io_cb_free(cb);
 		goto out;
 	}
 
 	if (cl == &dev->iamthif_cl) {
-		rets = mei_amthif_write(cl, write_cb);
-
-		if (rets) {
-			dev_err(dev->dev,
-				"amthif write failed with status = %d\n", rets);
-			goto out;
-		}
-		mutex_unlock(&dev->device_lock);
-		return length;
+		rets = mei_amthif_write(cl, cb);
+		if (!rets)
+			rets = length;
+		goto out;
 	}
 
-	rets = mei_cl_write(cl, write_cb, false);
+	rets = mei_cl_write(cl, cb, false);
 out:
 	mutex_unlock(&dev->device_lock);
-	if (rets < 0)
-		mei_io_cb_free(write_cb);
 	return rets;
 }
 
@@ -393,12 +363,22 @@
 
 	/* find ME client we're trying to connect to */
 	me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
-	if (!me_cl ||
-	    (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
+	if (!me_cl) {
 		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
 			&data->in_client_uuid);
-		mei_me_cl_put(me_cl);
-		return  -ENOTTY;
+		rets = -ENOTTY;
+		goto end;
+	}
+
+	if (me_cl->props.fixed_address) {
+		bool forbidden = dev->override_fixed_address ?
+			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
+		if (forbidden) {
+			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
+				&data->in_client_uuid);
+			rets = -ENOTTY;
+			goto end;
+		}
 	}
 
 	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
@@ -454,7 +434,7 @@
  *
  * Return: 0 on success , <0 on error
  */
-static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
 {
 	struct mei_cl *cl = file->private_data;
 
@@ -473,7 +453,7 @@
  *
  * Return: 0 on success , <0 on error
  */
-static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
 {
 	struct mei_cl *cl = file->private_data;
 	bool notify_ev;
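(Editorial note, illustration only; not part of this patch.) The fixed-address connect policy added in the IOCTL path above reduces to a small predicate:

	/* Sketch only: connecting to a fixed address client is allowed when
	 * user space overrides the policy and permits it, or, absent an
	 * override, when the firmware advertises the fixed address HBM
	 * feature (hbm_f_fa_supported).
	 */
	static bool sketch_fixed_address_allowed(const struct mei_device *dev)
	{
		if (dev->override_fixed_address)
			return dev->allow_fixed_address;

		return dev->hbm_f_fa_supported;
	}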
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index 388efb5..e19e6ac 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -22,4 +22,6 @@
 
 EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
 EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
 #endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index 47e1bc6..7d2d5d4 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -60,7 +60,45 @@
 		__entry->offs = offs;
 		__entry->val = val;
 	),
-	TP_printk("[%s] write %s[%#x] = %#x)",
+	TP_printk("[%s] write %s[%#x] = %#x",
+		  __get_str(dev), __entry->reg,  __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_read,
+	TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+	TP_ARGS(dev, reg, offs, val),
+	TP_STRUCT__entry(
+		__string(dev, dev_name(dev))
+		__field(const char *, reg)
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		__assign_str(dev, dev_name(dev))
+		__entry->reg  = reg;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] pci cfg read %s:[%#x] = %#x",
+		  __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_write,
+	TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+	TP_ARGS(dev, reg, offs, val),
+	TP_STRUCT__entry(
+		__string(dev, dev_name(dev))
+		__field(const char *, reg)
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		__assign_str(dev, dev_name(dev))
+		__entry->reg = reg;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] pci cfg write %s[%#x] = %#x",
 		  __get_str(dev), __entry->reg,  __entry->offs, __entry->val)
 );
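(Editorial note, illustration only; not part of this patch.) A caller of the new PCI config tracepoints would typically wrap a config-space access; the register name and offset below are placeholders, not the driver's actual register map.

	/* Sketch only: emit the new tracepoint around a PCI config read. */
	static u32 sketch_cfg_read(struct pci_dev *pdev, int offset)
	{
		u32 val = 0;

		pci_read_config_dword(pdev, offset, &val);
		trace_mei_pci_cfg_read(&pdev->dev, "HFS", offset, val);
		return val;
	}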
 
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 4250555..db78e6d 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -18,7 +18,7 @@
 #define _MEI_DEV_H_
 
 #include <linux/types.h>
-#include <linux/watchdog.h>
+#include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
 #include <linux/mei_cl_bus.h>
@@ -26,33 +26,13 @@
 #include "hw.h"
 #include "hbm.h"
 
-/*
- * watch dog definition
- */
-#define MEI_WD_HDR_SIZE       4
-#define MEI_WD_STOP_MSG_SIZE  MEI_WD_HDR_SIZE
-#define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16)
-
-#define MEI_WD_DEFAULT_TIMEOUT   120  /* seconds */
-#define MEI_WD_MIN_TIMEOUT       120  /* seconds */
-#define MEI_WD_MAX_TIMEOUT     65535  /* seconds */
-
-#define MEI_WD_STOP_TIMEOUT      10 /* msecs */
-
-#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT       (1 << 0)
-
-#define MEI_RD_MSG_BUF_SIZE           (128 * sizeof(u32))
-
 
 /*
  * AMTHI Client UUID
  */
 extern const uuid_le mei_amthif_guid;
 
-/*
- * Watchdog Client UUID
- */
-extern const uuid_le mei_wd_guid;
+#define MEI_RD_MSG_BUF_SIZE           (128 * sizeof(u32))
 
 /*
  * Number of Maximum MEI Clients
@@ -73,15 +53,6 @@
  */
 #define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
 
-/*
- * Internal Clients Number
- */
-#define MEI_HOST_CLIENT_ID_ANY        (-1)
-#define MEI_HBM_HOST_CLIENT_ID         0 /* not used, just for documentation */
-#define MEI_WD_HOST_CLIENT_ID          1
-#define MEI_IAMTHIF_HOST_CLIENT_ID     2
-
-
 /* File state */
 enum file_state {
 	MEI_FILE_INITIALIZING = 0,
@@ -123,12 +94,6 @@
 	MEI_READ_COMPLETE
 };
 
-enum mei_wd_states {
-	MEI_WD_IDLE,
-	MEI_WD_RUNNING,
-	MEI_WD_STOPPING,
-};
-
 /**
  * enum mei_cb_file_ops  - file operation associated with the callback
  * @MEI_FOP_READ:       read
@@ -153,7 +118,7 @@
  * Intel MEI message data struct
  */
 struct mei_msg_data {
-	u32 size;
+	size_t size;
 	unsigned char *data;
 };
 
@@ -206,8 +171,7 @@
  * @fop_type: file operation type
  * @buf: buffer for data associated with the callback
  * @buf_idx: last read index
- * @read_time: last read operation time stamp (iamthif)
- * @file_object: pointer to file structure
+ * @fp: pointer to file structure
  * @status: io status of the cb
  * @internal: communication between driver and FW flag
  * @completed: the transfer or reception has completed
@@ -217,9 +181,8 @@
 	struct mei_cl *cl;
 	enum mei_cb_file_ops fop_type;
 	struct mei_msg_data buf;
-	unsigned long buf_idx;
-	unsigned long read_time;
-	struct file *file_object;
+	size_t buf_idx;
+	const struct file *fp;
 	int status;
 	u32 internal:1;
 	u32 completed:1;
@@ -341,12 +304,13 @@
 
 /* MEI bus API*/
 void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_bus_rescan_work(struct work_struct *work);
 void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 			bool blocking);
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
-void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_notify_event(struct mei_cl *cl);
+bool mei_cl_bus_rx_event(struct mei_cl *cl);
+bool mei_cl_bus_notify_event(struct mei_cl *cl);
 void mei_cl_bus_remove_devices(struct mei_device *bus);
 int mei_cl_bus_init(void);
 void mei_cl_bus_exit(void);
@@ -404,7 +368,6 @@
  * @wait_hw_ready : wait queue for receive HW ready message form FW
  * @wait_pg     : wait queue for receive PG message from FW
  * @wait_hbm_start : wait queue for receive HBM start message from FW
- * @wait_stop_wd : wait queue for receive WD stop message from FW
  *
  * @reset_count : number of consecutive resets
  * @dev_state   : device state
@@ -426,6 +389,8 @@
  * @hbm_f_dc_supported  : hbm feature dynamic clients
  * @hbm_f_dot_supported : hbm feature disconnect on timeout
  * @hbm_f_ev_supported  : hbm feature event notification
+ * @hbm_f_fa_supported  : hbm feature fixed address client
+ * @hbm_f_ie_supported  : hbm feature immediate reply to enum request
  *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
@@ -434,26 +399,19 @@
  * @me_client_index : last FW client index in enumeration
  *
  * @allow_fixed_address: allow user space to connect a fixed client
- *
- * @wd_cl       : watchdog client
- * @wd_state    : watchdog client state
- * @wd_pending  : watchdog command is pending
- * @wd_timeout  : watchdog expiration timeout
- * @wd_data     : watchdog message buffer
+ * @override_fixed_address: force allow fixed address behavior
  *
  * @amthif_cmd_list : amthif list for cmd waiting
- * @amthif_rd_complete_list : amthif list for reading completed cmd data
- * @iamthif_file_object : file for current amthif operation
+ * @iamthif_fp : file for current amthif operation
  * @iamthif_cl  : amthif host client
  * @iamthif_current_cb : amthif current operation callback
  * @iamthif_open_count : number of opened amthif connections
- * @iamthif_timer : time stamp of current amthif command completion
  * @iamthif_stall_timer : timer to detect amthif hang
  * @iamthif_state : amthif processor state
  * @iamthif_canceled : current amthif command is canceled
  *
- * @init_work   : work item for the device init
  * @reset_work  : work item for the device reset
+ * @bus_rescan_work : work item for the bus rescan
  *
  * @device_list : mei client bus list
  * @cl_bus_lock : client bus list lock
@@ -486,7 +444,6 @@
 	wait_queue_head_t wait_hw_ready;
 	wait_queue_head_t wait_pg;
 	wait_queue_head_t wait_hbm_start;
-	wait_queue_head_t wait_stop_wd;
 
 	/*
 	 * mei device  states
@@ -522,6 +479,8 @@
 	unsigned int hbm_f_dc_supported:1;
 	unsigned int hbm_f_dot_supported:1;
 	unsigned int hbm_f_ev_supported:1;
+	unsigned int hbm_f_fa_supported:1;
+	unsigned int hbm_f_ie_supported:1;
 
 	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
@@ -530,29 +489,21 @@
 	unsigned long me_client_index;
 
 	bool allow_fixed_address;
-
-	struct mei_cl wd_cl;
-	enum mei_wd_states wd_state;
-	bool wd_pending;
-	u16 wd_timeout;
-	unsigned char wd_data[MEI_WD_START_MSG_SIZE];
-
+	bool override_fixed_address;
 
 	/* amthif list for cmd waiting */
 	struct mei_cl_cb amthif_cmd_list;
 	/* driver managed amthif list for reading completed amthif cmd data */
-	struct mei_cl_cb amthif_rd_complete_list;
-	struct file *iamthif_file_object;
+	const struct file *iamthif_fp;
 	struct mei_cl iamthif_cl;
 	struct mei_cl_cb *iamthif_current_cb;
 	long iamthif_open_count;
-	unsigned long iamthif_timer;
 	u32 iamthif_stall_timer;
 	enum iamthif_states iamthif_state;
 	bool iamthif_canceled;
 
-	struct work_struct init_work;
 	struct work_struct reset_work;
+	struct work_struct bus_rescan_work;
 
 	/* List of bus devices */
 	struct list_head device_list;
@@ -635,47 +586,18 @@
 
 int mei_amthif_release(struct mei_device *dev, struct file *file);
 
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
-						struct file *file);
-
 int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_amthif_run_next_cmd(struct mei_device *dev);
 int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 			struct mei_cl_cb *cmpl_list);
 
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_amthif_irq_read_msg(struct mei_cl *cl,
 			    struct mei_msg_hdr *mei_hdr,
 			    struct mei_cl_cb *complete_list);
 int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
 
 /*
- * NFC functions
- */
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-void mei_nfc_host_exit(struct mei_device *dev);
-
-/*
- * NFC Client UUID
- */
-extern const uuid_le mei_nfc_guid;
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-/*
- * mei_watchdog_register  - Registering watchdog interface
- *   once we got connection to the WD Client
- * @dev: mei device
- */
-int mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister  - Unregistering watchdog interface
- * @dev: mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-/*
  * Register Access Function
  */
 
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 75fc9c6..996344f 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -210,7 +210,7 @@
 
 	err = mei_register(dev, &pdev->dev);
 	if (err)
-		goto release_irq;
+		goto stop;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -231,6 +231,8 @@
 
 	return 0;
 
+stop:
+	mei_stop(dev);
 release_irq:
 	mei_cancel_work(dev);
 	mei_disable_interrupts(dev);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 71f8a74..30cc306 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -154,7 +154,7 @@
 
 	err = mei_register(dev, &pdev->dev);
 	if (err)
-		goto release_irq;
+		goto stop;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -170,6 +170,8 @@
 
 	return 0;
 
+stop:
+	mei_stop(dev);
 release_irq:
 
 	mei_cancel_work(dev);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
deleted file mode 100644
index b346638..0000000
--- a/drivers/misc/mei/wd.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/watchdog.h>
-
-#include <linux/mei.h>
-
-#include "mei_dev.h"
-#include "hbm.h"
-#include "client.h"
-
-static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
-static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
-
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-
-/* UUIDs for AMT F/W clients */
-const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
-						0x9D, 0xA9, 0x15, 0x14, 0xCB,
-						0x32, 0xAB);
-
-static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
-{
-	dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout);
-	memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE);
-	memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16));
-}
-
-/**
- * mei_wd_host_init - connect to the watchdog client
- *
- * @dev: the device structure
- * @me_cl: me client
- *
- * Return: -ENOTTY if wd client cannot be found
- *         -EIO if write has failed
- *         0 on success
- */
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
-	struct mei_cl *cl = &dev->wd_cl;
-	int ret;
-
-	mei_cl_init(cl, dev);
-
-	dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
-	dev->wd_state = MEI_WD_IDLE;
-
-	ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
-	if (ret < 0) {
-		dev_info(dev->dev, "wd: failed link client\n");
-		return ret;
-	}
-
-	ret = mei_cl_connect(cl, me_cl, NULL);
-	if (ret) {
-		dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
-		mei_cl_unlink(cl);
-		return ret;
-	}
-
-	ret = mei_watchdog_register(dev);
-	if (ret) {
-		mei_cl_disconnect(cl);
-		mei_cl_unlink(cl);
-	}
-	return ret;
-}
-
-/**
- * mei_wd_send - sends watch dog message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success,
- *	-EIO when message send fails
- *	-EINVAL when invalid message is to be sent
- *	-ENODEV on flow control failure
- */
-int mei_wd_send(struct mei_device *dev)
-{
-	struct mei_cl *cl = &dev->wd_cl;
-	struct mei_msg_hdr hdr;
-	int ret;
-
-	hdr.host_addr = cl->host_client_id;
-	hdr.me_addr = mei_cl_me_id(cl);
-	hdr.msg_complete = 1;
-	hdr.reserved = 0;
-	hdr.internal = 0;
-
-	if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
-		hdr.length = MEI_WD_START_MSG_SIZE;
-	else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
-		hdr.length = MEI_WD_STOP_MSG_SIZE;
-	else {
-		dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n");
-		return -EINVAL;
-	}
-
-	ret = mei_write_message(dev, &hdr, dev->wd_data);
-	if (ret) {
-		dev_err(dev->dev, "wd: write message failed\n");
-		return ret;
-	}
-
-	ret = mei_cl_flow_ctrl_reduce(cl);
-	if (ret) {
-		dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-/**
- * mei_wd_stop - sends watchdog stop message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success
- * on error:
- *	-EIO    when message send fails
- *	-EINVAL when invalid message is to be sent
- *	-ETIME  on message timeout
- */
-int mei_wd_stop(struct mei_device *dev)
-{
-	struct mei_cl *cl = &dev->wd_cl;
-	int ret;
-
-	if (!mei_cl_is_connected(cl) ||
-	    dev->wd_state != MEI_WD_RUNNING)
-		return 0;
-
-	memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE);
-
-	dev->wd_state = MEI_WD_STOPPING;
-
-	ret = mei_cl_flow_ctrl_creds(cl);
-	if (ret < 0)
-		goto err;
-
-	if (ret && mei_hbuf_acquire(dev)) {
-		ret = mei_wd_send(dev);
-		if (ret)
-			goto err;
-		dev->wd_pending = false;
-	} else {
-		dev->wd_pending = true;
-	}
-
-	mutex_unlock(&dev->device_lock);
-
-	ret = wait_event_timeout(dev->wait_stop_wd,
-				dev->wd_state == MEI_WD_IDLE,
-				msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
-	mutex_lock(&dev->device_lock);
-	if (dev->wd_state != MEI_WD_IDLE) {
-		/* timeout */
-		ret = -ETIME;
-		dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret);
-		goto err;
-	}
-	dev_dbg(dev->dev, "wd: stop completed after %u msec\n",
-			MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
-	return 0;
-err:
-	return ret;
-}
-
-/**
- * mei_wd_ops_start - wd start command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_start(struct watchdog_device *wd_dev)
-{
-	struct mei_device *dev;
-	struct mei_cl *cl;
-	int err = -ENODEV;
-
-	dev = watchdog_get_drvdata(wd_dev);
-	if (!dev)
-		return -ENODEV;
-
-	cl = &dev->wd_cl;
-
-	mutex_lock(&dev->device_lock);
-
-	if (dev->dev_state != MEI_DEV_ENABLED) {
-		dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED  dev_state = %s\n",
-			mei_dev_state_str(dev->dev_state));
-		goto end_unlock;
-	}
-
-	if (!mei_cl_is_connected(cl)) {
-		cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
-		goto end_unlock;
-	}
-
-	mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
-	err = 0;
-end_unlock:
-	mutex_unlock(&dev->device_lock);
-	return err;
-}
-
-/**
- * mei_wd_ops_stop -  wd stop command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
-{
-	struct mei_device *dev;
-
-	dev = watchdog_get_drvdata(wd_dev);
-	if (!dev)
-		return -ENODEV;
-
-	mutex_lock(&dev->device_lock);
-	mei_wd_stop(dev);
-	mutex_unlock(&dev->device_lock);
-
-	return 0;
-}
-
-/**
- * mei_wd_ops_ping - wd ping command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
-{
-	struct mei_device *dev;
-	struct mei_cl *cl;
-	int ret;
-
-	dev = watchdog_get_drvdata(wd_dev);
-	if (!dev)
-		return -ENODEV;
-
-	cl = &dev->wd_cl;
-
-	mutex_lock(&dev->device_lock);
-
-	if (!mei_cl_is_connected(cl)) {
-		cl_err(dev, cl, "wd: not connected.\n");
-		ret = -ENODEV;
-		goto end;
-	}
-
-	dev->wd_state = MEI_WD_RUNNING;
-
-	ret = mei_cl_flow_ctrl_creds(cl);
-	if (ret < 0)
-		goto end;
-
-	/* Check if we can send the ping to HW*/
-	if (ret && mei_hbuf_acquire(dev)) {
-		dev_dbg(dev->dev, "wd: sending ping\n");
-
-		ret = mei_wd_send(dev);
-		if (ret)
-			goto end;
-		dev->wd_pending = false;
-	} else {
-		dev->wd_pending = true;
-	}
-
-end:
-	mutex_unlock(&dev->device_lock);
-	return ret;
-}
-
-/**
- * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- * @timeout: timeout value to set
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev,
-		unsigned int timeout)
-{
-	struct mei_device *dev;
-
-	dev = watchdog_get_drvdata(wd_dev);
-	if (!dev)
-		return -ENODEV;
-
-	/* Check Timeout value */
-	if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT)
-		return -EINVAL;
-
-	mutex_lock(&dev->device_lock);
-
-	dev->wd_timeout = timeout;
-	wd_dev->timeout = timeout;
-	mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
-	mutex_unlock(&dev->device_lock);
-
-	return 0;
-}
-
-/*
- * Watchdog Device structs
- */
-static const struct watchdog_ops wd_ops = {
-		.owner = THIS_MODULE,
-		.start = mei_wd_ops_start,
-		.stop = mei_wd_ops_stop,
-		.ping = mei_wd_ops_ping,
-		.set_timeout = mei_wd_ops_set_timeout,
-};
-static const struct watchdog_info wd_info = {
-		.identity = INTEL_AMT_WATCHDOG_ID,
-		.options = WDIOF_KEEPALIVEPING |
-			   WDIOF_SETTIMEOUT |
-			   WDIOF_ALARMONLY,
-};
-
-static struct watchdog_device amt_wd_dev = {
-		.info = &wd_info,
-		.ops = &wd_ops,
-		.timeout = MEI_WD_DEFAULT_TIMEOUT,
-		.min_timeout = MEI_WD_MIN_TIMEOUT,
-		.max_timeout = MEI_WD_MAX_TIMEOUT,
-};
-
-
-int mei_watchdog_register(struct mei_device *dev)
-{
-
-	int ret;
-
-	amt_wd_dev.parent = dev->dev;
-	/* unlock to perserve correct locking order */
-	mutex_unlock(&dev->device_lock);
-	ret = watchdog_register_device(&amt_wd_dev);
-	mutex_lock(&dev->device_lock);
-	if (ret) {
-		dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n",
-			ret);
-		return ret;
-	}
-
-	dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n");
-	watchdog_set_drvdata(&amt_wd_dev, dev);
-	return 0;
-}
-
-void mei_watchdog_unregister(struct mei_device *dev)
-{
-	if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
-		return;
-
-	watchdog_set_drvdata(&amt_wd_dev, NULL);
-	watchdog_unregister_device(&amt_wd_dev);
-}
-
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 40677df..2e4f3ba 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -32,12 +32,29 @@
 	  OS and tools for MIC to use with this driver are available from
 	  <http://software.intel.com/en-us/mic-developer>.
 
+comment "VOP Bus Driver"
+
+config VOP_BUS
+	tristate "VOP Bus Driver"
+	depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+	help
+	  This option is selected by any driver which registers a
+	  device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
+	  and CONFIG_INTEL_MIC_CARD.
+
+	  If you are building a host/card kernel with an Intel MIC device
+	  then say M (recommended) or Y, else say N. If unsure say N.
+
+	  More information about the Intel MIC family as well as the Linux
+	  OS and tools for MIC to use with this driver are available from
+	  <http://software.intel.com/en-us/mic-developer>.
+
 comment "Intel MIC Host Driver"
 
 config INTEL_MIC_HOST
 	tristate "Intel MIC Host Driver"
-	depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
-	select VHOST_RING
+	depends on 64BIT && PCI && X86
+	depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
 	help
 	  This enables Host Driver support for the Intel Many Integrated
 	  Core (MIC) family of PCIe form factor coprocessor devices that
@@ -56,7 +73,8 @@
 
 config INTEL_MIC_CARD
 	tristate "Intel MIC Card Driver"
-	depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
+	depends on 64BIT && X86
+	depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
 	select VIRTIO
 	help
 	  This enables card driver support for the Intel Many Integrated
@@ -107,3 +125,23 @@
 	  More information about the Intel MIC family as well as the Linux
 	  OS and tools for MIC to use with this driver are available from
 	  <http://software.intel.com/en-us/mic-developer>.
+
+comment "VOP Driver"
+
+config VOP
+	tristate "VOP Driver"
+	depends on 64BIT && PCI && X86 && VOP_BUS
+	select VHOST_RING
+	help
+	  This enables VOP (Virtio over PCIe) Driver support for the Intel
+	  Many Integrated Core (MIC) family of PCIe form factor coprocessor
+	  devices. The VOP driver allows virtio drivers, e.g. net, console
+	  and block drivers, on the card connect to user space virtio
+	  devices on the host.
+
+	  If you are building a host kernel with an Intel MIC device then
+	  say M (recommended) or Y, else say N. If unsure say N.
+
+	  More information about the Intel MIC family as well as the Linux
+	  OS and tools for MIC to use with this driver are available from
+	  <http://software.intel.com/en-us/mic-developer>.
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index e288a11..f2b1323 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_SCIF) += scif/
 obj-$(CONFIG_MIC_COSM) += cosm/
 obj-$(CONFIG_MIC_COSM) += cosm_client/
+obj-$(CONFIG_VOP) += vop/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index 761842b..8758a7d 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
 obj-$(CONFIG_SCIF_BUS) += scif_bus.o
 obj-$(CONFIG_MIC_COSM) += cosm_bus.o
+obj-$(CONFIG_VOP_BUS) += vop_bus.o
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h
index f7c57f2..8b63418 100644
--- a/drivers/misc/mic/bus/cosm_bus.h
+++ b/drivers/misc/mic/bus/cosm_bus.h
@@ -30,6 +30,7 @@
  * @attr_group: Pointer to list of sysfs attribute groups.
  * @sdev: Device for sysfs entries.
  * @state: MIC state.
+ * @prev_state: MIC state previous to MIC_RESETTING
  * @shutdown_status: MIC status reported by card for shutdown/crashes.
  * @shutdown_status_int: Internal shutdown status maintained by the driver
  * @cosm_mutex: Mutex for synchronizing access to data structures.
@@ -55,6 +56,7 @@
 	const struct attribute_group **attr_group;
 	struct device *sdev;
 	u8 state;
+	u8 prev_state;
 	u8 shutdown_status;
 	u8 shutdown_status_int;
 	struct mutex cosm_mutex;
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c
new file mode 100644
index 0000000..303da22
--- /dev/null
+++ b/drivers/misc/mic/bus/vop_bus.c
@@ -0,0 +1,203 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) Bus driver.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_bus.h"
+
+static ssize_t device_show(struct device *d,
+			   struct device_attribute *attr, char *buf)
+{
+	struct vop_device *dev = dev_to_vop(d);
+
+	return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+			   struct device_attribute *attr, char *buf)
+{
+	struct vop_device *dev = dev_to_vop(d);
+
+	return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+			     struct device_attribute *attr, char *buf)
+{
+	struct vop_device *dev = dev_to_vop(d);
+
+	return sprintf(buf, "vop:d%08Xv%08X\n",
+		       dev->id.device, dev->id.vendor);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *vop_dev_attrs[] = {
+	&dev_attr_device.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vop_dev);
+
+static inline int vop_id_match(const struct vop_device *dev,
+			       const struct vop_device_id *id)
+{
+	if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID)
+		return 0;
+
+	return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support.  If any of them
+ * match, we return 1 and the kernel will call vop_dev_probe().
+ */
+static int vop_dev_match(struct device *dv, struct device_driver *dr)
+{
+	unsigned int i;
+	struct vop_device *dev = dev_to_vop(dv);
+	const struct vop_device_id *ids;
+
+	ids = drv_to_vop(dr)->id_table;
+	for (i = 0; ids[i].device; i++)
+		if (vop_id_match(dev, &ids[i]))
+			return 1;
+	return 0;
+}
+
+static int vop_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+	struct vop_device *dev = dev_to_vop(dv);
+
+	return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X",
+			      dev->id.device, dev->id.vendor);
+}
+
+static int vop_dev_probe(struct device *d)
+{
+	struct vop_device *dev = dev_to_vop(d);
+	struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+	return drv->probe(dev);
+}
+
+static int vop_dev_remove(struct device *d)
+{
+	struct vop_device *dev = dev_to_vop(d);
+	struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+	drv->remove(dev);
+	return 0;
+}
+
+static struct bus_type vop_bus = {
+	.name  = "vop_bus",
+	.match = vop_dev_match,
+	.dev_groups = vop_dev_groups,
+	.uevent = vop_uevent,
+	.probe = vop_dev_probe,
+	.remove = vop_dev_remove,
+};
+
+int vop_register_driver(struct vop_driver *driver)
+{
+	driver->driver.bus = &vop_bus;
+	return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_register_driver);
+
+void vop_unregister_driver(struct vop_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_driver);
+
+static void vop_release_dev(struct device *d)
+{
+	put_device(d);
+}
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+		    const struct dma_map_ops *dma_ops,
+		    struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+		    struct dma_chan *chan)
+{
+	int ret;
+	struct vop_device *vdev;
+
+	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+	if (!vdev)
+		return ERR_PTR(-ENOMEM);
+
+	vdev->dev.parent = pdev;
+	vdev->id.device = id;
+	vdev->id.vendor = VOP_DEV_ANY_ID;
+	vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops;
+	vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
+	dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
+	vdev->dev.release = vop_release_dev;
+	vdev->hw_ops = hw_ops;
+	vdev->dev.bus = &vop_bus;
+	vdev->dnode = dnode;
+	vdev->aper = aper;
+	vdev->dma_ch = chan;
+	vdev->index = dnode - 1;
+	dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
+	/*
+	 * device_register() causes the bus infrastructure to look for a
+	 * matching driver.
+	 */
+	ret = device_register(&vdev->dev);
+	if (ret)
+		goto free_vdev;
+	return vdev;
+free_vdev:
+	kfree(vdev);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vop_register_device);
+
+void vop_unregister_device(struct vop_device *dev)
+{
+	device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_device);
+
+static int __init vop_init(void)
+{
+	return bus_register(&vop_bus);
+}
+
+static void __exit vop_exit(void)
+{
+	bus_unregister(&vop_bus);
+}
+
+core_initcall(vop_init);
+module_exit(vop_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) VOP Bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
new file mode 100644
index 0000000..fff7a86
--- /dev/null
+++ b/drivers/misc/mic/bus/vop_bus.h
@@ -0,0 +1,140 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio over PCIe Bus driver.
+ */
+#ifndef _VOP_BUS_H_
+#define _VOP_BUS_H_
+/*
+ * Everything a vop driver needs to work with any particular vop
+ * implementation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "../common/mic_dev.h"
+
+struct vop_device_id {
+	u32 device;
+	u32 vendor;
+};
+
+#define VOP_DEV_TRNSP 1
+#define VOP_DEV_ANY_ID 0xffffffff
+/*
+ * Size of the internal buffer used during DMAs as an intermediate buffer
+ * for copy to/from user. Must be an integral number of pages.
+ */
+#define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
+
+/**
+ * vop_device - representation of a device using vop
+ * @hw_ops: the hardware ops supported by this device.
+ * @id: the device type identification (used to match it with a driver).
+ * @dev: underlying device.
+ * @dnode: The destination node which this device will communicate with.
+ * @aper: Aperture memory window
+ * @dma_ch: DMA channel
+ * @index: unique position on the vop bus
+ */
+struct vop_device {
+	struct vop_hw_ops *hw_ops;
+	struct vop_device_id id;
+	struct device dev;
+	u8 dnode;
+	struct mic_mw *aper;
+	struct dma_chan *dma_ch;
+	int index;
+};
+
+/**
+ * vop_driver - operations for a vop I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found.  Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct vop_driver {
+	struct device_driver driver;
+	const struct vop_device_id *id_table;
+	int (*probe)(struct vop_device *dev);
+	void (*remove)(struct vop_device *dev);
+};
+
+/**
+ * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus.
+ *
+ * @next_db: Obtain the next available doorbell.
+ * @request_irq: Request an interrupt on a particular doorbell.
+ * @free_irq: Free an interrupt requested previously.
+ * @ack_interrupt: acknowledge an interrupt in the ISR.
+ * @get_remote_dp: Get access to the virtio device page used by the remote
+ *                 node to add/remove/configure virtio devices.
+ * @get_dp: Get access to the virtio device page used by the local
+ *          node to add/remove/configure virtio devices.
+ * @send_intr: Send an interrupt to the peer node on a specified doorbell.
+ * @ioremap: Map a buffer with the specified DMA address and length.
+ * @iounmap: Unmap a buffer previously mapped.
+ */
+struct vop_hw_ops {
+	int (*next_db)(struct vop_device *vpdev);
+	struct mic_irq *(*request_irq)(struct vop_device *vpdev,
+				       irqreturn_t (*func)(int irq, void *data),
+				       const char *name, void *data,
+				       int intr_src);
+	void (*free_irq)(struct vop_device *vpdev,
+			 struct mic_irq *cookie, void *data);
+	void (*ack_interrupt)(struct vop_device *vpdev, int num);
+	void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
+	void * (*get_dp)(struct vop_device *vpdev);
+	void (*send_intr)(struct vop_device *vpdev, int db);
+	void __iomem * (*ioremap)(struct vop_device *vpdev,
+				  dma_addr_t pa, size_t len);
+	void (*iounmap)(struct vop_device *vpdev, void __iomem *va);
+};
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+		    const struct dma_map_ops *dma_ops,
+		    struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+		    struct dma_chan *chan);
+void vop_unregister_device(struct vop_device *dev);
+int vop_register_driver(struct vop_driver *drv);
+void vop_unregister_driver(struct vop_driver *drv);
+
+/*
+ * module_vop_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_vop_driver(__vop_driver) \
+	module_driver(__vop_driver, vop_register_driver, \
+			vop_unregister_driver)
+
+static inline struct vop_device *dev_to_vop(struct device *dev)
+{
+	return container_of(dev, struct vop_device, dev);
+}
+
+static inline struct vop_driver *drv_to_vop(struct device_driver *drv)
+{
+	return container_of(drv, struct vop_driver, driver);
+}
+#endif /* _VOP_BUS_H_ */
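(Editorial note, illustration only; not part of this patch.) A minimal client of this bus, registered with module_vop_driver(), might look roughly like the sketch below; the "sketch_*" names are placeholders.

	/* Sketch only: a minimal VOP bus driver using the API declared above. */
	static const struct vop_device_id sketch_id_table[] = {
		{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
		{ 0 },
	};

	static int sketch_probe(struct vop_device *vpdev)
	{
		dev_dbg(&vpdev->dev, "probed, dnode %d\n", vpdev->dnode);
		return 0;
	}

	static void sketch_remove(struct vop_device *vpdev)
	{
		dev_dbg(&vpdev->dev, "removed\n");
	}

	static struct vop_driver sketch_vop_driver = {
		.driver = {
			.name  = "sketch_vop",
			.owner = THIS_MODULE,
		},
		.id_table = sketch_id_table,
		.probe    = sketch_probe,
		.remove   = sketch_remove,
	};

	module_vop_driver(sketch_vop_driver);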
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
index 69d58be..6e9675e 100644
--- a/drivers/misc/mic/card/Makefile
+++ b/drivers/misc/mic/card/Makefile
@@ -8,4 +8,3 @@
 mic_card-y += mic_x100.o
 mic_card-y += mic_device.o
 mic_card-y += mic_debugfs.o
-mic_card-y += mic_virtio.o
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index d0edaf7..e749af48 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -34,7 +34,6 @@
 #include <linux/mic_common.h>
 #include "../common/mic_dev.h"
 #include "mic_device.h"
-#include "mic_virtio.h"
 
 static struct mic_driver *g_drv;
 
@@ -250,12 +249,82 @@
 	.iounmap = ___mic_iounmap,
 };
 
+static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
+{
+	return dev_get_drvdata(vpdev->dev.parent);
+}
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+		  irqreturn_t (*func)(int irq, void *data),
+		   const char *name, void *data, int intr_src)
+{
+	return mic_request_card_irq(func, NULL, name, data, intr_src);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+			   struct mic_irq *cookie, void *data)
+{
+	return mic_free_card_irq(cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+	struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+	mic_ack_interrupt(&mdrv->mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+	return mic_next_card_db();
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+	struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+	return mdrv->dp;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+	struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+	mic_send_intr(&mdrv->mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+				   dma_addr_t pa, size_t len)
+{
+	struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+	return mic_card_map(&mdrv->mdev, pa, len);
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+	struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+	mic_card_unmap(&mdrv->mdev, va);
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+	.request_irq = __mic_request_irq,
+	.free_irq = __mic_free_irq,
+	.ack_interrupt = __mic_ack_interrupt,
+	.next_db = __mic_next_db,
+	.get_remote_dp = __mic_get_remote_dp,
+	.send_intr = __mic_send_intr,
+	.ioremap = __mic_ioremap,
+	.iounmap = __mic_iounmap,
+};
+
 static int mic_request_dma_chans(struct mic_driver *mdrv)
 {
 	dma_cap_mask_t mask;
 	struct dma_chan *chan;
 
-	request_module("mic_x100_dma");
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_MEMCPY, mask);
 
@@ -309,9 +378,13 @@
 		rc = -ENODEV;
 		goto irq_uninit;
 	}
-	rc = mic_devices_init(mdrv);
-	if (rc)
+	mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP,
+					  NULL, &vop_hw_ops, 0,
+					  NULL, mdrv->dma_ch[0]);
+	if (IS_ERR(mdrv->vpdev)) {
+		rc = PTR_ERR(mdrv->vpdev);
 		goto dma_free;
+	}
 	bootparam = mdrv->dp;
 	node_id = ioread8(&bootparam->node_id);
 	mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
@@ -321,13 +394,13 @@
 					   mdrv->num_dma_ch, true);
 	if (IS_ERR(mdrv->scdev)) {
 		rc = PTR_ERR(mdrv->scdev);
-		goto device_uninit;
+		goto vop_remove;
 	}
 	mic_create_card_debug_dir(mdrv);
 done:
 	return rc;
-device_uninit:
-	mic_devices_uninit(mdrv);
+vop_remove:
+	vop_unregister_device(mdrv->vpdev);
 dma_free:
 	mic_free_dma_chans(mdrv);
 irq_uninit:
@@ -348,7 +421,7 @@
 {
 	mic_delete_card_debug_dir(mdrv);
 	scif_unregister_device(mdrv->scdev);
-	mic_devices_uninit(mdrv);
+	vop_unregister_device(mdrv->vpdev);
 	mic_free_dma_chans(mdrv);
 	mic_uninit_irq();
 	mic_dp_uninit();
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 1dbf83c..333dbed 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
 
 /**
  * struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -76,6 +77,7 @@
  * @dma_ch - Array of DMA channels
  * @num_dma_ch - Number of DMA channels available
  * @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
  */
 struct mic_driver {
 	char name[20];
@@ -90,6 +92,7 @@
 	struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
 	int num_dma_ch;
 	struct scif_hw_dev *scdev;
+	struct vop_device *vpdev;
 };
 
 /**
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
deleted file mode 100644
index f6ed57d..0000000
--- a/drivers/misc/mic/card/mic_virtio.c
+++ /dev/null
@@ -1,634 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Adapted from:
- *
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * Intel MIC Card driver.
- *
- */
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/virtio_config.h>
-
-#include "../common/mic_dev.h"
-#include "mic_virtio.h"
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-#define MIC_MAX_VRINGS                4
-struct mic_vdev {
-	struct virtio_device vdev;
-	struct mic_device_desc __iomem *desc;
-	struct mic_device_ctrl __iomem *dc;
-	struct mic_device *mdev;
-	void __iomem *vr[MIC_MAX_VRINGS];
-	int used_size[MIC_MAX_VRINGS];
-	struct completion reset_done;
-	struct mic_irq *virtio_cookie;
-	int c2h_vdev_db;
-};
-
-static struct mic_irq *virtio_config_cookie;
-#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev)
-
-/* Helper API to obtain the parent of the virtio device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
-	return mvdev->vdev.dev.parent;
-}
-
-/* This gets the device's feature bits. */
-static u64 mic_get_features(struct virtio_device *vdev)
-{
-	unsigned int i, bits;
-	u32 features = 0;
-	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-	u8 __iomem *in_features = mic_vq_features(desc);
-	int feature_len = ioread8(&desc->feature_len);
-
-	bits = min_t(unsigned, feature_len, sizeof(features)) * 8;
-	for (i = 0; i < bits; i++)
-		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
-			features |= BIT(i);
-
-	return features;
-}
-
-static int mic_finalize_features(struct virtio_device *vdev)
-{
-	unsigned int i, bits;
-	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-	u8 feature_len = ioread8(&desc->feature_len);
-	/* Second half of bitmap is features we accept. */
-	u8 __iomem *out_features =
-		mic_vq_features(desc) + feature_len;
-
-	/* Give virtio_ring a chance to accept features. */
-	vring_transport_features(vdev);
-
-	/* Make sure we don't have any features > 32 bits! */
-	BUG_ON((u32)vdev->features != vdev->features);
-
-	memset_io(out_features, 0, feature_len);
-	bits = min_t(unsigned, feature_len,
-		sizeof(vdev->features)) * 8;
-	for (i = 0; i < bits; i++) {
-		if (__virtio_test_bit(vdev, i))
-			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
-				 &out_features[i / 8]);
-	}
-
-	return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void mic_get(struct virtio_device *vdev, unsigned int offset,
-		   void *buf, unsigned len)
-{
-	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
-	if (offset + len > ioread8(&desc->config_len))
-		return;
-	memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len);
-}
-
-static void mic_set(struct virtio_device *vdev, unsigned int offset,
-		   const void *buf, unsigned len)
-{
-	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
-	if (offset + len > ioread8(&desc->config_len))
-		return;
-	memcpy_toio(mic_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status
- * field of the device descriptor. set_status also interrupts the host
- * to tell about status changes.
- */
-static u8 mic_get_status(struct virtio_device *vdev)
-{
-	return ioread8(&to_micvdev(vdev)->desc->status);
-}
-
-static void mic_set_status(struct virtio_device *vdev, u8 status)
-{
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-	if (!status)
-		return;
-	iowrite8(status, &mvdev->desc->status);
-	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-}
-
-/* Inform host on a virtio device reset and wait for ack from host */
-static void mic_reset_inform_host(struct virtio_device *vdev)
-{
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-	struct mic_device_ctrl __iomem *dc = mvdev->dc;
-	int retry;
-
-	iowrite8(0, &dc->host_ack);
-	iowrite8(1, &dc->vdev_reset);
-	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-
-	/* Wait till host completes all card accesses and acks the reset */
-	for (retry = 100; retry--;) {
-		if (ioread8(&dc->host_ack))
-			break;
-		msleep(100);
-	};
-
-	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
-
-	/* Reset status to 0 in case we timed out */
-	iowrite8(0, &mvdev->desc->status);
-}
-
-static void mic_reset(struct virtio_device *vdev)
-{
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-
-	dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n",
-		__func__, vdev->id.device);
-
-	mic_reset_inform_host(vdev);
-	complete_all(&mvdev->reset_done);
-}
-
-/*
- * The virtio_ring code calls this API when it wants to notify the Host.
- */
-static bool mic_notify(struct virtqueue *vq)
-{
-	struct mic_vdev *mvdev = vq->priv;
-
-	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-	return true;
-}
-
-static void mic_del_vq(struct virtqueue *vq, int n)
-{
-	struct mic_vdev *mvdev = to_micvdev(vq->vdev);
-	struct vring *vr = (struct vring *)(vq + 1);
-
-	free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n]));
-	vring_del_virtqueue(vq);
-	mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
-	mvdev->vr[n] = NULL;
-}
-
-static void mic_del_vqs(struct virtio_device *vdev)
-{
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-	struct virtqueue *vq, *n;
-	int idx = 0;
-
-	dev_dbg(mic_dev(mvdev), "%s\n", __func__);
-
-	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
-		mic_del_vq(vq, idx++);
-}
-
-/*
- * This routine will assign vring's allocated in host/io memory. Code in
- * virtio_ring.c however continues to access this io memory as if it were local
- * memory without io accessors.
- */
-static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
-				     unsigned index,
-				     void (*callback)(struct virtqueue *vq),
-				     const char *name)
-{
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-	struct mic_vqconfig __iomem *vqconfig;
-	struct mic_vqconfig config;
-	struct virtqueue *vq;
-	void __iomem *va;
-	struct _mic_vring_info __iomem *info;
-	void *used;
-	int vr_size, _vr_size, err, magic;
-	struct vring *vr;
-	u8 type = ioread8(&mvdev->desc->type);
-
-	if (index >= ioread8(&mvdev->desc->num_vq))
-		return ERR_PTR(-ENOENT);
-
-	if (!name)
-		return ERR_PTR(-ENOENT);
-
-	/* First assign the vring's allocated in host memory */
-	vqconfig = mic_vq_config(mvdev->desc) + index;
-	memcpy_fromio(&config, vqconfig, sizeof(config));
-	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
-	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
-	va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
-	if (!va)
-		return ERR_PTR(-ENOMEM);
-	mvdev->vr[index] = va;
-	memset_io(va, 0x0, _vr_size);
-	vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
-				 MIC_VIRTIO_RING_ALIGN, vdev, false,
-				 (void __force *)va, mic_notify, callback,
-				 name);
-	if (!vq) {
-		err = -ENOMEM;
-		goto unmap;
-	}
-	info = va + _vr_size;
-	magic = ioread32(&info->magic);
-
-	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
-		err = -EIO;
-		goto unmap;
-	}
-
-	/* Allocate and reassign used ring now */
-	mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
-					     sizeof(struct vring_used_elem) *
-					     le16_to_cpu(config.num));
-	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(mvdev->used_size[index]));
-	if (!used) {
-		err = -ENOMEM;
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, err);
-		goto del_vq;
-	}
-	iowrite64(virt_to_phys(used), &vqconfig->used_address);
-
-	/*
-	 * To reassign the used ring here we are directly accessing
-	 * struct vring_virtqueue which is a private data structure
-	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
-	 * vring_new_virtqueue() would ensure that
-	 *  (&vq->vring == (struct vring *) (&vq->vq + 1));
-	 */
-	vr = (struct vring *)(vq + 1);
-	vr->used = used;
-
-	vq->priv = mvdev;
-	return vq;
-del_vq:
-	vring_del_virtqueue(vq);
-unmap:
-	mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
-	return ERR_PTR(err);
-}
-
-static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-			struct virtqueue *vqs[],
-			vq_callback_t *callbacks[],
-			const char * const names[])
-{
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-	struct mic_device_ctrl __iomem *dc = mvdev->dc;
-	int i, err, retry;
-
-	/* We must have this many virtqueues. */
-	if (nvqs > ioread8(&mvdev->desc->num_vq))
-		return -ENOENT;
-
-	for (i = 0; i < nvqs; ++i) {
-		dev_dbg(mic_dev(mvdev), "%s: %d: %s\n",
-			__func__, i, names[i]);
-		vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]);
-		if (IS_ERR(vqs[i])) {
-			err = PTR_ERR(vqs[i]);
-			goto error;
-		}
-	}
-
-	iowrite8(1, &dc->used_address_updated);
-	/*
-	 * Send an interrupt to the host to inform it that used
-	 * rings have been re-assigned.
-	 */
-	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-	for (retry = 100; retry--;) {
-		if (!ioread8(&dc->used_address_updated))
-			break;
-		msleep(100);
-	};
-
-	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
-	if (!retry) {
-		err = -ENODEV;
-		goto error;
-	}
-
-	return 0;
-error:
-	mic_del_vqs(vdev);
-	return err;
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static struct virtio_config_ops mic_vq_config_ops = {
-	.get_features = mic_get_features,
-	.finalize_features = mic_finalize_features,
-	.get = mic_get,
-	.set = mic_set,
-	.get_status = mic_get_status,
-	.set_status = mic_set_status,
-	.reset = mic_reset,
-	.find_vqs = mic_find_vqs,
-	.del_vqs = mic_del_vqs,
-};
-
-static irqreturn_t
-mic_virtio_intr_handler(int irq, void *data)
-{
-	struct mic_vdev *mvdev = data;
-	struct virtqueue *vq;
-
-	mic_ack_interrupt(mvdev->mdev);
-	list_for_each_entry(vq, &mvdev->vdev.vqs, list)
-		vring_interrupt(0, vq);
-
-	return IRQ_HANDLED;
-}
-
-static void mic_virtio_release_dev(struct device *_d)
-{
-	/*
-	 * No need for a release method similar to virtio PCI.
-	 * Provide an empty one to avoid getting a warning from core.
-	 */
-}
-
-/*
- * adds a new device and register it with virtio
- * appropriate drivers are loaded by the device model
- */
-static int mic_add_device(struct mic_device_desc __iomem *d,
-	unsigned int offset, struct mic_driver *mdrv)
-{
-	struct mic_vdev *mvdev;
-	int ret;
-	int virtio_db;
-	u8 type = ioread8(&d->type);
-
-	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
-	if (!mvdev) {
-		dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n",
-			offset, type);
-		return -ENOMEM;
-	}
-
-	mvdev->mdev = &mdrv->mdev;
-	mvdev->vdev.dev.parent = mdrv->dev;
-	mvdev->vdev.dev.release = mic_virtio_release_dev;
-	mvdev->vdev.id.device = type;
-	mvdev->vdev.config = &mic_vq_config_ops;
-	mvdev->desc = d;
-	mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d);
-	init_completion(&mvdev->reset_done);
-
-	virtio_db = mic_next_card_db();
-	mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
-			NULL, "virtio intr", mvdev, virtio_db);
-	if (IS_ERR(mvdev->virtio_cookie)) {
-		ret = PTR_ERR(mvdev->virtio_cookie);
-		goto kfree;
-	}
-	iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db);
-	mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db);
-
-	ret = register_virtio_device(&mvdev->vdev);
-	if (ret) {
-		dev_err(mic_dev(mvdev),
-			"Failed to register mic device %u type %u\n",
-			offset, type);
-		goto free_irq;
-	}
-	iowrite64((u64)mvdev, &mvdev->dc->vdev);
-	dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n",
-		__func__, offset, type, mvdev);
-
-	return 0;
-
-free_irq:
-	mic_free_card_irq(mvdev->virtio_cookie, mvdev);
-kfree:
-	kfree(mvdev);
-	return ret;
-}
-
-/*
- * match for a mic device with a specific desc pointer
- */
-static int mic_match_desc(struct device *dev, void *data)
-{
-	struct virtio_device *vdev = dev_to_virtio(dev);
-	struct mic_vdev *mvdev = to_micvdev(vdev);
-
-	return mvdev->desc == (void __iomem *)data;
-}
-
-static void mic_handle_config_change(struct mic_device_desc __iomem *d,
-	unsigned int offset, struct mic_driver *mdrv)
-{
-	struct mic_device_ctrl __iomem *dc
-		= (void __iomem *)d + mic_aligned_desc_size(d);
-	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
-
-	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
-		return;
-
-	dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
-	virtio_config_changed(&mvdev->vdev);
-	iowrite8(1, &dc->guest_ack);
-}
-
-/*
- * removes a virtio device if a hot remove event has been
- * requested by the host.
- */
-static int mic_remove_device(struct mic_device_desc __iomem *d,
-	unsigned int offset, struct mic_driver *mdrv)
-{
-	struct mic_device_ctrl __iomem *dc
-		= (void __iomem *)d + mic_aligned_desc_size(d);
-	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
-	u8 status;
-	int ret = -1;
-
-	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
-		dev_dbg(mdrv->dev,
-			"%s %d config_change %d type %d mvdev %p\n",
-			__func__, __LINE__,
-			ioread8(&dc->config_change), ioread8(&d->type), mvdev);
-
-		status = ioread8(&d->status);
-		reinit_completion(&mvdev->reset_done);
-		unregister_virtio_device(&mvdev->vdev);
-		mic_free_card_irq(mvdev->virtio_cookie, mvdev);
-		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
-			wait_for_completion(&mvdev->reset_done);
-		kfree(mvdev);
-		iowrite8(1, &dc->guest_ack);
-		dev_dbg(mdrv->dev, "%s %d guest_ack %d\n",
-			__func__, __LINE__, ioread8(&dc->guest_ack));
-		ret = 0;
-	}
-
-	return ret;
-}
-
-#define REMOVE_DEVICES true
-
-static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
-{
-	s8 type;
-	unsigned int i;
-	struct mic_device_desc __iomem *d;
-	struct mic_device_ctrl __iomem *dc;
-	struct device *dev;
-	int ret;
-
-	for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
-		i += mic_total_desc_size(d)) {
-		d = mdrv->dp + i;
-		dc = (void __iomem *)d + mic_aligned_desc_size(d);
-		/*
-		 * This read barrier is paired with the corresponding write
-		 * barrier on the host which is inserted before adding or
-		 * removing a virtio device descriptor, by updating the type.
-		 */
-		rmb();
-		type = ioread8(&d->type);
-
-		/* end of list */
-		if (type == 0)
-			break;
-
-		if (type == -1)
-			continue;
-
-		/* device already exists */
-		dev = device_find_child(mdrv->dev, (void __force *)d,
-					mic_match_desc);
-		if (dev) {
-			if (remove)
-				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
-					 &dc->config_change);
-			put_device(dev);
-			mic_handle_config_change(d, i, mdrv);
-			ret = mic_remove_device(d, i, mdrv);
-			if (!ret && !remove)
-				iowrite8(-1, &d->type);
-			if (remove) {
-				iowrite8(0, &dc->config_change);
-				iowrite8(0, &dc->guest_ack);
-			}
-			continue;
-		}
-
-		/* new device */
-		dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n",
-			__func__, __LINE__, d);
-		if (!remove)
-			mic_add_device(d, i, mdrv);
-	}
-}
-
-/*
- * mic_hotplug_device tries to find changes in the device page.
- */
-static void mic_hotplug_devices(struct work_struct *work)
-{
-	struct mic_driver *mdrv = container_of(work,
-		struct mic_driver, hotplug_work);
-
-	mic_scan_devices(mdrv, !REMOVE_DEVICES);
-}
-
-/*
- * Interrupt handler for hot plug/config changes etc.
- */
-static irqreturn_t
-mic_extint_handler(int irq, void *data)
-{
-	struct mic_driver *mdrv = (struct mic_driver *)data;
-
-	dev_dbg(mdrv->dev, "%s %d hotplug work\n",
-		__func__, __LINE__);
-	mic_ack_interrupt(&mdrv->mdev);
-	schedule_work(&mdrv->hotplug_work);
-	return IRQ_HANDLED;
-}
-
-/*
- * Init function for virtio
- */
-int mic_devices_init(struct mic_driver *mdrv)
-{
-	int rc;
-	struct mic_bootparam __iomem *bootparam;
-	int config_db;
-
-	INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices);
-	mic_scan_devices(mdrv, !REMOVE_DEVICES);
-
-	config_db = mic_next_card_db();
-	virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL,
-						    "virtio_config_intr", mdrv,
-						    config_db);
-	if (IS_ERR(virtio_config_cookie)) {
-		rc = PTR_ERR(virtio_config_cookie);
-		goto exit;
-	}
-
-	bootparam = mdrv->dp;
-	iowrite8(config_db, &bootparam->h2c_config_db);
-	return 0;
-exit:
-	return rc;
-}
-
-/*
- * Uninit function for virtio
- */
-void mic_devices_uninit(struct mic_driver *mdrv)
-{
-	struct mic_bootparam __iomem *bootparam = mdrv->dp;
-	iowrite8(-1, &bootparam->h2c_config_db);
-	mic_free_card_irq(virtio_config_cookie, mdrv);
-	flush_work(&mdrv->hotplug_work);
-	mic_scan_devices(mdrv, REMOVE_DEVICES);
-}
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
deleted file mode 100644
index d0407ba..0000000
--- a/drivers/misc/mic/card/mic_virtio.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- *
- */
-#ifndef __MIC_CARD_VIRTIO_H
-#define __MIC_CARD_VIRTIO_H
-
-#include <linux/mic_common.h>
-#include "mic_device.h"
-
-/*
- * 64 bit I/O access
- */
-#ifndef ioread64
-#define ioread64 readq
-#endif
-#ifndef iowrite64
-#define iowrite64 writeq
-#endif
-
-static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
-{
-	return sizeof(*desc)
-		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
-		+ ioread8(&desc->feature_len) * 2
-		+ ioread8(&desc->config_len);
-}
-
-static inline struct mic_vqconfig __iomem *
-mic_vq_config(struct mic_device_desc __iomem *desc)
-{
-	return (struct mic_vqconfig __iomem *)(desc + 1);
-}
-
-static inline __u8 __iomem *
-mic_vq_features(struct mic_device_desc __iomem *desc)
-{
-	return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq));
-}
-
-static inline __u8 __iomem *
-mic_vq_configspace(struct mic_device_desc __iomem *desc)
-{
-	return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2;
-}
-static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
-{
-	return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-
-int mic_devices_init(struct mic_driver *mdrv);
-void mic_devices_uninit(struct mic_driver *mdrv);
-
-#endif
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index b2958ce..b9f0710 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -326,6 +326,7 @@
 		goto done;
 	}
 
+	request_module("mic_x100_dma");
 	mic_init_card_debugfs();
 	ret = platform_device_register(&mic_platform_dev);
 	if (ret) {
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
index 4b4b356..7005cb1 100644
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -153,8 +153,10 @@
 		 * stop(..) calls device_unregister and will crash the system if
 		 * called multiple times.
 		 */
-		bool call_hw_ops = cdev->state != MIC_RESET_FAILED &&
-					cdev->state != MIC_READY;
+		u8 state = cdev->state == MIC_RESETTING ?
+					cdev->prev_state : cdev->state;
+		bool call_hw_ops = state != MIC_RESET_FAILED &&
+					state != MIC_READY;
 
 		if (cdev->state != MIC_RESETTING)
 			cosm_set_state(cdev, MIC_RESETTING);
@@ -195,8 +197,11 @@
 
 	mutex_lock(&cdev->cosm_mutex);
 	if (cdev->state != MIC_READY) {
-		cosm_set_state(cdev, MIC_RESETTING);
-		schedule_work(&cdev->reset_trigger_work);
+		if (cdev->state != MIC_RESETTING) {
+			cdev->prev_state = cdev->state;
+			cosm_set_state(cdev, MIC_RESETTING);
+			schedule_work(&cdev->reset_trigger_work);
+		}
 	} else {
 		dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
 		rc = -EINVAL;
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
index 004d3db..f3b5023 100644
--- a/drivers/misc/mic/host/Makefile
+++ b/drivers/misc/mic/host/Makefile
@@ -9,5 +9,3 @@
 mic_host-objs += mic_intr.o
 mic_host-objs += mic_boot.o
 mic_host-objs += mic_debugfs.o
-mic_host-objs += mic_fops.o
-mic_host-objs += mic_virtio.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 7845564..8c91c99 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -25,10 +25,117 @@
 #include <linux/mic_common.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
-#include "mic_virtio.h"
+
+static inline struct mic_device *vpdev_to_mdev(struct device *dev)
+{
+	return dev_get_drvdata(dev->parent);
+}
+
+static dma_addr_t
+_mic_dma_map_page(struct device *dev, struct page *page,
+		  unsigned long offset, size_t size,
+		  enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	void *va = phys_to_virt(page_to_phys(page)) + offset;
+	struct mic_device *mdev = vpdev_to_mdev(dev);
+
+	return mic_map_single(mdev, va, size);
+}
+
+static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction dir,
+				struct dma_attrs *attrs)
+{
+	struct mic_device *mdev = vpdev_to_mdev(dev);
+
+	mic_unmap_single(mdev, dma_addr, size);
+}
+
+static const struct dma_map_ops _mic_dma_ops = {
+	.map_page = _mic_dma_map_page,
+	.unmap_page = _mic_dma_unmap_page,
+};
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+		  irqreturn_t (*func)(int irq, void *data),
+		  const char *name, void *data, int intr_src)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	return mic_request_threaded_irq(mdev, func, NULL, name, data,
+					intr_src, MIC_INTR_DB);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+			   struct mic_irq *cookie, void *data)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	return mic_free_irq(mdev, cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	mdev->ops->intr_workarounds(mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	return mic_next_db(mdev);
+}
+
+static void *__mic_get_dp(struct vop_device *vpdev)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	return mdev->dp;
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+	return NULL;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	mdev->ops->send_intr(mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+				   dma_addr_t pa, size_t len)
+{
+	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+	return mdev->aper.va + pa;
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+	/* nothing to do */
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+	.request_irq = __mic_request_irq,
+	.free_irq = __mic_free_irq,
+	.ack_interrupt = __mic_ack_interrupt,
+	.next_db = __mic_next_db,
+	.get_dp = __mic_get_dp,
+	.get_remote_dp = __mic_get_remote_dp,
+	.send_intr = __mic_send_intr,
+	.ioremap = __mic_ioremap,
+	.iounmap = __mic_iounmap,
+};
 
 static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
 {
@@ -315,7 +422,6 @@
 	dma_cap_mask_t mask;
 	struct dma_chan *chan;
 
-	request_module("mic_x100_dma");
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_MEMCPY, mask);
 
@@ -387,9 +493,18 @@
 		goto dma_free;
 	}
 
+	mdev->vpdev = vop_register_device(&mdev->pdev->dev,
+					  VOP_DEV_TRNSP, &_mic_dma_ops,
+					  &vop_hw_ops, id + 1, &mdev->aper,
+					  mdev->dma_ch[0]);
+	if (IS_ERR(mdev->vpdev)) {
+		rc = PTR_ERR(mdev->vpdev);
+		goto scif_remove;
+	}
+
 	rc = mdev->ops->load_mic_fw(mdev, NULL);
 	if (rc)
-		goto scif_remove;
+		goto vop_remove;
 	mic_smpt_restore(mdev);
 	mic_intr_restore(mdev);
 	mdev->intr_ops->enable_interrupts(mdev);
@@ -397,6 +512,8 @@
 	mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
 	mdev->ops->send_firmware_intr(mdev);
 	goto unlock_ret;
+vop_remove:
+	vop_unregister_device(mdev->vpdev);
 scif_remove:
 	scif_unregister_device(mdev->scdev);
 dma_free:
@@ -423,7 +540,7 @@
 	 * will be the first to be registered and the last to be
 	 * unregistered.
 	 */
-	mic_virtio_reset_devices(mdev);
+	vop_unregister_device(mdev->vpdev);
 	scif_unregister_device(mdev->scdev);
 	mic_free_dma_chans(mdev);
 	mbus_unregister_device(mdev->dma_mbdev);
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 1058160..0a9daba 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -26,7 +26,6 @@
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
-#include "mic_virtio.h"
 
 /* Debugfs parent dir */
 static struct dentry *mic_dbg;
@@ -100,190 +99,6 @@
 	.release = mic_post_code_debug_release
 };
 
-static int mic_dp_show(struct seq_file *s, void *pos)
-{
-	struct mic_device *mdev = s->private;
-	struct mic_device_desc *d;
-	struct mic_device_ctrl *dc;
-	struct mic_vqconfig *vqconfig;
-	__u32 *features;
-	__u8 *config;
-	struct mic_bootparam *bootparam = mdev->dp;
-	int i, j;
-
-	seq_printf(s, "Bootparam: magic 0x%x\n",
-		   bootparam->magic);
-	seq_printf(s, "Bootparam: h2c_config_db %d\n",
-		   bootparam->h2c_config_db);
-	seq_printf(s, "Bootparam: node_id %d\n",
-		   bootparam->node_id);
-	seq_printf(s, "Bootparam: c2h_scif_db %d\n",
-		   bootparam->c2h_scif_db);
-	seq_printf(s, "Bootparam: h2c_scif_db %d\n",
-		   bootparam->h2c_scif_db);
-	seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
-		   bootparam->scif_host_dma_addr);
-	seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
-		   bootparam->scif_card_dma_addr);
-
-
-	for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
-	     i += mic_total_desc_size(d)) {
-		d = mdev->dp + i;
-		dc = (void *)d + mic_aligned_desc_size(d);
-
-		/* end of list */
-		if (d->type == 0)
-			break;
-
-		if (d->type == -1)
-			continue;
-
-		seq_printf(s, "Type %d ", d->type);
-		seq_printf(s, "Num VQ %d ", d->num_vq);
-		seq_printf(s, "Feature Len %d\n", d->feature_len);
-		seq_printf(s, "Config Len %d ", d->config_len);
-		seq_printf(s, "Shutdown Status %d\n", d->status);
-
-		for (j = 0; j < d->num_vq; j++) {
-			vqconfig = mic_vq_config(d) + j;
-			seq_printf(s, "vqconfig[%d]: ", j);
-			seq_printf(s, "address 0x%llx ", vqconfig->address);
-			seq_printf(s, "num %d ", vqconfig->num);
-			seq_printf(s, "used address 0x%llx\n",
-				   vqconfig->used_address);
-		}
-
-		features = (__u32 *)mic_vq_features(d);
-		seq_printf(s, "Features: Host 0x%x ", features[0]);
-		seq_printf(s, "Guest 0x%x\n", features[1]);
-
-		config = mic_vq_configspace(d);
-		for (j = 0; j < d->config_len; j++)
-			seq_printf(s, "config[%d]=%d\n", j, config[j]);
-
-		seq_puts(s, "Device control:\n");
-		seq_printf(s, "Config Change %d ", dc->config_change);
-		seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
-		seq_printf(s, "Guest Ack %d ", dc->guest_ack);
-		seq_printf(s, "Host ack %d\n", dc->host_ack);
-		seq_printf(s, "Used address updated %d ",
-			   dc->used_address_updated);
-		seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
-		seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
-		seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
-	}
-
-	return 0;
-}
-
-static int mic_dp_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mic_dp_show, inode->i_private);
-}
-
-static int mic_dp_debug_release(struct inode *inode, struct file *file)
-{
-	return single_release(inode, file);
-}
-
-static const struct file_operations dp_ops = {
-	.owner   = THIS_MODULE,
-	.open    = mic_dp_debug_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = mic_dp_debug_release
-};
-
-static int mic_vdev_info_show(struct seq_file *s, void *unused)
-{
-	struct mic_device *mdev = s->private;
-	struct list_head *pos, *tmp;
-	struct mic_vdev *mvdev;
-	int i, j;
-
-	mutex_lock(&mdev->mic_mutex);
-	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
-		mvdev = list_entry(pos, struct mic_vdev, list);
-		seq_printf(s, "VDEV type %d state %s in %ld out %ld\n",
-			   mvdev->virtio_id,
-			   mic_vdevup(mvdev) ? "UP" : "DOWN",
-			   mvdev->in_bytes,
-			   mvdev->out_bytes);
-		for (i = 0; i < MIC_MAX_VRINGS; i++) {
-			struct vring_desc *desc;
-			struct vring_avail *avail;
-			struct vring_used *used;
-			struct mic_vringh *mvr = &mvdev->mvr[i];
-			struct vringh *vrh = &mvr->vrh;
-			int num = vrh->vring.num;
-			if (!num)
-				continue;
-			desc = vrh->vring.desc;
-			seq_printf(s, "vring i %d avail_idx %d",
-				   i, mvr->vring.info->avail_idx & (num - 1));
-			seq_printf(s, " vring i %d avail_idx %d\n",
-				   i, mvr->vring.info->avail_idx);
-			seq_printf(s, "vrh i %d weak_barriers %d",
-				   i, vrh->weak_barriers);
-			seq_printf(s, " last_avail_idx %d last_used_idx %d",
-				   vrh->last_avail_idx, vrh->last_used_idx);
-			seq_printf(s, " completed %d\n", vrh->completed);
-			for (j = 0; j < num; j++) {
-				seq_printf(s, "desc[%d] addr 0x%llx len %d",
-					   j, desc->addr, desc->len);
-				seq_printf(s, " flags 0x%x next %d\n",
-					   desc->flags, desc->next);
-				desc++;
-			}
-			avail = vrh->vring.avail;
-			seq_printf(s, "avail flags 0x%x idx %d\n",
-				   vringh16_to_cpu(vrh, avail->flags),
-				   vringh16_to_cpu(vrh, avail->idx) & (num - 1));
-			seq_printf(s, "avail flags 0x%x idx %d\n",
-				   vringh16_to_cpu(vrh, avail->flags),
-				   vringh16_to_cpu(vrh, avail->idx));
-			for (j = 0; j < num; j++)
-				seq_printf(s, "avail ring[%d] %d\n",
-					   j, avail->ring[j]);
-			used = vrh->vring.used;
-			seq_printf(s, "used flags 0x%x idx %d\n",
-				   vringh16_to_cpu(vrh, used->flags),
-				   vringh16_to_cpu(vrh, used->idx) & (num - 1));
-			seq_printf(s, "used flags 0x%x idx %d\n",
-				   vringh16_to_cpu(vrh, used->flags),
-				   vringh16_to_cpu(vrh, used->idx));
-			for (j = 0; j < num; j++)
-				seq_printf(s, "used ring[%d] id %d len %d\n",
-					   j, vringh32_to_cpu(vrh,
-							      used->ring[j].id),
-					   vringh32_to_cpu(vrh,
-							   used->ring[j].len));
-		}
-	}
-	mutex_unlock(&mdev->mic_mutex);
-
-	return 0;
-}
-
-static int mic_vdev_info_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mic_vdev_info_show, inode->i_private);
-}
-
-static int mic_vdev_info_debug_release(struct inode *inode, struct file *file)
-{
-	return single_release(inode, file);
-}
-
-static const struct file_operations vdev_info_ops = {
-	.owner   = THIS_MODULE,
-	.open    = mic_vdev_info_debug_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = mic_vdev_info_debug_release
-};
-
 static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
 {
 	struct mic_device *mdev  = s->private;
@@ -367,11 +182,6 @@
 	debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
 			    &post_code_ops);
 
-	debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops);
-
-	debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev,
-			    &vdev_info_ops);
-
 	debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
 			    &msi_irq_info_ops);
 }
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 461184a..52b12b2 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -29,6 +29,7 @@
 #include <linux/miscdevice.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
 #include "../bus/cosm_bus.h"
 #include "mic_intr.h"
 
@@ -64,13 +65,11 @@
  * @bootaddr: MIC boot address.
  * @dp: virtio device page
  * @dp_dma_addr: virtio device page DMA address.
- * @name: name for the misc char device
- * @miscdev: registered misc char device
- * @vdev_list: list of virtio devices.
  * @dma_mbdev: MIC BUS DMA device.
  * @dma_ch - Array of DMA channels
  * @num_dma_ch - Number of DMA channels available
  * @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
  * @cosm_dev: COSM device
  */
 struct mic_device {
@@ -91,13 +90,11 @@
 	u32 bootaddr;
 	void *dp;
 	dma_addr_t dp_dma_addr;
-	char name[16];
-	struct miscdevice miscdev;
-	struct list_head vdev_list;
 	struct mbus_device *dma_mbdev;
 	struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
 	int num_dma_ch;
 	struct scif_hw_dev *scdev;
+	struct vop_device *vpdev;
 	struct cosm_device *cosm_dev;
 };
 
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c
deleted file mode 100644
index 8cc1d90..0000000
--- a/drivers/misc/mic/host/mic_fops.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/poll.h>
-#include <linux/pci.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
-
-int mic_open(struct inode *inode, struct file *f)
-{
-	struct mic_vdev *mvdev;
-	struct mic_device *mdev = container_of(f->private_data,
-		struct mic_device, miscdev);
-
-	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
-	if (!mvdev)
-		return -ENOMEM;
-
-	init_waitqueue_head(&mvdev->waitq);
-	INIT_LIST_HEAD(&mvdev->list);
-	mvdev->mdev = mdev;
-	mvdev->virtio_id = -1;
-
-	f->private_data = mvdev;
-	return 0;
-}
-
-int mic_release(struct inode *inode, struct file *f)
-{
-	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-
-	if (-1 != mvdev->virtio_id)
-		mic_virtio_del_device(mvdev);
-	f->private_data = NULL;
-	kfree(mvdev);
-	return 0;
-}
-
-long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
-	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-	void __user *argp = (void __user *)arg;
-	int ret;
-
-	switch (cmd) {
-	case MIC_VIRTIO_ADD_DEVICE:
-	{
-		ret = mic_virtio_add_device(mvdev, argp);
-		if (ret < 0) {
-			dev_err(mic_dev(mvdev),
-				"%s %d errno ret %d\n",
-				__func__, __LINE__, ret);
-			return ret;
-		}
-		break;
-	}
-	case MIC_VIRTIO_COPY_DESC:
-	{
-		struct mic_copy_desc copy;
-
-		ret = mic_vdev_inited(mvdev);
-		if (ret)
-			return ret;
-
-		if (copy_from_user(&copy, argp, sizeof(copy)))
-			return -EFAULT;
-
-		dev_dbg(mic_dev(mvdev),
-			"%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
-			__func__, __LINE__, copy.iovcnt, copy.vr_idx,
-			copy.update_used);
-
-		ret = mic_virtio_copy_desc(mvdev, &copy);
-		if (ret < 0) {
-			dev_err(mic_dev(mvdev),
-				"%s %d errno ret %d\n",
-				__func__, __LINE__, ret);
-			return ret;
-		}
-		if (copy_to_user(
-			&((struct mic_copy_desc __user *)argp)->out_len,
-			&copy.out_len, sizeof(copy.out_len))) {
-			dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
-				__func__, __LINE__, -EFAULT);
-			return -EFAULT;
-		}
-		break;
-	}
-	case MIC_VIRTIO_CONFIG_CHANGE:
-	{
-		ret = mic_vdev_inited(mvdev);
-		if (ret)
-			return ret;
-
-		ret = mic_virtio_config_change(mvdev, argp);
-		if (ret < 0) {
-			dev_err(mic_dev(mvdev),
-				"%s %d errno ret %d\n",
-				__func__, __LINE__, ret);
-			return ret;
-		}
-		break;
-	}
-	default:
-		return -ENOIOCTLCMD;
-	};
-	return 0;
-}
-
-/*
- * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
- * not when previously enqueued buffers may be available. This means that
- * in the card->host (TX) path, when userspace is unblocked by poll it
- * must drain all available descriptors or it can stall.
- */
-unsigned int mic_poll(struct file *f, poll_table *wait)
-{
-	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-	int mask = 0;
-
-	poll_wait(f, &mvdev->waitq, wait);
-
-	if (mic_vdev_inited(mvdev)) {
-		mask = POLLERR;
-	} else if (mvdev->poll_wake) {
-		mvdev->poll_wake = 0;
-		mask = POLLIN | POLLOUT;
-	}
-
-	return mask;
-}
-
-static inline int
-mic_query_offset(struct mic_vdev *mvdev, unsigned long offset,
-		 unsigned long *size, unsigned long *pa)
-{
-	struct mic_device *mdev = mvdev->mdev;
-	unsigned long start = MIC_DP_SIZE;
-	int i;
-
-	/*
-	 * MMAP interface is as follows:
-	 * offset				region
-	 * 0x0					virtio device_page
-	 * 0x1000				first vring
-	 * 0x1000 + size of 1st vring		second vring
-	 * ....
-	 */
-	if (!offset) {
-		*pa = virt_to_phys(mdev->dp);
-		*size = MIC_DP_SIZE;
-		return 0;
-	}
-
-	for (i = 0; i < mvdev->dd->num_vq; i++) {
-		struct mic_vringh *mvr = &mvdev->mvr[i];
-		if (offset == start) {
-			*pa = virt_to_phys(mvr->vring.va);
-			*size = mvr->vring.len;
-			return 0;
-		}
-		start += mvr->vring.len;
-	}
-	return -1;
-}
-
-/*
- * Maps the device page and virtio rings to user space for readonly access.
- */
-int
-mic_mmap(struct file *f, struct vm_area_struct *vma)
-{
-	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-	unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
-	int i, err;
-
-	err = mic_vdev_inited(mvdev);
-	if (err)
-		return err;
-
-	if (vma->vm_flags & VM_WRITE)
-		return -EACCES;
-
-	while (size_rem) {
-		i = mic_query_offset(mvdev, offset, &size, &pa);
-		if (i < 0)
-			return -EINVAL;
-		err = remap_pfn_range(vma, vma->vm_start + offset,
-			pa >> PAGE_SHIFT, size, vma->vm_page_prot);
-		if (err)
-			return err;
-		dev_dbg(mic_dev(mvdev),
-			"%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n",
-			__func__, __LINE__, mvdev->virtio_id, size, offset,
-			pa, vma->vm_start + offset);
-		size_rem -= size;
-		offset += size;
-	}
-	return 0;
-}
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h
deleted file mode 100644
index dc3893d..0000000
--- a/drivers/misc/mic/host/mic_fops.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef _MIC_FOPS_H_
-#define _MIC_FOPS_H_
-
-int mic_open(struct inode *inode, struct file *filp);
-int mic_release(struct inode *inode, struct file *filp);
-ssize_t mic_read(struct file *filp, char __user *buf,
-			size_t count, loff_t *pos);
-long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int mic_mmap(struct file *f, struct vm_area_struct *vma);
-unsigned int mic_poll(struct file *f, poll_table *wait);
-
-#endif
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index 153894e..035be3e 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -27,8 +27,6 @@
 #include "mic_device.h"
 #include "mic_x100.h"
 #include "mic_smpt.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
 
 static const char mic_driver_name[] = "mic";
 
@@ -57,17 +55,6 @@
 
 /* ID allocator for MIC devices */
 static struct ida g_mic_ida;
-/* Base device node number for MIC devices */
-static dev_t g_mic_devno;
-
-static const struct file_operations mic_fops = {
-	.open = mic_open,
-	.release = mic_release,
-	.unlocked_ioctl = mic_ioctl,
-	.poll = mic_poll,
-	.mmap = mic_mmap,
-	.owner = THIS_MODULE,
-};
 
 /* Initialize the device page */
 static int mic_dp_init(struct mic_device *mdev)
@@ -169,7 +156,6 @@
 	mic_ops_init(mdev);
 	mutex_init(&mdev->mic_mutex);
 	mdev->irq_info.next_avail_src = 0;
-	INIT_LIST_HEAD(&mdev->vdev_list);
 }
 
 /**
@@ -259,30 +245,15 @@
 		goto smpt_uninit;
 	}
 	mic_bootparam_init(mdev);
-
 	mic_create_debug_dir(mdev);
 
-	mdev->miscdev.minor = MISC_DYNAMIC_MINOR;
-	snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id);
-	mdev->miscdev.name = mdev->name;
-	mdev->miscdev.fops = &mic_fops;
-	mdev->miscdev.parent = &mdev->pdev->dev;
-	rc = misc_register(&mdev->miscdev);
-	if (rc) {
-		dev_err(&pdev->dev, "misc_register err id %d rc %d\n",
-			mdev->id, rc);
-		goto cleanup_debug_dir;
-	}
-
 	mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
 	if (IS_ERR(mdev->cosm_dev)) {
 		rc = PTR_ERR(mdev->cosm_dev);
 		dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
-		goto misc_dereg;
+		goto cleanup_debug_dir;
 	}
 	return 0;
-misc_dereg:
-	misc_deregister(&mdev->miscdev);
 cleanup_debug_dir:
 	mic_delete_debug_dir(mdev);
 	mic_dp_uninit(mdev);
@@ -323,7 +294,6 @@
 		return;
 
 	cosm_unregister_device(mdev->cosm_dev);
-	misc_deregister(&mdev->miscdev);
 	mic_delete_debug_dir(mdev);
 	mic_dp_uninit(mdev);
 	mic_smpt_uninit(mdev);
@@ -347,26 +317,18 @@
 {
 	int ret;
 
-	ret = alloc_chrdev_region(&g_mic_devno, 0,
-				  MIC_MAX_NUM_DEVS, mic_driver_name);
-	if (ret) {
-		pr_err("alloc_chrdev_region failed ret %d\n", ret);
-		goto error;
-	}
-
+	request_module("mic_x100_dma");
 	mic_init_debugfs();
 	ida_init(&g_mic_ida);
 	ret = pci_register_driver(&mic_driver);
 	if (ret) {
 		pr_err("pci_register_driver failed ret %d\n", ret);
-		goto cleanup_chrdev;
+		goto cleanup_debugfs;
 	}
-	return ret;
-cleanup_chrdev:
+	return 0;
+cleanup_debugfs:
 	ida_destroy(&g_mic_ida);
 	mic_exit_debugfs();
-	unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
-error:
 	return ret;
 }
 
@@ -375,7 +337,6 @@
 	pci_unregister_driver(&mic_driver);
 	ida_destroy(&g_mic_ida);
 	mic_exit_debugfs();
-	unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
 }
 
 module_init(mic_init);
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
deleted file mode 100644
index 58b107a..0000000
--- a/drivers/misc/mic/host/mic_virtio.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/dmaengine.h>
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-#include "mic_virtio.h"
-
-/*
- * Size of the internal buffer used during DMA's as an intermediate buffer
- * for copy to/from user.
- */
-#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
-
-static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
-			dma_addr_t src, size_t len)
-{
-	int err = 0;
-	struct dma_async_tx_descriptor *tx;
-	struct dma_chan *mic_ch = mdev->dma_ch[0];
-
-	if (!mic_ch) {
-		err = -EBUSY;
-		goto error;
-	}
-
-	tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
-						    DMA_PREP_FENCE);
-	if (!tx) {
-		err = -ENOMEM;
-		goto error;
-	} else {
-		dma_cookie_t cookie = tx->tx_submit(tx);
-
-		err = dma_submit_error(cookie);
-		if (err)
-			goto error;
-		err = dma_sync_wait(mic_ch, cookie);
-	}
-error:
-	if (err)
-		dev_err(&mdev->pdev->dev, "%s %d err %d\n",
-			__func__, __LINE__, err);
-	return err;
-}
-
-/*
- * Initiates the copies across the PCIe bus from card memory to a user
- * space buffer. When transfers are done using DMA, source/destination
- * addresses and transfer length must follow the alignment requirements of
- * the MIC DMA engine.
- */
-static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
-				   size_t len, u64 daddr, size_t dlen,
-				   int vr_idx)
-{
-	struct mic_device *mdev = mvdev->mdev;
-	void __iomem *dbuf = mdev->aper.va + daddr;
-	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
-	size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
-	size_t dma_offset;
-	size_t partlen;
-	int err;
-
-	dma_offset = daddr - round_down(daddr, dma_alignment);
-	daddr -= dma_offset;
-	len += dma_offset;
-
-	while (len) {
-		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
-		err = mic_sync_dma(mdev, mvr->buf_da, daddr,
-				   ALIGN(partlen, dma_alignment));
-		if (err)
-			goto err;
-
-		if (copy_to_user(ubuf, mvr->buf + dma_offset,
-				 partlen - dma_offset)) {
-			err = -EFAULT;
-			goto err;
-		}
-		daddr += partlen;
-		ubuf += partlen;
-		dbuf += partlen;
-		mvdev->in_bytes_dma += partlen;
-		mvdev->in_bytes += partlen;
-		len -= partlen;
-		dma_offset = 0;
-	}
-	return 0;
-err:
-	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
-	return err;
-}
-
-/*
- * Initiates copies across the PCIe bus from a user space buffer to card
- * memory. When transfers are done using DMA, source/destination addresses
- * and transfer length must follow the alignment requirements of the MIC
- * DMA engine.
- */
-static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
-				     size_t len, u64 daddr, size_t dlen,
-				     int vr_idx)
-{
-	struct mic_device *mdev = mvdev->mdev;
-	void __iomem *dbuf = mdev->aper.va + daddr;
-	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
-	size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
-	size_t partlen;
-	int err;
-
-	if (daddr & (dma_alignment - 1)) {
-		mvdev->tx_dst_unaligned += len;
-		goto memcpy;
-	} else if (ALIGN(len, dma_alignment) > dlen) {
-		mvdev->tx_len_unaligned += len;
-		goto memcpy;
-	}
-
-	while (len) {
-		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
-		if (copy_from_user(mvr->buf, ubuf, partlen)) {
-			err = -EFAULT;
-			goto err;
-		}
-		err = mic_sync_dma(mdev, daddr, mvr->buf_da,
-				   ALIGN(partlen, dma_alignment));
-		if (err)
-			goto err;
-		daddr += partlen;
-		ubuf += partlen;
-		dbuf += partlen;
-		mvdev->out_bytes_dma += partlen;
-		mvdev->out_bytes += partlen;
-		len -= partlen;
-	}
-memcpy:
-	/*
-	 * We are copying to IO below and should ideally use something
-	 * like copy_from_user_toio(..) if it existed.
-	 */
-	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
-		err = -EFAULT;
-		goto err;
-	}
-	mvdev->out_bytes += len;
-	return 0;
-err:
-	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
-	return err;
-}
-
-#define MIC_VRINGH_READ true
-
-/* The function to call to notify the card about added buffers */
-static void mic_notify(struct vringh *vrh)
-{
-	struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
-	struct mic_vdev *mvdev = mvrh->mvdev;
-	s8 db = mvdev->dc->h2c_vdev_db;
-
-	if (db != -1)
-		mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-}
-
-/* Determine the total number of bytes consumed in a VRINGH KIOV */
-static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
-{
-	int i;
-	u32 total = iov->consumed;
-
-	for (i = 0; i < iov->i; i++)
-		total += iov->iov[i].iov_len;
-	return total;
-}
-
-/*
- * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
- * This API is heavily based on the vringh_iov_xfer(..) implementation
- * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
- * and vringh_iov_push_kern(..) directly is because there is no
- * way to override the VRINGH xfer(..) routines as of v3.10.
- */
-static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
-			void __user *ubuf, size_t len, bool read, int vr_idx,
-			size_t *out_len)
-{
-	int ret = 0;
-	size_t partlen, tot_len = 0;
-
-	while (len && iov->i < iov->used) {
-		partlen = min(iov->iov[iov->i].iov_len, len);
-		if (read)
-			ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
-						(u64)iov->iov[iov->i].iov_base,
-						iov->iov[iov->i].iov_len,
-						vr_idx);
-		else
-			ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
-						(u64)iov->iov[iov->i].iov_base,
-						iov->iov[iov->i].iov_len,
-						vr_idx);
-		if (ret) {
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			break;
-		}
-		len -= partlen;
-		ubuf += partlen;
-		tot_len += partlen;
-		iov->consumed += partlen;
-		iov->iov[iov->i].iov_len -= partlen;
-		iov->iov[iov->i].iov_base += partlen;
-		if (!iov->iov[iov->i].iov_len) {
-			/* Fix up old iov element then increment. */
-			iov->iov[iov->i].iov_len = iov->consumed;
-			iov->iov[iov->i].iov_base -= iov->consumed;
-
-			iov->consumed = 0;
-			iov->i++;
-		}
-	}
-	*out_len = tot_len;
-	return ret;
-}
-
-/*
- * Use the standard VRINGH infrastructure in the kernel to fetch new
- * descriptors, initiate the copies and update the used ring.
- */
-static int _mic_virtio_copy(struct mic_vdev *mvdev,
-	struct mic_copy_desc *copy)
-{
-	int ret = 0;
-	u32 iovcnt = copy->iovcnt;
-	struct iovec iov;
-	struct iovec __user *u_iov = copy->iov;
-	void __user *ubuf = NULL;
-	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
-	struct vringh_kiov *riov = &mvr->riov;
-	struct vringh_kiov *wiov = &mvr->wiov;
-	struct vringh *vrh = &mvr->vrh;
-	u16 *head = &mvr->head;
-	struct mic_vring *vr = &mvr->vring;
-	size_t len = 0, out_len;
-
-	copy->out_len = 0;
-	/* Fetch a new IOVEC if all previous elements have been processed */
-	if (riov->i == riov->used && wiov->i == wiov->used) {
-		ret = vringh_getdesc_kern(vrh, riov, wiov,
-				head, GFP_KERNEL);
-		/* Check if there are available descriptors */
-		if (ret <= 0)
-			return ret;
-	}
-	while (iovcnt) {
-		if (!len) {
-			/* Copy over a new iovec from user space. */
-			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
-			if (ret) {
-				ret = -EINVAL;
-				dev_err(mic_dev(mvdev), "%s %d err %d\n",
-					__func__, __LINE__, ret);
-				break;
-			}
-			len = iov.iov_len;
-			ubuf = iov.iov_base;
-		}
-		/* Issue all the read descriptors first */
-		ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
-				      copy->vr_idx, &out_len);
-		if (ret) {
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			break;
-		}
-		len -= out_len;
-		ubuf += out_len;
-		copy->out_len += out_len;
-		/* Issue the write descriptors next */
-		ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
-				      copy->vr_idx, &out_len);
-		if (ret) {
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			break;
-		}
-		len -= out_len;
-		ubuf += out_len;
-		copy->out_len += out_len;
-		if (!len) {
-			/* One user space iovec is now completed */
-			iovcnt--;
-			u_iov++;
-		}
-		/* Exit loop if all elements in KIOVs have been processed. */
-		if (riov->i == riov->used && wiov->i == wiov->used)
-			break;
-	}
-	/*
-	 * Update the used ring if a descriptor was available and some data was
-	 * copied in/out and the user asked for a used ring update.
-	 */
-	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
-		u32 total = 0;
-
-		/* Determine the total data consumed */
-		total += mic_vringh_iov_consumed(riov);
-		total += mic_vringh_iov_consumed(wiov);
-		vringh_complete_kern(vrh, *head, total);
-		*head = USHRT_MAX;
-		if (vringh_need_notify_kern(vrh) > 0)
-			vringh_notify(vrh);
-		vringh_kiov_cleanup(riov);
-		vringh_kiov_cleanup(wiov);
-		/* Update avail idx for user space */
-		vr->info->avail_idx = vrh->last_avail_idx;
-	}
-	return ret;
-}
-
-static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
-		struct mic_copy_desc *copy)
-{
-	if (copy->vr_idx >= mvdev->dd->num_vq) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -EINVAL);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/* Copy a specified number of virtio descriptors in a chain */
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
-		struct mic_copy_desc *copy)
-{
-	int err;
-	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
-
-	err = mic_verify_copy_args(mvdev, copy);
-	if (err)
-		return err;
-
-	mutex_lock(&mvr->vr_mutex);
-	if (!mic_vdevup(mvdev)) {
-		err = -ENODEV;
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, err);
-		goto err;
-	}
-	err = _mic_virtio_copy(mvdev, copy);
-	if (err) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, err);
-	}
-err:
-	mutex_unlock(&mvr->vr_mutex);
-	return err;
-}
-
-static void mic_virtio_init_post(struct mic_vdev *mvdev)
-{
-	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
-	int i;
-
-	for (i = 0; i < mvdev->dd->num_vq; i++) {
-		if (!le64_to_cpu(vqconfig[i].used_address)) {
-			dev_warn(mic_dev(mvdev), "used_address zero??\n");
-			continue;
-		}
-		mvdev->mvr[i].vrh.vring.used =
-			(void __force *)mvdev->mdev->aper.va +
-			le64_to_cpu(vqconfig[i].used_address);
-	}
-
-	mvdev->dc->used_address_updated = 0;
-
-	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
-		__func__, mvdev->virtio_id);
-}
-
-static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
-{
-	int i;
-
-	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
-		__func__, mvdev->dd->status, mvdev->virtio_id);
-
-	for (i = 0; i < mvdev->dd->num_vq; i++)
-		/*
-		 * Avoid lockdep false positive. The + 1 is for the mic
-		 * mutex which is held in the reset devices code path.
-		 */
-		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
-	/* 0 status means "reset" */
-	mvdev->dd->status = 0;
-	mvdev->dc->vdev_reset = 0;
-	mvdev->dc->host_ack = 1;
-
-	for (i = 0; i < mvdev->dd->num_vq; i++) {
-		struct vringh *vrh = &mvdev->mvr[i].vrh;
-		mvdev->mvr[i].vring.info->avail_idx = 0;
-		vrh->completed = 0;
-		vrh->last_avail_idx = 0;
-		vrh->last_used_idx = 0;
-	}
-
-	for (i = 0; i < mvdev->dd->num_vq; i++)
-		mutex_unlock(&mvdev->mvr[i].vr_mutex);
-}
-
-void mic_virtio_reset_devices(struct mic_device *mdev)
-{
-	struct list_head *pos, *tmp;
-	struct mic_vdev *mvdev;
-
-	dev_dbg(&mdev->pdev->dev, "%s\n",  __func__);
-
-	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
-		mvdev = list_entry(pos, struct mic_vdev, list);
-		mic_virtio_device_reset(mvdev);
-		mvdev->poll_wake = 1;
-		wake_up(&mvdev->waitq);
-	}
-}
-
-void mic_bh_handler(struct work_struct *work)
-{
-	struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
-			virtio_bh_work);
-
-	if (mvdev->dc->used_address_updated)
-		mic_virtio_init_post(mvdev);
-
-	if (mvdev->dc->vdev_reset)
-		mic_virtio_device_reset(mvdev);
-
-	mvdev->poll_wake = 1;
-	wake_up(&mvdev->waitq);
-}
-
-static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
-{
-	struct mic_vdev *mvdev = data;
-	struct mic_device *mdev = mvdev->mdev;
-
-	mdev->ops->intr_workarounds(mdev);
-	schedule_work(&mvdev->virtio_bh_work);
-	return IRQ_HANDLED;
-}
-
-int mic_virtio_config_change(struct mic_vdev *mvdev,
-			void __user *argp)
-{
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
-	int ret = 0, retry, i;
-	struct mic_bootparam *bootparam = mvdev->mdev->dp;
-	s8 db = bootparam->h2c_config_db;
-
-	mutex_lock(&mvdev->mdev->mic_mutex);
-	for (i = 0; i < mvdev->dd->num_vq; i++)
-		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
-	if (db == -1 || mvdev->dd->type == -1) {
-		ret = -EIO;
-		goto exit;
-	}
-
-	if (copy_from_user(mic_vq_configspace(mvdev->dd),
-			   argp, mvdev->dd->config_len)) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -EFAULT);
-		ret = -EFAULT;
-		goto exit;
-	}
-	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
-	mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-
-	for (retry = 100; retry--;) {
-		ret = wait_event_timeout(wake,
-			mvdev->dc->guest_ack, msecs_to_jiffies(100));
-		if (ret)
-			break;
-	}
-
-	dev_dbg(mic_dev(mvdev),
-		"%s %d retry: %d\n", __func__, __LINE__, retry);
-	mvdev->dc->config_change = 0;
-	mvdev->dc->guest_ack = 0;
-exit:
-	for (i = 0; i < mvdev->dd->num_vq; i++)
-		mutex_unlock(&mvdev->mvr[i].vr_mutex);
-	mutex_unlock(&mvdev->mdev->mic_mutex);
-	return ret;
-}
-
-static int mic_copy_dp_entry(struct mic_vdev *mvdev,
-					void __user *argp,
-					__u8 *type,
-					struct mic_device_desc **devpage)
-{
-	struct mic_device *mdev = mvdev->mdev;
-	struct mic_device_desc dd, *dd_config, *devp;
-	struct mic_vqconfig *vqconfig;
-	int ret = 0, i;
-	bool slot_found = false;
-
-	if (copy_from_user(&dd, argp, sizeof(dd))) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -EFAULT);
-		return -EFAULT;
-	}
-
-	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
-	    dd.num_vq > MIC_MAX_VRINGS) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -EINVAL);
-		return -EINVAL;
-	}
-
-	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
-	if (dd_config == NULL) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -ENOMEM);
-		return -ENOMEM;
-	}
-	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
-		ret = -EFAULT;
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, ret);
-		goto exit;
-	}
-
-	vqconfig = mic_vq_config(dd_config);
-	for (i = 0; i < dd.num_vq; i++) {
-		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
-			ret =  -EINVAL;
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			goto exit;
-		}
-	}
-
-	/* Find the first free device page entry */
-	for (i = sizeof(struct mic_bootparam);
-		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
-		i += mic_total_desc_size(devp)) {
-		devp = mdev->dp + i;
-		if (devp->type == 0 || devp->type == -1) {
-			slot_found = true;
-			break;
-		}
-	}
-	if (!slot_found) {
-		ret =  -EINVAL;
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, ret);
-		goto exit;
-	}
-	/*
-	 * Save off the type before doing the memcpy. Type will be set in the
-	 * end after completing all initialization for the new device.
-	 */
-	*type = dd_config->type;
-	dd_config->type = 0;
-	memcpy(devp, dd_config, mic_desc_size(dd_config));
-
-	*devpage = devp;
-exit:
-	kfree(dd_config);
-	return ret;
-}
-
-static void mic_init_device_ctrl(struct mic_vdev *mvdev,
-				struct mic_device_desc *devpage)
-{
-	struct mic_device_ctrl *dc;
-
-	dc = (void *)devpage + mic_aligned_desc_size(devpage);
-
-	dc->config_change = 0;
-	dc->guest_ack = 0;
-	dc->vdev_reset = 0;
-	dc->host_ack = 0;
-	dc->used_address_updated = 0;
-	dc->c2h_vdev_db = -1;
-	dc->h2c_vdev_db = -1;
-	mvdev->dc = dc;
-}
-
-int mic_virtio_add_device(struct mic_vdev *mvdev,
-			void __user *argp)
-{
-	struct mic_device *mdev = mvdev->mdev;
-	struct mic_device_desc *dd = NULL;
-	struct mic_vqconfig *vqconfig;
-	int vr_size, i, j, ret;
-	u8 type = 0;
-	s8 db;
-	char irqname[10];
-	struct mic_bootparam *bootparam = mdev->dp;
-	u16 num;
-	dma_addr_t vr_addr;
-
-	mutex_lock(&mdev->mic_mutex);
-
-	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
-	if (ret) {
-		mutex_unlock(&mdev->mic_mutex);
-		return ret;
-	}
-
-	mic_init_device_ctrl(mvdev, dd);
-
-	mvdev->dd = dd;
-	mvdev->virtio_id = type;
-	vqconfig = mic_vq_config(dd);
-	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
-
-	for (i = 0; i < dd->num_vq; i++) {
-		struct mic_vringh *mvr = &mvdev->mvr[i];
-		struct mic_vring *vr = &mvdev->mvr[i].vring;
-		num = le16_to_cpu(vqconfig[i].num);
-		mutex_init(&mvr->vr_mutex);
-		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
-			sizeof(struct _mic_vring_info));
-		vr->va = (void *)
-			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 get_order(vr_size));
-		if (!vr->va) {
-			ret = -ENOMEM;
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			goto err;
-		}
-		vr->len = vr_size;
-		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
-		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
-		vr_addr = mic_map_single(mdev, vr->va, vr_size);
-		if (mic_map_error(vr_addr)) {
-			free_pages((unsigned long)vr->va, get_order(vr_size));
-			ret = -ENOMEM;
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			goto err;
-		}
-		vqconfig[i].address = cpu_to_le64(vr_addr);
-
-		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
-		ret = vringh_init_kern(&mvr->vrh,
-			*(u32 *)mic_vq_features(mvdev->dd), num, false,
-			vr->vr.desc, vr->vr.avail, vr->vr.used);
-		if (ret) {
-			dev_err(mic_dev(mvdev), "%s %d err %d\n",
-				__func__, __LINE__, ret);
-			goto err;
-		}
-		vringh_kiov_init(&mvr->riov, NULL, 0);
-		vringh_kiov_init(&mvr->wiov, NULL, 0);
-		mvr->head = USHRT_MAX;
-		mvr->mvdev = mvdev;
-		mvr->vrh.notify = mic_notify;
-		dev_dbg(&mdev->pdev->dev,
-			"%s %d index %d va %p info %p vr_size 0x%x\n",
-			__func__, __LINE__, i, vr->va, vr->info, vr_size);
-		mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
-					get_order(MIC_INT_DMA_BUF_SIZE));
-		mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
-					  MIC_INT_DMA_BUF_SIZE);
-	}
-
-	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
-		 mvdev->virtio_id);
-	mvdev->virtio_db = mic_next_db(mdev);
-	mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
-					       mic_virtio_intr_handler,
-					       NULL, irqname, mvdev,
-					       mvdev->virtio_db, MIC_INTR_DB);
-	if (IS_ERR(mvdev->virtio_cookie)) {
-		ret = PTR_ERR(mvdev->virtio_cookie);
-		dev_dbg(&mdev->pdev->dev, "request irq failed\n");
-		goto err;
-	}
-
-	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
-
-	list_add_tail(&mvdev->list, &mdev->vdev_list);
-	/*
-	 * Order the type update with previous stores. This write barrier
-	 * is paired with the corresponding read barrier before the uncached
-	 * system memory read of the type, on the card while scanning the
-	 * device page.
-	 */
-	smp_wmb();
-	dd->type = type;
-
-	dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type);
-
-	db = bootparam->h2c_config_db;
-	if (db != -1)
-		mdev->ops->send_intr(mdev, db);
-	mutex_unlock(&mdev->mic_mutex);
-	return 0;
-err:
-	vqconfig = mic_vq_config(dd);
-	for (j = 0; j < i; j++) {
-		struct mic_vringh *mvr = &mvdev->mvr[j];
-		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
-				 mvr->vring.len);
-		free_pages((unsigned long)mvr->vring.va,
-			   get_order(mvr->vring.len));
-	}
-	mutex_unlock(&mdev->mic_mutex);
-	return ret;
-}
-
-void mic_virtio_del_device(struct mic_vdev *mvdev)
-{
-	struct list_head *pos, *tmp;
-	struct mic_vdev *tmp_mvdev;
-	struct mic_device *mdev = mvdev->mdev;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
-	int i, ret, retry;
-	struct mic_vqconfig *vqconfig;
-	struct mic_bootparam *bootparam = mdev->dp;
-	s8 db;
-
-	mutex_lock(&mdev->mic_mutex);
-	db = bootparam->h2c_config_db;
-	if (db == -1)
-		goto skip_hot_remove;
-	dev_dbg(&mdev->pdev->dev,
-		"Requesting hot remove id %d\n", mvdev->virtio_id);
-	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
-	mdev->ops->send_intr(mdev, db);
-	for (retry = 100; retry--;) {
-		ret = wait_event_timeout(wake,
-			mvdev->dc->guest_ack, msecs_to_jiffies(100));
-		if (ret)
-			break;
-	}
-	dev_dbg(&mdev->pdev->dev,
-		"Device id %d config_change %d guest_ack %d retry %d\n",
-		mvdev->virtio_id, mvdev->dc->config_change,
-		mvdev->dc->guest_ack, retry);
-	mvdev->dc->config_change = 0;
-	mvdev->dc->guest_ack = 0;
-skip_hot_remove:
-	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
-	flush_work(&mvdev->virtio_bh_work);
-	vqconfig = mic_vq_config(mvdev->dd);
-	for (i = 0; i < mvdev->dd->num_vq; i++) {
-		struct mic_vringh *mvr = &mvdev->mvr[i];
-
-		mic_unmap_single(mvdev->mdev, mvr->buf_da,
-				 MIC_INT_DMA_BUF_SIZE);
-		free_pages((unsigned long)mvr->buf,
-			   get_order(MIC_INT_DMA_BUF_SIZE));
-		vringh_kiov_cleanup(&mvr->riov);
-		vringh_kiov_cleanup(&mvr->wiov);
-		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
-				 mvr->vring.len);
-		free_pages((unsigned long)mvr->vring.va,
-			   get_order(mvr->vring.len));
-	}
-
-	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
-		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
-		if (tmp_mvdev == mvdev) {
-			list_del(pos);
-			dev_dbg(&mdev->pdev->dev,
-				"Removing virtio device id %d\n",
-				mvdev->virtio_id);
-			break;
-		}
-	}
-	/*
-	 * Order the type update with previous stores. This write barrier
-	 * is paired with the corresponding read barrier before the uncached
-	 * system memory read of the type, on the card while scanning the
-	 * device page.
-	 */
-	smp_wmb();
-	mvdev->dd->type = -1;
-	mutex_unlock(&mdev->mic_mutex);
-}
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h
deleted file mode 100644
index a80631f..0000000
--- a/drivers/misc/mic/host/mic_virtio.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef MIC_VIRTIO_H
-#define MIC_VIRTIO_H
-
-#include <linux/virtio_config.h>
-#include <linux/mic_ioctl.h>
-
-/*
- * Note on endianness.
- * 1. Host can be both BE or LE
- * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail
- *    rings and ioreadXX/iowriteXX to access used ring.
- * 3. Device page exposed by host to guest contains LE values. Guest
- *    accesses these using ioreadXX/iowriteXX etc. This way in general we
- *    obey the virtio spec according to which guest works with native
- *    endianness and host is aware of guest endianness and does all
- *    required endianness conversion.
- * 4. Data provided from user space to guest (in ADD_DEVICE and
- *    CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be
- *    in guest endianness.
- */
-
-/**
- * struct mic_vringh - Virtio ring host information.
- *
- * @vring: The MIC vring used for setting up user space mappings.
- * @vrh: The host VRINGH used for accessing the card vrings.
- * @riov: The VRINGH read kernel IOV.
- * @wiov: The VRINGH write kernel IOV.
- * @vr_mutex: Mutex for synchronizing access to the VRING.
- * @buf: Temporary kernel buffer used to copy in/out data
- * from/to the card via DMA.
- * @buf_da: dma address of buf.
- * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
- */
-struct mic_vringh {
-	struct mic_vring vring;
-	struct vringh vrh;
-	struct vringh_kiov riov;
-	struct vringh_kiov wiov;
-	struct mutex vr_mutex;
-	void *buf;
-	dma_addr_t buf_da;
-	struct mic_vdev *mvdev;
-	u16 head;
-};
-
-/**
- * struct mic_vdev - Host information for a card Virtio device.
- *
- * @virtio_id - Virtio device id.
- * @waitq - Waitqueue to allow ring3 apps to poll.
- * @mdev - Back pointer to host MIC device.
- * @poll_wake - Used for waking up threads blocked in poll.
- * @out_bytes - Debug stats for number of bytes copied from host to card.
- * @in_bytes - Debug stats for number of bytes copied from card to host.
- * @out_bytes_dma - Debug stats for number of bytes copied from host to card
- * using DMA.
- * @in_bytes_dma - Debug stats for number of bytes copied from card to host
- * using DMA.
- * @tx_len_unaligned - Debug stats for number of bytes copied to the card where
- * the transfer length did not have the required DMA alignment.
- * @tx_dst_unaligned - Debug stats for number of bytes copied where the
- * destination address on the card did not have the required DMA alignment.
- * @mvr - Store per VRING data structures.
- * @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
- * @dd - Virtio device descriptor.
- * @dc - Virtio device control fields.
- * @list - List of Virtio devices.
- * @virtio_db - The doorbell used by the card to interrupt the host.
- * @virtio_cookie - The cookie returned while requesting interrupts.
- */
-struct mic_vdev {
-	int virtio_id;
-	wait_queue_head_t waitq;
-	struct mic_device *mdev;
-	int poll_wake;
-	unsigned long out_bytes;
-	unsigned long in_bytes;
-	unsigned long out_bytes_dma;
-	unsigned long in_bytes_dma;
-	unsigned long tx_len_unaligned;
-	unsigned long tx_dst_unaligned;
-	struct mic_vringh mvr[MIC_MAX_VRINGS];
-	struct work_struct virtio_bh_work;
-	struct mic_device_desc *dd;
-	struct mic_device_ctrl *dc;
-	struct list_head list;
-	int virtio_db;
-	struct mic_irq *virtio_cookie;
-};
-
-void mic_virtio_uninit(struct mic_device *mdev);
-int mic_virtio_add_device(struct mic_vdev *mvdev,
-			void __user *argp);
-void mic_virtio_del_device(struct mic_vdev *mvdev);
-int mic_virtio_config_change(struct mic_vdev *mvdev,
-			void __user *argp);
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
-	struct mic_copy_desc *request);
-void mic_virtio_reset_devices(struct mic_device *mdev);
-void mic_bh_handler(struct work_struct *work);
-
-/* Helper API to obtain the MIC PCIe device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
-	return &mvdev->mdev->pdev->dev;
-}
-
-/* Helper API to check if a virtio device is initialized */
-static inline int mic_vdev_inited(struct mic_vdev *mvdev)
-{
-	/* Device has not been created yet */
-	if (!mvdev->dd || !mvdev->dd->type) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -EINVAL);
-		return -EINVAL;
-	}
-
-	/* Device has been removed/deleted */
-	if (mvdev->dd->type == -1) {
-		dev_err(mic_dev(mvdev), "%s %d err %d\n",
-			__func__, __LINE__, -ENODEV);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-/* Helper API to check if a virtio device is running */
-static inline bool mic_vdevup(struct mic_vdev *mvdev)
-{
-	return !!mvdev->dd->status;
-}
-#endif
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 8118ac4..82a973c 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -450,26 +450,29 @@
 
 	rc = mic_x100_get_boot_addr(mdev);
 	if (rc)
-		goto error;
+		return rc;
 	/* load OS */
 	rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
 	if (rc < 0) {
 		dev_err(&mdev->pdev->dev,
 			"ramdisk request_firmware failed: %d %s\n",
 			rc, mdev->cosm_dev->firmware);
-		goto error;
+		return rc;
 	}
 	if (mdev->bootaddr > mdev->aper.len - fw->size) {
 		rc = -EINVAL;
 		dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
 			__func__, __LINE__, rc, mdev->bootaddr);
-		release_firmware(fw);
 		goto error;
 	}
 	memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
 	mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
-	if (!strcmp(mdev->cosm_dev->bootmode, "flash"))
-		goto done;
+	if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
+		rc = -EINVAL;
+		dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
+			__func__, __LINE__, rc);
+		goto error;
+	}
 	/* load command line */
 	rc = mic_x100_load_command_line(mdev, fw);
 	if (rc) {
@@ -481,9 +484,11 @@
 	/* load ramdisk */
 	if (mdev->cosm_dev->ramdisk)
 		rc = mic_x100_load_ramdisk(mdev);
+
+	return rc;
+
 error:
-	dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc);
-done:
+	release_firmware(fw);
 	return rc;
 }
 
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 95a13c6..cd01a0e 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -74,11 +74,6 @@
 	bool ordered;
 };
 
-#ifndef list_entry_next
-#define list_entry_next(pos, member) \
-	list_entry(pos->member.next, typeof(*pos), member)
-#endif
-
 /**
  * scif_reserve_dma_chan:
  * @ep: Endpoint Descriptor.
@@ -276,13 +271,10 @@
 scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
 {
 	struct scif_mmu_notif *mmn;
-	struct list_head *item;
 
-	list_for_each(item, &rma->mmn_list) {
-		mmn = list_entry(item, struct scif_mmu_notif, list);
+	list_for_each_entry(mmn, &rma->mmn_list, list)
 		if (mmn->mm == mm)
 			return mmn;
-	}
 	return NULL;
 }
 
@@ -293,13 +285,12 @@
 		 = kzalloc(sizeof(*mmn), GFP_KERNEL);
 
 	if (!mmn)
-		return ERR_PTR(ENOMEM);
+		return ERR_PTR(-ENOMEM);
 
 	scif_init_mmu_notifier(mmn, current->mm, ep);
-	if (mmu_notifier_register(&mmn->ep_mmu_notifier,
-				  current->mm)) {
+	if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
 		kfree(mmn);
-		return ERR_PTR(EBUSY);
+		return ERR_PTR(-EBUSY);
 	}
 	list_add(&mmn->list, &ep->rma_info.mmn_list);
 	return mmn;
@@ -851,7 +842,7 @@
 		(window->nr_pages << PAGE_SHIFT);
 	while (rem_len) {
 		if (offset == end_offset) {
-			window = list_entry_next(window, list);
+			window = list_next_entry(window, list);
 			end_offset = window->offset +
 				(window->nr_pages << PAGE_SHIFT);
 		}
@@ -957,7 +948,7 @@
 	remaining_len -= tail_len;
 	while (remaining_len) {
 		if (offset == end_offset) {
-			window = list_entry_next(window, list);
+			window = list_next_entry(window, list);
 			end_offset = window->offset +
 				(window->nr_pages << PAGE_SHIFT);
 		}
@@ -1064,7 +1055,7 @@
 	}
 	if (tail_len) {
 		if (offset == end_offset) {
-			window = list_entry_next(window, list);
+			window = list_next_entry(window, list);
 			end_offset = window->offset +
 				(window->nr_pages << PAGE_SHIFT);
 		}
@@ -1147,13 +1138,13 @@
 		(dst_window->nr_pages << PAGE_SHIFT);
 	while (remaining_len) {
 		if (src_offset == end_src_offset) {
-			src_window = list_entry_next(src_window, list);
+			src_window = list_next_entry(src_window, list);
 			end_src_offset = src_window->offset +
 				(src_window->nr_pages << PAGE_SHIFT);
 			scif_init_window_iter(src_window, &src_win_iter);
 		}
 		if (dst_offset == end_dst_offset) {
-			dst_window = list_entry_next(dst_window, list);
+			dst_window = list_next_entry(dst_window, list);
 			end_dst_offset = dst_window->offset +
 				(dst_window->nr_pages << PAGE_SHIFT);
 			scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1314,13 +1305,13 @@
 	remaining_len -= tail_len;
 	while (remaining_len) {
 		if (src_offset == end_src_offset) {
-			src_window = list_entry_next(src_window, list);
+			src_window = list_next_entry(src_window, list);
 			end_src_offset = src_window->offset +
 				(src_window->nr_pages << PAGE_SHIFT);
 			scif_init_window_iter(src_window, &src_win_iter);
 		}
 		if (dst_offset == end_dst_offset) {
-			dst_window = list_entry_next(dst_window, list);
+			dst_window = list_next_entry(dst_window, list);
 			end_dst_offset = dst_window->offset +
 				(dst_window->nr_pages << PAGE_SHIFT);
 			scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1405,9 +1396,9 @@
 	if (remaining_len) {
 		loop_len = remaining_len;
 		if (src_offset == end_src_offset)
-			src_window = list_entry_next(src_window, list);
+			src_window = list_next_entry(src_window, list);
 		if (dst_offset == end_dst_offset)
-			dst_window = list_entry_next(dst_window, list);
+			dst_window = list_next_entry(dst_window, list);
 
 		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
 		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
@@ -1550,12 +1541,12 @@
 			end_dst_offset = dst_window->offset +
 				(dst_window->nr_pages << PAGE_SHIFT);
 			if (src_offset == end_src_offset) {
-				src_window = list_entry_next(src_window, list);
+				src_window = list_next_entry(src_window, list);
 				scif_init_window_iter(src_window,
 						      &src_win_iter);
 			}
 			if (dst_offset == end_dst_offset) {
-				dst_window = list_entry_next(dst_window, list);
+				dst_window = list_next_entry(dst_window, list);
 				scif_init_window_iter(dst_window,
 						      &dst_win_iter);
 			}
@@ -1730,7 +1721,7 @@
 		mutex_lock(&ep->rma_info.mmn_lock);
 		mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
 		if (!mmn)
-			scif_add_mmu_notifier(current->mm, ep);
+			mmn = scif_add_mmu_notifier(current->mm, ep);
 		mutex_unlock(&ep->rma_info.mmn_lock);
 		if (IS_ERR(mmn)) {
 			scif_put_peer_dev(spdev);
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 8310b4d..6a451bd 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1511,7 +1511,7 @@
 	if ((map_flags & SCIF_MAP_FIXED) &&
 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
 	    (offset < 0) ||
-	    (offset + (off_t)len < offset)))
+	    (len > LONG_MAX - offset)))
 		return -EINVAL;
 
 	might_sleep();
@@ -1614,7 +1614,7 @@
 	if ((map_flags & SCIF_MAP_FIXED) &&
 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
 	    (offset < 0) ||
-	    (offset + (off_t)len < offset)))
+	    (len > LONG_MAX - offset)))
 		return -EINVAL;
 
 	/* Unsupported protection requested */
@@ -1732,7 +1732,8 @@
 
 	/* Offset is not page aligned or offset+len wraps around */
 	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
-	    (offset + (off_t)len < offset))
+	    (offset < 0) ||
+	    (len > LONG_MAX - offset))
 		return -EINVAL;
 
 	err = scif_verify_epd(ep);
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile
new file mode 100644
index 0000000..78819c8
--- /dev/null
+++ b/drivers/misc/mic/vop/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2016, Intel Corporation.
+#
+obj-m := vop.o
+
+vop-objs += vop_main.o
+vop-objs += vop_debugfs.o
+vop-objs += vop_vringh.o
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
new file mode 100644
index 0000000..ab43884
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_debugfs.c
@@ -0,0 +1,232 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "vop_main.h"
+
+static int vop_dp_show(struct seq_file *s, void *pos)
+{
+	struct mic_device_desc *d;
+	struct mic_device_ctrl *dc;
+	struct mic_vqconfig *vqconfig;
+	__u32 *features;
+	__u8 *config;
+	struct vop_info *vi = s->private;
+	struct vop_device *vpdev = vi->vpdev;
+	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+	int j, k;
+
+	seq_printf(s, "Bootparam: magic 0x%x\n",
+		   bootparam->magic);
+	seq_printf(s, "Bootparam: h2c_config_db %d\n",
+		   bootparam->h2c_config_db);
+	seq_printf(s, "Bootparam: node_id %d\n",
+		   bootparam->node_id);
+	seq_printf(s, "Bootparam: c2h_scif_db %d\n",
+		   bootparam->c2h_scif_db);
+	seq_printf(s, "Bootparam: h2c_scif_db %d\n",
+		   bootparam->h2c_scif_db);
+	seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
+		   bootparam->scif_host_dma_addr);
+	seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
+		   bootparam->scif_card_dma_addr);
+
+	for (j = sizeof(*bootparam);
+		j < MIC_DP_SIZE; j += mic_total_desc_size(d)) {
+		d = (void *)bootparam + j;
+		dc = (void *)d + mic_aligned_desc_size(d);
+
+		/* end of list */
+		if (d->type == 0)
+			break;
+
+		if (d->type == -1)
+			continue;
+
+		seq_printf(s, "Type %d ", d->type);
+		seq_printf(s, "Num VQ %d ", d->num_vq);
+		seq_printf(s, "Feature Len %d\n", d->feature_len);
+		seq_printf(s, "Config Len %d ", d->config_len);
+		seq_printf(s, "Shutdown Status %d\n", d->status);
+
+		for (k = 0; k < d->num_vq; k++) {
+			vqconfig = mic_vq_config(d) + k;
+			seq_printf(s, "vqconfig[%d]: ", k);
+			seq_printf(s, "address 0x%llx ",
+				   vqconfig->address);
+			seq_printf(s, "num %d ", vqconfig->num);
+			seq_printf(s, "used address 0x%llx\n",
+				   vqconfig->used_address);
+		}
+
+		features = (__u32 *)mic_vq_features(d);
+		seq_printf(s, "Features: Host 0x%x ", features[0]);
+		seq_printf(s, "Guest 0x%x\n", features[1]);
+
+		config = mic_vq_configspace(d);
+		for (k = 0; k < d->config_len; k++)
+			seq_printf(s, "config[%d]=%d\n", k, config[k]);
+
+		seq_puts(s, "Device control:\n");
+		seq_printf(s, "Config Change %d ", dc->config_change);
+		seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
+		seq_printf(s, "Guest Ack %d ", dc->guest_ack);
+		seq_printf(s, "Host ack %d\n", dc->host_ack);
+		seq_printf(s, "Used address updated %d ",
+			   dc->used_address_updated);
+		seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
+		seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
+		seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
+	}
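+	/* Reading the "dp" debugfs entry also kicks off a device page rescan */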
+	schedule_work(&vi->hotplug_work);
+	return 0;
+}
+
+static int vop_dp_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vop_dp_show, inode->i_private);
+}
+
+static int vop_dp_debug_release(struct inode *inode, struct file *file)
+{
+	return single_release(inode, file);
+}
+
+static const struct file_operations dp_ops = {
+	.owner   = THIS_MODULE,
+	.open    = vop_dp_debug_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = vop_dp_debug_release
+};
+
+static int vop_vdev_info_show(struct seq_file *s, void *unused)
+{
+	struct vop_info *vi = s->private;
+	struct list_head *pos, *tmp;
+	struct vop_vdev *vdev;
+	int i, j;
+
+	mutex_lock(&vi->vop_mutex);
+	list_for_each_safe(pos, tmp, &vi->vdev_list) {
+		vdev = list_entry(pos, struct vop_vdev, list);
+		seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n",
+			   vdev->virtio_id,
+			   vop_vdevup(vdev) ? "UP" : "DOWN",
+			   vdev->in_bytes,
+			   vdev->out_bytes,
+			   vdev->in_bytes_dma,
+			   vdev->out_bytes_dma);
+		for (i = 0; i < MIC_MAX_VRINGS; i++) {
+			struct vring_desc *desc;
+			struct vring_avail *avail;
+			struct vring_used *used;
+			struct vop_vringh *vvr = &vdev->vvr[i];
+			struct vringh *vrh = &vvr->vrh;
+			int num = vrh->vring.num;
+
+			if (!num)
+				continue;
+			desc = vrh->vring.desc;
+			seq_printf(s, "vring i %d avail_idx %d",
+				   i, vvr->vring.info->avail_idx & (num - 1));
+			seq_printf(s, " vring i %d avail_idx %d\n",
+				   i, vvr->vring.info->avail_idx);
+			seq_printf(s, "vrh i %d weak_barriers %d",
+				   i, vrh->weak_barriers);
+			seq_printf(s, " last_avail_idx %d last_used_idx %d",
+				   vrh->last_avail_idx, vrh->last_used_idx);
+			seq_printf(s, " completed %d\n", vrh->completed);
+			for (j = 0; j < num; j++) {
+				seq_printf(s, "desc[%d] addr 0x%llx len %d",
+					   j, desc->addr, desc->len);
+				seq_printf(s, " flags 0x%x next %d\n",
+					   desc->flags, desc->next);
+				desc++;
+			}
+			avail = vrh->vring.avail;
+			seq_printf(s, "avail flags 0x%x idx %d\n",
+				   vringh16_to_cpu(vrh, avail->flags),
+				   vringh16_to_cpu(vrh,
+						   avail->idx) & (num - 1));
+			seq_printf(s, "avail flags 0x%x idx %d\n",
+				   vringh16_to_cpu(vrh, avail->flags),
+				   vringh16_to_cpu(vrh, avail->idx));
+			for (j = 0; j < num; j++)
+				seq_printf(s, "avail ring[%d] %d\n",
+					   j, avail->ring[j]);
+			used = vrh->vring.used;
+			seq_printf(s, "used flags 0x%x idx %d\n",
+				   vringh16_to_cpu(vrh, used->flags),
+				   vringh16_to_cpu(vrh, used->idx) & (num - 1));
+			seq_printf(s, "used flags 0x%x idx %d\n",
+				   vringh16_to_cpu(vrh, used->flags),
+				   vringh16_to_cpu(vrh, used->idx));
+			for (j = 0; j < num; j++)
+				seq_printf(s, "used ring[%d] id %d len %d\n",
+					   j, vringh32_to_cpu(vrh,
+							      used->ring[j].id),
+					   vringh32_to_cpu(vrh,
+							   used->ring[j].len));
+		}
+	}
+	mutex_unlock(&vi->vop_mutex);
+
+	return 0;
+}
+
+static int vop_vdev_info_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vop_vdev_info_show, inode->i_private);
+}
+
+static int vop_vdev_info_debug_release(struct inode *inode, struct file *file)
+{
+	return single_release(inode, file);
+}
+
+static const struct file_operations vdev_info_ops = {
+	.owner   = THIS_MODULE,
+	.open    = vop_vdev_info_debug_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = vop_vdev_info_debug_release
+};
+
+void vop_init_debugfs(struct vop_info *vi)
+{
+	char name[16];
+
+	snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode);
+	vi->dbg = debugfs_create_dir(name, NULL);
+	if (!vi->dbg) {
+		pr_err("can't create debugfs dir vop\n");
+		return;
+	}
+	debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops);
+	debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops);
+}
+
+void vop_exit_debugfs(struct vop_info *vi)
+{
+	debugfs_remove_recursive(vi->dbg);
+}
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
new file mode 100644
index 0000000..1a2b67f3
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -0,0 +1,755 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Adapted from:
+ *
+ * virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_main.h"
+
+#define VOP_MAX_VRINGS 4
+
+/*
+ * _vop_vdev - Allocated per virtio device instance injected by the peer.
+ *
+ * @vdev: Virtio device
+ * @desc: Virtio device page descriptor
+ * @dc: Virtio device control
+ * @vpdev: VOP device which is the parent for this virtio device
+ * @vr: Buffer for accessing the VRING
+ * @used: DMA addresses of the reassigned used rings, one per VRING
+ * @used_size: Size of each used ring buffer
+ * @reset_done: Track whether VOP reset is complete
+ * @virtio_cookie: Cookie returned upon requesting an interrupt
+ * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
+ * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
+ * @dnode: The destination node
+ */
+struct _vop_vdev {
+	struct virtio_device vdev;
+	struct mic_device_desc __iomem *desc;
+	struct mic_device_ctrl __iomem *dc;
+	struct vop_device *vpdev;
+	void __iomem *vr[VOP_MAX_VRINGS];
+	dma_addr_t used[VOP_MAX_VRINGS];
+	int used_size[VOP_MAX_VRINGS];
+	struct completion reset_done;
+	struct mic_irq *virtio_cookie;
+	int c2h_vdev_db;
+	int h2c_vdev_db;
+	int dnode;
+};
+
+#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)
+
+#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
+
+/* Helper API to obtain the parent of the virtio device */
+static inline struct device *_vop_dev(struct _vop_vdev *vdev)
+{
+	return vdev->vdev.dev.parent;
+}
+
+static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
+{
+	return sizeof(*desc)
+		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
+		+ ioread8(&desc->feature_len) * 2
+		+ ioread8(&desc->config_len);
+}
+
+static inline struct mic_vqconfig __iomem *
+_vop_vq_config(struct mic_device_desc __iomem *desc)
+{
+	return (struct mic_vqconfig __iomem *)(desc + 1);
+}
+
+static inline u8 __iomem *
+_vop_vq_features(struct mic_device_desc __iomem *desc)
+{
+	return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
+}
+
+static inline u8 __iomem *
+_vop_vq_configspace(struct mic_device_desc __iomem *desc)
+{
+	return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
+}
+
+static inline unsigned
+_vop_total_desc_size(struct mic_device_desc __iomem *desc)
+{
+	return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
+}
+
+/* This gets the device's feature bits. */
+static u64 vop_get_features(struct virtio_device *vdev)
+{
+	unsigned int i, bits;
+	u32 features = 0;
+	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+	u8 __iomem *in_features = _vop_vq_features(desc);
+	int feature_len = ioread8(&desc->feature_len);
+
+	bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
+	for (i = 0; i < bits; i++)
+		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
+			features |= BIT(i);
+
+	return features;
+}
+
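+/*
+ * Feature bitmap layout in the device page (illustrative sketch): two byte
+ * arrays of feature_len bytes each follow the vqconfig entries. The first
+ * half holds the features offered by the host and is read by
+ * vop_get_features() above; the second half holds the bits acked by the
+ * guest and is written by vop_finalize_features() below. Bit N lives in
+ * byte N / 8, mask BIT(N % 8).
+ */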
+static int vop_finalize_features(struct virtio_device *vdev)
+{
+	unsigned int i, bits;
+	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+	u8 feature_len = ioread8(&desc->feature_len);
+	/* Second half of bitmap is features we accept. */
+	u8 __iomem *out_features =
+		_vop_vq_features(desc) + feature_len;
+
+	/* Give virtio_ring a chance to accept features. */
+	vring_transport_features(vdev);
+
+	memset_io(out_features, 0, feature_len);
+	bits = min_t(unsigned, feature_len,
+		     sizeof(vdev->features)) * 8;
+	for (i = 0; i < bits; i++) {
+		if (__virtio_test_bit(vdev, i))
+			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
+				 &out_features[i / 8]);
+	}
+	return 0;
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void vop_get(struct virtio_device *vdev, unsigned int offset,
+		    void *buf, unsigned len)
+{
+	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+	if (offset + len > ioread8(&desc->config_len))
+		return;
+	memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
+}
+
+static void vop_set(struct virtio_device *vdev, unsigned int offset,
+		    const void *buf, unsigned len)
+{
+	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+	if (offset + len > ioread8(&desc->config_len))
+		return;
+	memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access the status
+ * field of the device descriptor. set_status also interrupts the host
+ * to tell about status changes.
+ */
+static u8 vop_get_status(struct virtio_device *vdev)
+{
+	return ioread8(&to_vopvdev(vdev)->desc->status);
+}
+
+static void vop_set_status(struct virtio_device *dev, u8 status)
+{
+	struct _vop_vdev *vdev = to_vopvdev(dev);
+	struct vop_device *vpdev = vdev->vpdev;
+
+	if (!status)
+		return;
+	iowrite8(status, &vdev->desc->status);
+	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+}
+
+/* Inform host on a virtio device reset and wait for ack from host */
+static void vop_reset_inform_host(struct virtio_device *dev)
+{
+	struct _vop_vdev *vdev = to_vopvdev(dev);
+	struct mic_device_ctrl __iomem *dc = vdev->dc;
+	struct vop_device *vpdev = vdev->vpdev;
+	int retry;
+
+	iowrite8(0, &dc->host_ack);
+	iowrite8(1, &dc->vdev_reset);
+	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+
+	/* Wait till host completes all card accesses and acks the reset */
+	for (retry = 100; retry--;) {
+		if (ioread8(&dc->host_ack))
+			break;
+		msleep(100);
+	}
+
+	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+
+	/* Reset status to 0 in case we timed out */
+	iowrite8(0, &vdev->desc->status);
+}
+
+static void vop_reset(struct virtio_device *dev)
+{
+	struct _vop_vdev *vdev = to_vopvdev(dev);
+
+	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
+		__func__, dev->id.device);
+
+	vop_reset_inform_host(dev);
+	complete_all(&vdev->reset_done);
+}
+
+/*
+ * The virtio_ring code calls this API when it wants to notify the Host.
+ */
+static bool vop_notify(struct virtqueue *vq)
+{
+	struct _vop_vdev *vdev = vq->priv;
+	struct vop_device *vpdev = vdev->vpdev;
+
+	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+	return true;
+}
+
+static void vop_del_vq(struct virtqueue *vq, int n)
+{
+	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
+	struct vring *vr = (struct vring *)(vq + 1);
+	struct vop_device *vpdev = vdev->vpdev;
+
+	dma_unmap_single(&vpdev->dev, vdev->used[n],
+			 vdev->used_size[n], DMA_BIDIRECTIONAL);
+	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
+	vring_del_virtqueue(vq);
+	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
+	vdev->vr[n] = NULL;
+}
+
+static void vop_del_vqs(struct virtio_device *dev)
+{
+	struct _vop_vdev *vdev = to_vopvdev(dev);
+	struct virtqueue *vq, *n;
+	int idx = 0;
+
+	dev_dbg(_vop_dev(vdev), "%s\n", __func__);
+
+	list_for_each_entry_safe(vq, n, &dev->vqs, list)
+		vop_del_vq(vq, idx++);
+}
+
+/*
+ * This routine will assign vrings allocated in host/IO memory. Code in
+ * virtio_ring.c, however, continues to access this IO memory as if it were
+ * local memory, without IO accessors.
+ */
+static struct virtqueue *vop_find_vq(struct virtio_device *dev,
+				     unsigned index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name)
+{
+	struct _vop_vdev *vdev = to_vopvdev(dev);
+	struct vop_device *vpdev = vdev->vpdev;
+	struct mic_vqconfig __iomem *vqconfig;
+	struct mic_vqconfig config;
+	struct virtqueue *vq;
+	void __iomem *va;
+	struct _mic_vring_info __iomem *info;
+	void *used;
+	int vr_size, _vr_size, err, magic;
+	struct vring *vr;
+	u8 type = ioread8(&vdev->desc->type);
+
+	if (index >= ioread8(&vdev->desc->num_vq))
+		return ERR_PTR(-ENOENT);
+
+	if (!name)
+		return ERR_PTR(-ENOENT);
+
+	/* First assign the vrings allocated in host memory */
+	vqconfig = _vop_vq_config(vdev->desc) + index;
+	memcpy_fromio(&config, vqconfig, sizeof(config));
+	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
+	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+	va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
+			vr_size);
+	if (!va)
+		return ERR_PTR(-ENOMEM);
+	vdev->vr[index] = va;
+	memset_io(va, 0x0, _vr_size);
+	vq = vring_new_virtqueue(
+				index,
+				le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
+				dev,
+				false,
+				(void __force *)va, vop_notify, callback, name);
+	if (!vq) {
+		err = -ENOMEM;
+		goto unmap;
+	}
+	info = va + _vr_size;
+	magic = ioread32(&info->magic);
+
+	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
+		err = -EIO;
+		goto unmap;
+	}
+
+	/* Allocate and reassign used ring now */
+	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
+					     sizeof(struct vring_used_elem) *
+					     le16_to_cpu(config.num));
+	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					get_order(vdev->used_size[index]));
+	if (!used) {
+		err = -ENOMEM;
+		dev_err(_vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+		goto del_vq;
+	}
+	vdev->used[index] = dma_map_single(&vpdev->dev, used,
+					    vdev->used_size[index],
+					    DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
+		err = -ENOMEM;
+		dev_err(_vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+		goto free_used;
+	}
+	writeq(vdev->used[index], &vqconfig->used_address);
+	/*
+	 * To reassign the used ring here we are directly accessing
+	 * struct vring_virtqueue which is a private data structure
+	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
+	 * vring_new_virtqueue() would ensure that
+	 *  (&vq->vring == (struct vring *) (&vq->vq + 1));
+	 */
+	vr = (struct vring *)(vq + 1);
+	vr->used = used;
+
+	vq->priv = vdev;
+	return vq;
+free_used:
+	free_pages((unsigned long)used,
+		   get_order(vdev->used_size[index]));
+del_vq:
+	vring_del_virtqueue(vq);
+unmap:
+	vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
+	return ERR_PTR(err);
+}
+
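+/*
+ * Rough layout of one vring region as mapped in vop_find_vq() above
+ * (sketch only; exact sizes come from vring_size() and PAGE_ALIGN()):
+ *
+ *   config.address --> +------------------------------------------+
+ *                      | struct vring (desc, avail, used)         |
+ *                      +------------------------------------------+
+ *                      | struct _mic_vring_info (magic, avail_idx)|
+ *                      +------------------------------------------+
+ *                      | padding up to PAGE_ALIGN(vr_size)        |
+ *                      +------------------------------------------+
+ */
+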
+static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
+			struct virtqueue *vqs[],
+			vq_callback_t *callbacks[],
+			const char * const names[])
+{
+	struct _vop_vdev *vdev = to_vopvdev(dev);
+	struct vop_device *vpdev = vdev->vpdev;
+	struct mic_device_ctrl __iomem *dc = vdev->dc;
+	int i, err, retry;
+
+	/* We must have this many virtqueues. */
+	if (nvqs > ioread8(&vdev->desc->num_vq))
+		return -ENOENT;
+
+	for (i = 0; i < nvqs; ++i) {
+		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
+			__func__, i, names[i]);
+		vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]);
+		if (IS_ERR(vqs[i])) {
+			err = PTR_ERR(vqs[i]);
+			goto error;
+		}
+	}
+
+	iowrite8(1, &dc->used_address_updated);
+	/*
+	 * Send an interrupt to the host to inform it that used
+	 * rings have been re-assigned.
+	 */
+	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+	for (retry = 100; --retry;) {
+		if (!ioread8(&dc->used_address_updated))
+			break;
+		msleep(100);
+	}
+
+	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+	if (!retry) {
+		err = -ENODEV;
+		goto error;
+	}
+
+	return 0;
+error:
+	vop_del_vqs(dev);
+	return err;
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops vop_vq_config_ops = {
+	.get_features = vop_get_features,
+	.finalize_features = vop_finalize_features,
+	.get = vop_get,
+	.set = vop_set,
+	.get_status = vop_get_status,
+	.set_status = vop_set_status,
+	.reset = vop_reset,
+	.find_vqs = vop_find_vqs,
+	.del_vqs = vop_del_vqs,
+};
+
+static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
+{
+	struct _vop_vdev *vdev = data;
+	struct vop_device *vpdev = vdev->vpdev;
+	struct virtqueue *vq;
+
+	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
+	list_for_each_entry(vq, &vdev->vdev.vqs, list)
+		vring_interrupt(0, vq);
+
+	return IRQ_HANDLED;
+}
+
+static void vop_virtio_release_dev(struct device *_d)
+{
+	/*
+	 * No need for a release method similar to virtio PCI.
+	 * Provide an empty one to avoid getting a warning from core.
+	 */
+}
+
+/*
+ * adds a new device and registers it with virtio;
+ * appropriate drivers are then loaded by the device model
+ */
+static int _vop_add_device(struct mic_device_desc __iomem *d,
+			   unsigned int offset, struct vop_device *vpdev,
+			   int dnode)
+{
+	struct _vop_vdev *vdev;
+	int ret;
+	u8 type = ioread8(&d->type);
+
+	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+	if (!vdev)
+		return -ENOMEM;
+
+	vdev->vpdev = vpdev;
+	vdev->vdev.dev.parent = &vpdev->dev;
+	vdev->vdev.dev.release = vop_virtio_release_dev;
+	vdev->vdev.id.device = type;
+	vdev->vdev.config = &vop_vq_config_ops;
+	vdev->desc = d;
+	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+	vdev->dnode = dnode;
+	vdev->vdev.priv = (void *)(u64)dnode;
+	init_completion(&vdev->reset_done);
+
+	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
+	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+			vop_virtio_intr_handler, "virtio intr",
+			vdev, vdev->h2c_vdev_db);
+	if (IS_ERR(vdev->virtio_cookie)) {
+		ret = PTR_ERR(vdev->virtio_cookie);
+		goto kfree;
+	}
+	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
+	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
+
+	ret = register_virtio_device(&vdev->vdev);
+	if (ret) {
+		dev_err(_vop_dev(vdev),
+			"Failed to register vop device %u type %u\n",
+			offset, type);
+		goto free_irq;
+	}
+	writeq((u64)vdev, &vdev->dc->vdev);
+	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
+		__func__, offset, type, vdev);
+
+	return 0;
+
+free_irq:
+	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+kfree:
+	kfree(vdev);
+	return ret;
+}
+
+/*
+ * match for a vop device with a specific desc pointer
+ */
+static int vop_match_desc(struct device *dev, void *data)
+{
+	struct virtio_device *_dev = dev_to_virtio(dev);
+	struct _vop_vdev *vdev = to_vopvdev(_dev);
+
+	return vdev->desc == (void __iomem *)data;
+}
+
+static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
+				      unsigned int offset,
+				      struct vop_device *vpdev)
+{
+	struct mic_device_ctrl __iomem *dc
+		= (void __iomem *)d + _vop_aligned_desc_size(d);
+	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+
+	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
+		return;
+
+	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
+	virtio_config_changed(&vdev->vdev);
+	iowrite8(1, &dc->guest_ack);
+}
+
+/*
+ * removes a virtio device if a hot remove event has been
+ * requested by the host.
+ */
+static int _vop_remove_device(struct mic_device_desc __iomem *d,
+			      unsigned int offset, struct vop_device *vpdev)
+{
+	struct mic_device_ctrl __iomem *dc
+		= (void __iomem *)d + _vop_aligned_desc_size(d);
+	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+	u8 status;
+	int ret = -1;
+
+	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+		dev_dbg(&vpdev->dev,
+			"%s %d config_change %d type %d vdev %p\n",
+			__func__, __LINE__,
+			ioread8(&dc->config_change), ioread8(&d->type), vdev);
+		status = ioread8(&d->status);
+		reinit_completion(&vdev->reset_done);
+		unregister_virtio_device(&vdev->vdev);
+		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+		iowrite8(-1, &dc->h2c_vdev_db);
+		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+			wait_for_completion(&vdev->reset_done);
+		kfree(vdev);
+		iowrite8(1, &dc->guest_ack);
+		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
+			__func__, __LINE__, ioread8(&dc->guest_ack));
+		iowrite8(-1, &d->type);
+		ret = 0;
+	}
+	return ret;
+}
+
+#define REMOVE_DEVICES true
+
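+/*
+ * Device page layout walked by _vop_scan_devices() (sketch):
+ *
+ *   struct mic_bootparam
+ *   [ struct mic_device_desc | vqconfigs | features | config space ]
+ *   [ padding to 8 byte alignment ]
+ *   [ struct mic_device_ctrl ]
+ *   ... repeated per device; a descriptor with type == 0 terminates the
+ *   list and type == -1 marks a slot whose device was removed.
+ */
+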
+static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
+			      bool remove, int dnode)
+{
+	s8 type;
+	unsigned int i;
+	struct mic_device_desc __iomem *d;
+	struct mic_device_ctrl __iomem *dc;
+	struct device *dev;
+	int ret;
+
+	for (i = sizeof(struct mic_bootparam);
+			i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
+		d = dp + i;
+		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+		/*
+		 * This read barrier is paired with the corresponding write
+		 * barrier on the host which is inserted before adding or
+		 * removing a virtio device descriptor, by updating the type.
+		 */
+		rmb();
+		type = ioread8(&d->type);
+
+		/* end of list */
+		if (type == 0)
+			break;
+
+		if (type == -1)
+			continue;
+
+		/* device already exists */
+		dev = device_find_child(&vpdev->dev, (void __force *)d,
+					vop_match_desc);
+		if (dev) {
+			if (remove)
+				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
+					 &dc->config_change);
+			put_device(dev);
+			_vop_handle_config_change(d, i, vpdev);
+			ret = _vop_remove_device(d, i, vpdev);
+			if (remove) {
+				iowrite8(0, &dc->config_change);
+				iowrite8(0, &dc->guest_ack);
+			}
+			continue;
+		}
+
+		/* new device */
+		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
+			__func__, __LINE__, d);
+		if (!remove)
+			_vop_add_device(d, i, vpdev, dnode);
+	}
+}
+
+static void vop_scan_devices(struct vop_info *vi,
+			     struct vop_device *vpdev, bool remove)
+{
+	void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
+
+	if (!dp)
+		return;
+	mutex_lock(&vi->vop_mutex);
+	_vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
+	mutex_unlock(&vi->vop_mutex);
+}
+
+/*
+ * vop_hotplug_devices scans the device page for changes.
+ */
+static void vop_hotplug_devices(struct work_struct *work)
+{
+	struct vop_info *vi = container_of(work, struct vop_info,
+					     hotplug_work);
+
+	vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
+}
+
+/*
+ * Interrupt handler for hot plug/config changes etc.
+ */
+static irqreturn_t vop_extint_handler(int irq, void *data)
+{
+	struct vop_info *vi = data;
+	struct mic_bootparam __iomem *bp;
+	struct vop_device *vpdev = vi->vpdev;
+
+	bp = vpdev->hw_ops->get_remote_dp(vpdev);
+	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
+		__func__, __LINE__);
+	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
+	schedule_work(&vi->hotplug_work);
+	return IRQ_HANDLED;
+}
+
+static int vop_driver_probe(struct vop_device *vpdev)
+{
+	struct vop_info *vi;
+	int rc;
+
+	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+	if (!vi) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+	dev_set_drvdata(&vpdev->dev, vi);
+	vi->vpdev = vpdev;
+
+	mutex_init(&vi->vop_mutex);
+	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
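+	/*
+	 * dnode is assumed to be non-zero on the host side of the link, which
+	 * serves the device page, and zero on the card side, which scans the
+	 * peer's device page and registers the injected virtio devices.
+	 */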
+	if (vpdev->dnode) {
+		rc = vop_host_init(vi);
+		if (rc < 0)
+			goto free;
+	} else {
+		struct mic_bootparam __iomem *bootparam;
+
+		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);
+
+		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
+		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
+							vop_extint_handler,
+							"virtio_config_intr",
+							vi, vi->h2c_config_db);
+		if (IS_ERR(vi->cookie)) {
+			rc = PTR_ERR(vi->cookie);
+			goto free;
+		}
+		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
+		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
+	}
+	vop_init_debugfs(vi);
+	return 0;
+free:
+	kfree(vi);
+exit:
+	return rc;
+}
+
+static void vop_driver_remove(struct vop_device *vpdev)
+{
+	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+
+	if (vpdev->dnode) {
+		vop_host_uninit(vi);
+	} else {
+		struct mic_bootparam __iomem *bootparam =
+			vpdev->hw_ops->get_remote_dp(vpdev);
+		if (bootparam)
+			iowrite8(-1, &bootparam->h2c_config_db);
+		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
+		flush_work(&vi->hotplug_work);
+		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
+	}
+	vop_exit_debugfs(vi);
+	kfree(vi);
+}
+
+static struct vop_device_id id_table[] = {
+	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct vop_driver vop_driver = {
+	.driver.name =	KBUILD_MODNAME,
+	.driver.owner =	THIS_MODULE,
+	.id_table = id_table,
+	.probe = vop_driver_probe,
+	.remove = vop_driver_remove,
+};
+
+module_vop_driver(vop_driver);
+
+MODULE_DEVICE_TABLE(mbus, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/vop/vop_main.h b/drivers/misc/mic/vop/vop_main.h
new file mode 100644
index 0000000..ba47ec7
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_main.h
@@ -0,0 +1,170 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#ifndef _VOP_MAIN_H_
+#define _VOP_MAIN_H_
+
+#include <linux/vringh.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include "../bus/vop_bus.h"
+
+/*
+ * Note on endianness.
+ * 1. Host can be either BE or LE
+ * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail
+ *    rings and ioreadXX/iowriteXX to access used ring.
+ * 3. Device page exposed by host to guest contains LE values. Guest
+ *    accesses these using ioreadXX/iowriteXX etc. This way in general we
+ *    obey the virtio spec according to which guest works with native
+ *    endianness and host is aware of guest endianness and does all
+ *    required endianness conversion.
+ * 4. Data provided from user space to guest (in ADD_DEVICE and
+ *    CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be
+ *    in guest endianness.
+ */
+
+/*
+ * vop_info - Allocated per invocation of VOP probe
+ *
+ * @vpdev: VOP device
+ * @hotplug_work: Handle virtio device creation, deletion and configuration
+ * @cookie: Cookie received upon requesting a virtio configuration interrupt
+ * @h2c_config_db: The doorbell used by the peer to indicate a config change
+ * @vdev_list: List of "active" virtio devices injected in the peer node
+ * @vop_mutex: Synchronize access to the device page as well as serialize
+ *             creation/deletion of virtio devices on the peer node
+ * @dp: Peer device page information
+ * @dbg: Debugfs entry
+ * @dma_ch: The DMA channel used by this transport for data transfers.
+ * @name: Name for this transport used in misc device creation.
+ * @miscdev: The misc device registered.
+ */
+struct vop_info {
+	struct vop_device *vpdev;
+	struct work_struct hotplug_work;
+	struct mic_irq *cookie;
+	int h2c_config_db;
+	struct list_head vdev_list;
+	struct mutex vop_mutex;
+	void __iomem *dp;
+	struct dentry *dbg;
+	struct dma_chan *dma_ch;
+	char name[16];
+	struct miscdevice miscdev;
+};
+
+/**
+ * struct vop_vringh - Virtio ring host information.
+ *
+ * @vring: The VOP vring used for setting up user space mappings.
+ * @vrh: The host VRINGH used for accessing the card vrings.
+ * @riov: The VRINGH read kernel IOV.
+ * @wiov: The VRINGH write kernel IOV.
+ * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
+ * @vr_mutex: Mutex for synchronizing access to the VRING.
+ * @buf: Temporary kernel buffer used to copy in/out data
+ * from/to the card via DMA.
+ * @buf_da: dma address of buf.
+ * @vdev: Back pointer to VOP virtio device for vringh_notify(..).
+ */
+struct vop_vringh {
+	struct mic_vring vring;
+	struct vringh vrh;
+	struct vringh_kiov riov;
+	struct vringh_kiov wiov;
+	u16 head;
+	struct mutex vr_mutex;
+	void *buf;
+	dma_addr_t buf_da;
+	struct vop_vdev *vdev;
+};
+
+/**
+ * struct vop_vdev - Host information for a card Virtio device.
+ *
+ * @virtio_id - Virtio device id.
+ * @waitq - Waitqueue to allow ring3 apps to poll.
+ * @vpdev - pointer to VOP bus device.
+ * @poll_wake - Used for waking up threads blocked in poll.
+ * @out_bytes - Debug stats for number of bytes copied from host to card.
+ * @in_bytes - Debug stats for number of bytes copied from card to host.
+ * @out_bytes_dma - Debug stats for number of bytes copied from host to card
+ * using DMA.
+ * @in_bytes_dma - Debug stats for number of bytes copied from card to host
+ * using DMA.
+ * @tx_len_unaligned - Debug stats for number of bytes copied to the card where
+ * the transfer length did not have the required DMA alignment.
+ * @tx_dst_unaligned - Debug stats for number of bytes copied where the
+ * destination address on the card did not have the required DMA alignment.
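+ * @rx_dst_unaligned - Debug stats for number of bytes received where the
+ * destination address did not have the required DMA alignment.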
+ * @vvr - Store per VRING data structures.
+ * @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
+ * @dd - Virtio device descriptor.
+ * @dc - Virtio device control fields.
+ * @list - List of Virtio devices.
+ * @virtio_db - The doorbell used by the card to interrupt the host.
+ * @virtio_cookie - The cookie returned while requesting interrupts.
+ * @vi: Transport information.
+ * @vdev_mutex: Mutex synchronizing virtio device injection,
+ *              removal and data transfers.
+ * @destroy: Track if a virtio device is being destroyed.
+ * @deleted: The virtio device has been deleted.
+ */
+struct vop_vdev {
+	int virtio_id;
+	wait_queue_head_t waitq;
+	struct vop_device *vpdev;
+	int poll_wake;
+	unsigned long out_bytes;
+	unsigned long in_bytes;
+	unsigned long out_bytes_dma;
+	unsigned long in_bytes_dma;
+	unsigned long tx_len_unaligned;
+	unsigned long tx_dst_unaligned;
+	unsigned long rx_dst_unaligned;
+	struct vop_vringh vvr[MIC_MAX_VRINGS];
+	struct work_struct virtio_bh_work;
+	struct mic_device_desc *dd;
+	struct mic_device_ctrl *dc;
+	struct list_head list;
+	int virtio_db;
+	struct mic_irq *virtio_cookie;
+	struct vop_info *vi;
+	struct mutex vdev_mutex;
+	struct completion destroy;
+	bool deleted;
+};
+
+/* Helper API to check if a virtio device is running */
+static inline bool vop_vdevup(struct vop_vdev *vdev)
+{
+	return !!vdev->dd->status;
+}
+
+void vop_init_debugfs(struct vop_info *vi);
+void vop_exit_debugfs(struct vop_info *vi);
+int vop_host_init(struct vop_info *vi);
+void vop_host_uninit(struct vop_info *vi);
+#endif
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
new file mode 100644
index 0000000..e94c7fb
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -0,0 +1,1165 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include <linux/mic_ioctl.h>
+#include "vop_main.h"
+
+/* Helper API to obtain the VOP PCIe device */
+static inline struct device *vop_dev(struct vop_vdev *vdev)
+{
+	return vdev->vpdev->dev.parent;
+}
+
+/* Helper API to check if a virtio device is initialized */
+static inline int vop_vdev_inited(struct vop_vdev *vdev)
+{
+	if (!vdev)
+		return -EINVAL;
+	/* Device has not been created yet */
+	if (!vdev->dd || !vdev->dd->type) {
+		dev_err(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, -EINVAL);
+		return -EINVAL;
+	}
+	/* Device has been removed/deleted */
+	if (vdev->dd->type == -1) {
+		dev_dbg(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, -ENODEV);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void _vop_notify(struct vringh *vrh)
+{
+	struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh);
+	struct vop_vdev *vdev = vvrh->vdev;
+	struct vop_device *vpdev = vdev->vpdev;
+	s8 db = vdev->dc->h2c_vdev_db;
+
+	if (db != -1)
+		vpdev->hw_ops->send_intr(vpdev, db);
+}
+
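+/*
+ * Runs from the bottom half once the peer sets used_address_updated: the
+ * used rings were reallocated on the other side (see vop_find_vq()), so
+ * remap the addresses it wrote into vqconfig before declaring LINKUP.
+ */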
+static void vop_virtio_init_post(struct vop_vdev *vdev)
+{
+	struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
+	struct vop_device *vpdev = vdev->vpdev;
+	int i, used_size;
+
+	for (i = 0; i < vdev->dd->num_vq; i++) {
+		used_size = PAGE_ALIGN(sizeof(u16) * 3 +
+				sizeof(struct vring_used_elem) *
+				le16_to_cpu(vqconfig->num));
+		if (!le64_to_cpu(vqconfig[i].used_address)) {
+			dev_warn(vop_dev(vdev), "used_address zero??\n");
+			continue;
+		}
+		vdev->vvr[i].vrh.vring.used =
+			(void __force *)vpdev->hw_ops->ioremap(
+			vpdev,
+			le64_to_cpu(vqconfig[i].used_address),
+			used_size);
+	}
+
+	vdev->dc->used_address_updated = 0;
+
+	dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
+		 __func__, vdev->virtio_id);
+}
+
+static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
+{
+	int i;
+
+	dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
+		__func__, vdev->dd->status, vdev->virtio_id);
+
+	for (i = 0; i < vdev->dd->num_vq; i++)
+		/*
+		 * Avoid lockdep false positive. The + 1 is for the vop
+		 * mutex which is held in the reset devices code path.
+		 */
+		mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+	/* 0 status means "reset" */
+	vdev->dd->status = 0;
+	vdev->dc->vdev_reset = 0;
+	vdev->dc->host_ack = 1;
+
+	for (i = 0; i < vdev->dd->num_vq; i++) {
+		struct vringh *vrh = &vdev->vvr[i].vrh;
+
+		vdev->vvr[i].vring.info->avail_idx = 0;
+		vrh->completed = 0;
+		vrh->last_avail_idx = 0;
+		vrh->last_used_idx = 0;
+	}
+
+	for (i = 0; i < vdev->dd->num_vq; i++)
+		mutex_unlock(&vdev->vvr[i].vr_mutex);
+}
+
+static void vop_virtio_reset_devices(struct vop_info *vi)
+{
+	struct list_head *pos, *tmp;
+	struct vop_vdev *vdev;
+
+	list_for_each_safe(pos, tmp, &vi->vdev_list) {
+		vdev = list_entry(pos, struct vop_vdev, list);
+		vop_virtio_device_reset(vdev);
+		vdev->poll_wake = 1;
+		wake_up(&vdev->waitq);
+	}
+}
+
+static void vop_bh_handler(struct work_struct *work)
+{
+	struct vop_vdev *vdev = container_of(work, struct vop_vdev,
+			virtio_bh_work);
+
+	if (vdev->dc->used_address_updated)
+		vop_virtio_init_post(vdev);
+
+	if (vdev->dc->vdev_reset)
+		vop_virtio_device_reset(vdev);
+
+	vdev->poll_wake = 1;
+	wake_up(&vdev->waitq);
+}
+
+static irqreturn_t _vop_virtio_intr_handler(int irq, void *data)
+{
+	struct vop_vdev *vdev = data;
+	struct vop_device *vpdev = vdev->vpdev;
+
+	vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db);
+	schedule_work(&vdev->virtio_bh_work);
+	return IRQ_HANDLED;
+}
+
+static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
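+/*
+ * Note: the wait queue below is on-stack and never woken, so the
+ * wait_event_timeout() loop effectively polls dc->guest_ack roughly every
+ * 100 ms, for up to ~10 seconds, after the config change interrupt is sent.
+ */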
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+	int ret = 0, retry, i;
+	struct vop_device *vpdev = vdev->vpdev;
+	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+	s8 db = bootparam->h2c_config_db;
+
+	mutex_lock(&vi->vop_mutex);
+	for (i = 0; i < vdev->dd->num_vq; i++)
+		mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+	if (db == -1 || vdev->dd->type == -1) {
+		ret = -EIO;
+		goto exit;
+	}
+
+	memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
+	vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
+	vpdev->hw_ops->send_intr(vpdev, db);
+
+	for (retry = 100; retry--;) {
+		ret = wait_event_timeout(wake, vdev->dc->guest_ack,
+					 msecs_to_jiffies(100));
+		if (ret)
+			break;
+	}
+
+	dev_dbg(vop_dev(vdev),
+		"%s %d retry: %d\n", __func__, __LINE__, retry);
+	vdev->dc->config_change = 0;
+	vdev->dc->guest_ack = 0;
+exit:
+	for (i = 0; i < vdev->dd->num_vq; i++)
+		mutex_unlock(&vdev->vvr[i].vr_mutex);
+	mutex_unlock(&vi->vop_mutex);
+	return ret;
+}
+
+static int vop_copy_dp_entry(struct vop_vdev *vdev,
+			     struct mic_device_desc *argp, __u8 *type,
+			     struct mic_device_desc **devpage)
+{
+	struct vop_device *vpdev = vdev->vpdev;
+	struct mic_device_desc *devp;
+	struct mic_vqconfig *vqconfig;
+	int ret = 0, i;
+	bool slot_found = false;
+
+	vqconfig = mic_vq_config(argp);
+	for (i = 0; i < argp->num_vq; i++) {
+		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
+			ret =  -EINVAL;
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			goto exit;
+		}
+	}
+
+	/* Find the first free device page entry */
+	for (i = sizeof(struct mic_bootparam);
+		i < MIC_DP_SIZE - mic_total_desc_size(argp);
+		i += mic_total_desc_size(devp)) {
+		devp = vpdev->hw_ops->get_dp(vpdev) + i;
+		if (devp->type == 0 || devp->type == -1) {
+			slot_found = true;
+			break;
+		}
+	}
+	if (!slot_found) {
+		ret =  -EINVAL;
+		dev_err(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, ret);
+		goto exit;
+	}
+	/*
+	 * Save off the type before doing the memcpy. Type will be set at the
+	 * end, after all initialization for the new device has completed.
+	 */
+	*type = argp->type;
+	argp->type = 0;
+	memcpy(devp, argp, mic_desc_size(argp));
+
+	*devpage = devp;
+exit:
+	return ret;
+}
+
+static void vop_init_device_ctrl(struct vop_vdev *vdev,
+				 struct mic_device_desc *devpage)
+{
+	struct mic_device_ctrl *dc;
+
+	dc = (void *)devpage + mic_aligned_desc_size(devpage);
+
+	dc->config_change = 0;
+	dc->guest_ack = 0;
+	dc->vdev_reset = 0;
+	dc->host_ack = 0;
+	dc->used_address_updated = 0;
+	dc->c2h_vdev_db = -1;
+	dc->h2c_vdev_db = -1;
+	vdev->dc = dc;
+}
+
+static int vop_virtio_add_device(struct vop_vdev *vdev,
+				 struct mic_device_desc *argp)
+{
+	struct vop_info *vi = vdev->vi;
+	struct vop_device *vpdev = vi->vpdev;
+	struct mic_device_desc *dd = NULL;
+	struct mic_vqconfig *vqconfig;
+	int vr_size, i, j, ret;
+	u8 type = 0;
+	s8 db = -1;
+	char irqname[16];
+	struct mic_bootparam *bootparam;
+	u16 num;
+	dma_addr_t vr_addr;
+
+	bootparam = vpdev->hw_ops->get_dp(vpdev);
+	init_waitqueue_head(&vdev->waitq);
+	INIT_LIST_HEAD(&vdev->list);
+	vdev->vpdev = vpdev;
+
+	ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
+	if (ret) {
+		dev_err(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, ret);
+		kfree(vdev);
+		return ret;
+	}
+
+	vop_init_device_ctrl(vdev, dd);
+
+	vdev->dd = dd;
+	vdev->virtio_id = type;
+	vqconfig = mic_vq_config(dd);
+	INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
+
+	for (i = 0; i < dd->num_vq; i++) {
+		struct vop_vringh *vvr = &vdev->vvr[i];
+		struct mic_vring *vr = &vdev->vvr[i].vring;
+
+		num = le16_to_cpu(vqconfig[i].num);
+		mutex_init(&vvr->vr_mutex);
+		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
+			sizeof(struct _mic_vring_info));
+		vr->va = (void *)
+			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					 get_order(vr_size));
+		if (!vr->va) {
+			ret = -ENOMEM;
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			goto err;
+		}
+		vr->len = vr_size;
+		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
+		vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
+		vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
+					 DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(&vpdev->dev, vr_addr)) {
+			free_pages((unsigned long)vr->va, get_order(vr_size));
+			ret = -ENOMEM;
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			goto err;
+		}
+		vqconfig[i].address = cpu_to_le64(vr_addr);
+
+		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
+		ret = vringh_init_kern(&vvr->vrh,
+				       *(u32 *)mic_vq_features(vdev->dd),
+				       num, false, vr->vr.desc, vr->vr.avail,
+				       vr->vr.used);
+		if (ret) {
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			goto err;
+		}
+		vringh_kiov_init(&vvr->riov, NULL, 0);
+		vringh_kiov_init(&vvr->wiov, NULL, 0);
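+		/* USHRT_MAX means no descriptor head is currently outstanding */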
+		vvr->head = USHRT_MAX;
+		vvr->vdev = vdev;
+		vvr->vrh.notify = _vop_notify;
+		dev_dbg(&vpdev->dev,
+			"%s %d index %d va %p info %p vr_size 0x%x\n",
+			__func__, __LINE__, i, vr->va, vr->info, vr_size);
+		vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
+					get_order(VOP_INT_DMA_BUF_SIZE));
+		vvr->buf_da = dma_map_single(&vpdev->dev,
+					  vvr->buf, VOP_INT_DMA_BUF_SIZE,
+					  DMA_BIDIRECTIONAL);
+	}
+
+	snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
+		 vdev->virtio_id);
+	vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
+	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+			_vop_virtio_intr_handler, irqname, vdev,
+			vdev->virtio_db);
+	if (IS_ERR(vdev->virtio_cookie)) {
+		ret = PTR_ERR(vdev->virtio_cookie);
+		dev_dbg(&vpdev->dev, "request irq failed\n");
+		goto err;
+	}
+
+	vdev->dc->c2h_vdev_db = vdev->virtio_db;
+
+	/*
+	 * Order the type update with previous stores. This write barrier
+	 * is paired with the corresponding read barrier before the uncached
+	 * system memory read of the type, on the card while scanning the
+	 * device page.
+	 */
+	smp_wmb();
+	dd->type = type;
+	argp->type = type;
+
+	if (bootparam) {
+		db = bootparam->h2c_config_db;
+		if (db != -1)
+			vpdev->hw_ops->send_intr(vpdev, db);
+	}
+	dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
+	return 0;
+err:
+	vqconfig = mic_vq_config(dd);
+	for (j = 0; j < i; j++) {
+		struct vop_vringh *vvr = &vdev->vvr[j];
+
+		dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
+				 vvr->vring.len, DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vvr->vring.va,
+			   get_order(vvr->vring.len));
+	}
+	return ret;
+}
+
+static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
+			   struct vop_device *vpdev)
+{
+	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+	s8 db;
+	int ret, retry;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+
+	devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
+	db = bootparam->h2c_config_db;
+	if (db != -1)
+		vpdev->hw_ops->send_intr(vpdev, db);
+	else
+		goto done;
+	for (retry = 15; retry--;) {
+		ret = wait_event_timeout(wake, devp->guest_ack,
+					 msecs_to_jiffies(1000));
+		if (ret)
+			break;
+	}
+done:
+	devp->config_change = 0;
+	devp->guest_ack = 0;
+}
+
+static void vop_virtio_del_device(struct vop_vdev *vdev)
+{
+	struct vop_info *vi = vdev->vi;
+	struct vop_device *vpdev = vdev->vpdev;
+	int i;
+	struct mic_vqconfig *vqconfig;
+	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+
+	if (!bootparam)
+		goto skip_hot_remove;
+	vop_dev_remove(vi, vdev->dc, vpdev);
+skip_hot_remove:
+	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+	flush_work(&vdev->virtio_bh_work);
+	vqconfig = mic_vq_config(vdev->dd);
+	for (i = 0; i < vdev->dd->num_vq; i++) {
+		struct vop_vringh *vvr = &vdev->vvr[i];
+
+		dma_unmap_single(&vpdev->dev,
+				 vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
+				 DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vvr->buf,
+			   get_order(VOP_INT_DMA_BUF_SIZE));
+		vringh_kiov_cleanup(&vvr->riov);
+		vringh_kiov_cleanup(&vvr->wiov);
+		dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
+				 vvr->vring.len, DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vvr->vring.va,
+			   get_order(vvr->vring.len));
+	}
+	/*
+	 * Order the type update with previous stores. This write barrier
+	 * is paired with the corresponding read barrier before the uncached
+	 * system memory read of the type, on the card while scanning the
+	 * device page.
+	 */
+	smp_wmb();
+	vdev->dd->type = -1;
+}
+
+/*
+ * vop_sync_dma - Wrapper for synchronous DMAs.
+ *
+ * @vdev - the VOP virtio device whose DMA channel is used for the transfer.
+ * @dst - destination DMA address.
+ * @src - source DMA address.
+ * @len - size of the transfer.
+ *
+ * Return 0 on success and a non-zero value on failure.
+ */
+static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
+			size_t len)
+{
+	int err = 0;
+	struct dma_device *ddev;
+	struct dma_async_tx_descriptor *tx;
+	struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+	struct dma_chan *vop_ch = vi->dma_ch;
+
+	if (!vop_ch) {
+		err = -EBUSY;
+		goto error;
+	}
+	ddev = vop_ch->device;
+	tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
+		DMA_PREP_FENCE);
+	if (!tx) {
+		err = -ENOMEM;
+		goto error;
+	} else {
+		dma_cookie_t cookie;
+
+		cookie = tx->tx_submit(tx);
+		if (dma_submit_error(cookie)) {
+			err = -ENOMEM;
+			goto error;
+		}
+		dma_async_issue_pending(vop_ch);
+		err = dma_sync_wait(vop_ch, cookie);
+	}
+error:
+	if (err)
+		dev_err(&vi->vpdev->dev, "%s %d err %d\n",
+			__func__, __LINE__, err);
+	return err;
+}
+
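+/*
+ * When VOP_USE_DMA is false, the copy helpers below bypass the DMA engine
+ * and fall back to CPU copies through the ioremapped device aperture.
+ */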
+#define VOP_USE_DMA true
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to a user
+ * space buffer. When transfers are done using DMA, source/destination
+ * addresses and transfer length must follow the alignment requirements of
+ * the MIC DMA engine.
+ */
+static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
+				   size_t len, u64 daddr, size_t dlen,
+				   int vr_idx)
+{
+	struct vop_device *vpdev = vdev->vpdev;
+	void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+	struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+	size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
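+	/* byte-aligned copy support identifies the X200 DMA engine below */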
+	bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+	size_t dma_offset, partlen;
+	int err;
+
+	if (!VOP_USE_DMA) {
+		if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
+			err = -EFAULT;
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, err);
+			goto err;
+		}
+		vdev->in_bytes += len;
+		err = 0;
+		goto err;
+	}
+
+	dma_offset = daddr - round_down(daddr, dma_alignment);
+	daddr -= dma_offset;
+	len += dma_offset;
+	/*
+	 * X100 uses DMA addresses as seen by the card, so adding the
+	 * aperture base is not required for DMA. However, X200 requires
+	 * DMA addresses to be an offset into the BAR, so add the
+	 * aperture base for X200.
+	 */
+	if (x200)
+		daddr += vpdev->aper->pa;
+	while (len) {
+		partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+		err = vop_sync_dma(vdev, vvr->buf_da, daddr,
+				   ALIGN(partlen, dma_alignment));
+		if (err) {
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, err);
+			goto err;
+		}
+		if (copy_to_user(ubuf, vvr->buf + dma_offset,
+				 partlen - dma_offset)) {
+			err = -EFAULT;
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, err);
+			goto err;
+		}
+		daddr += partlen;
+		ubuf += partlen;
+		dbuf += partlen;
+		vdev->in_bytes_dma += partlen;
+		vdev->in_bytes += partlen;
+		len -= partlen;
+		dma_offset = 0;
+	}
+	err = 0;
+err:
+	vpdev->hw_ops->iounmap(vpdev, dbuf);
+	dev_dbg(vop_dev(vdev),
+		"%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+		__func__, ubuf, dbuf, len, vr_idx);
+	return err;
+}
+
+/*
+ * Initiates copies across the PCIe bus from a user space buffer to card
+ * memory. When transfers are done using DMA, source/destination addresses
+ * and transfer length must follow the alignment requirements of the MIC
+ * DMA engine.
+ */
+static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
+				     size_t len, u64 daddr, size_t dlen,
+				     int vr_idx)
+{
+	struct vop_device *vpdev = vdev->vpdev;
+	void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+	struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+	struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+	size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+	bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+	size_t partlen;
+	bool dma = VOP_USE_DMA;
+	int err = 0;
+
+	if (daddr & (dma_alignment - 1)) {
+		vdev->tx_dst_unaligned += len;
+		dma = false;
+	} else if (ALIGN(len, dma_alignment) > dlen) {
+		vdev->tx_len_unaligned += len;
+		dma = false;
+	}
+
+	if (!dma)
+		goto memcpy;
+
+	/*
+	 * X100 uses DMA addresses as seen by the card, so adding the
+	 * aperture base is not required for DMA. However, X200 requires
+	 * DMA addresses to be an offset into the BAR, so add the
+	 * aperture base for X200.
+	 */
+	if (x200)
+		daddr += vpdev->aper->pa;
+	while (len) {
+		partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+
+		if (copy_from_user(vvr->buf, ubuf, partlen)) {
+			err = -EFAULT;
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, err);
+			goto err;
+		}
+		err = vop_sync_dma(vdev, daddr, vvr->buf_da,
+				   ALIGN(partlen, dma_alignment));
+		if (err) {
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, err);
+			goto err;
+		}
+		daddr += partlen;
+		ubuf += partlen;
+		dbuf += partlen;
+		vdev->out_bytes_dma += partlen;
+		vdev->out_bytes += partlen;
+		len -= partlen;
+	}
+memcpy:
+	/*
+	 * We are copying to IO below and should ideally use something
+	 * like copy_from_user_toio(..) if it existed.
+	 */
+	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
+		err = -EFAULT;
+		dev_err(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+		goto err;
+	}
+	vdev->out_bytes += len;
+	err = 0;
+err:
+	vpdev->hw_ops->iounmap(vpdev, dbuf);
+	dev_dbg(vop_dev(vdev),
+		"%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+		__func__, ubuf, dbuf, len, vr_idx);
+	return err;
+}
+
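+/* 'read' flag value passed to vop_vringh_copy() for card->host transfers */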
+#define MIC_VRINGH_READ true
+
+/* Determine the total number of bytes consumed in a VRINGH KIOV */
+static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov)
+{
+	int i;
+	u32 total = iov->consumed;
+
+	for (i = 0; i < iov->i; i++)
+		total += iov->iov[i].iov_len;
+	return total;
+}
+
+/*
+ * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
+ * This API is heavily based on the vringh_iov_xfer(..) implementation
+ * in vringh.c. We cannot reuse vringh_iov_pull_kern(..) and
+ * vringh_iov_push_kern(..) directly because there is no way to
+ * override the VRINGH xfer(..) routines as of v3.10.
+ */
+static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
+			   void __user *ubuf, size_t len, bool read, int vr_idx,
+			   size_t *out_len)
+{
+	int ret = 0;
+	size_t partlen, tot_len = 0;
+
+	while (len && iov->i < iov->used) {
+		struct kvec *kiov = &iov->iov[iov->i];
+
+		partlen = min(kiov->iov_len, len);
+		if (read)
+			ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
+						      (u64)kiov->iov_base,
+						      kiov->iov_len,
+						      vr_idx);
+		else
+			ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
+							(u64)kiov->iov_base,
+							kiov->iov_len,
+							vr_idx);
+		if (ret) {
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			break;
+		}
+		len -= partlen;
+		ubuf += partlen;
+		tot_len += partlen;
+		iov->consumed += partlen;
+		kiov->iov_len -= partlen;
+		kiov->iov_base += partlen;
+		if (!kiov->iov_len) {
+			/* Fix up old iov element then increment. */
+			kiov->iov_len = iov->consumed;
+			kiov->iov_base -= iov->consumed;
+
+			iov->consumed = 0;
+			iov->i++;
+		}
+	}
+	*out_len = tot_len;
+	return ret;
+}
+
+/*
+ * Use the standard VRINGH infrastructure in the kernel to fetch new
+ * descriptors, initiate the copies and update the used ring.
+ */
+static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
+{
+	int ret = 0;
+	u32 iovcnt = copy->iovcnt;
+	struct iovec iov;
+	struct iovec __user *u_iov = copy->iov;
+	void __user *ubuf = NULL;
+	struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
+	struct vringh_kiov *riov = &vvr->riov;
+	struct vringh_kiov *wiov = &vvr->wiov;
+	struct vringh *vrh = &vvr->vrh;
+	u16 *head = &vvr->head;
+	struct mic_vring *vr = &vvr->vring;
+	size_t len = 0, out_len;
+
+	copy->out_len = 0;
+	/* Fetch a new IOVEC if all previous elements have been processed */
+	if (riov->i == riov->used && wiov->i == wiov->used) {
+		ret = vringh_getdesc_kern(vrh, riov, wiov,
+					  head, GFP_KERNEL);
+		/* Check if there are available descriptors */
+		if (ret <= 0)
+			return ret;
+	}
+	while (iovcnt) {
+		if (!len) {
+			/* Copy over a new iovec from user space. */
+			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
+			if (ret) {
+				ret = -EINVAL;
+				dev_err(vop_dev(vdev), "%s %d err %d\n",
+					__func__, __LINE__, ret);
+				break;
+			}
+			len = iov.iov_len;
+			ubuf = iov.iov_base;
+		}
+		/* Issue all the read descriptors first */
+		ret = vop_vringh_copy(vdev, riov, ubuf, len,
+				      MIC_VRINGH_READ, copy->vr_idx, &out_len);
+		if (ret) {
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			break;
+		}
+		len -= out_len;
+		ubuf += out_len;
+		copy->out_len += out_len;
+		/* Issue the write descriptors next */
+		ret = vop_vringh_copy(vdev, wiov, ubuf, len,
+				      !MIC_VRINGH_READ, copy->vr_idx, &out_len);
+		if (ret) {
+			dev_err(vop_dev(vdev), "%s %d err %d\n",
+				__func__, __LINE__, ret);
+			break;
+		}
+		len -= out_len;
+		ubuf += out_len;
+		copy->out_len += out_len;
+		if (!len) {
+			/* One user space iovec is now completed */
+			iovcnt--;
+			u_iov++;
+		}
+		/* Exit loop if all elements in KIOVs have been processed. */
+		if (riov->i == riov->used && wiov->i == wiov->used)
+			break;
+	}
+	/*
+	 * Update the used ring if a descriptor was available and some data was
+	 * copied in/out and the user asked for a used ring update.
+	 */
+	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
+		u32 total = 0;
+
+		/* Determine the total data consumed */
+		total += vop_vringh_iov_consumed(riov);
+		total += vop_vringh_iov_consumed(wiov);
+		vringh_complete_kern(vrh, *head, total);
+		*head = USHRT_MAX;
+		if (vringh_need_notify_kern(vrh) > 0)
+			vringh_notify(vrh);
+		vringh_kiov_cleanup(riov);
+		vringh_kiov_cleanup(wiov);
+		/* Update avail idx for user space */
+		vr->info->avail_idx = vrh->last_avail_idx;
+	}
+	return ret;
+}
+
+static inline int vop_verify_copy_args(struct vop_vdev *vdev,
+				       struct mic_copy_desc *copy)
+{
+	if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
+		return -EINVAL;
+	return 0;
+}
+
+/* Copy a specified number of virtio descriptors in a chain */
+static int vop_virtio_copy_desc(struct vop_vdev *vdev,
+				struct mic_copy_desc *copy)
+{
+	int err;
+	struct vop_vringh *vvr;
+
+	err = vop_verify_copy_args(vdev, copy);
+	if (err)
+		return err;
+
+	vvr = &vdev->vvr[copy->vr_idx];
+	mutex_lock(&vvr->vr_mutex);
+	if (!vop_vdevup(vdev)) {
+		err = -ENODEV;
+		dev_err(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+		goto err;
+	}
+	err = _vop_virtio_copy(vdev, copy);
+	if (err) {
+		dev_err(vop_dev(vdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+	}
+err:
+	mutex_unlock(&vvr->vr_mutex);
+	return err;
+}
+
+static int vop_open(struct inode *inode, struct file *f)
+{
+	struct vop_vdev *vdev;
+	struct vop_info *vi = container_of(f->private_data,
+		struct vop_info, miscdev);
+
+	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+	if (!vdev)
+		return -ENOMEM;
+	vdev->vi = vi;
+	mutex_init(&vdev->vdev_mutex);
+	f->private_data = vdev;
+	init_completion(&vdev->destroy);
+	complete(&vdev->destroy);
+	return 0;
+}
+
+static int vop_release(struct inode *inode, struct file *f)
+{
+	struct vop_vdev *vdev = f->private_data, *vdev_tmp;
+	struct vop_info *vi = vdev->vi;
+	struct list_head *pos, *tmp;
+	bool found = false;
+
+	mutex_lock(&vdev->vdev_mutex);
+	if (vdev->deleted)
+		goto unlock;
+	mutex_lock(&vi->vop_mutex);
+	list_for_each_safe(pos, tmp, &vi->vdev_list) {
+		vdev_tmp = list_entry(pos, struct vop_vdev, list);
+		if (vdev == vdev_tmp) {
+			vop_virtio_del_device(vdev);
+			list_del(pos);
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&vi->vop_mutex);
+unlock:
+	mutex_unlock(&vdev->vdev_mutex);
+	if (!found)
+		wait_for_completion(&vdev->destroy);
+	f->private_data = NULL;
+	kfree(vdev);
+	return 0;
+}
+
+static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	struct vop_vdev *vdev = f->private_data;
+	struct vop_info *vi = vdev->vi;
+	void __user *argp = (void __user *)arg;
+	int ret;
+
+	switch (cmd) {
+	case MIC_VIRTIO_ADD_DEVICE:
+	{
+		struct mic_device_desc dd, *dd_config;
+
+		if (copy_from_user(&dd, argp, sizeof(dd)))
+			return -EFAULT;
+
+		if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
+		    dd.num_vq > MIC_MAX_VRINGS)
+			return -EINVAL;
+
+		dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
+		if (!dd_config)
+			return -ENOMEM;
+		if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
+			ret = -EFAULT;
+			goto free_ret;
+		}
+		mutex_lock(&vdev->vdev_mutex);
+		mutex_lock(&vi->vop_mutex);
+		ret = vop_virtio_add_device(vdev, dd_config);
+		if (ret)
+			goto unlock_ret;
+		list_add_tail(&vdev->list, &vi->vdev_list);
+unlock_ret:
+		mutex_unlock(&vi->vop_mutex);
+		mutex_unlock(&vdev->vdev_mutex);
+free_ret:
+		kfree(dd_config);
+		return ret;
+	}
+	case MIC_VIRTIO_COPY_DESC:
+	{
+		struct mic_copy_desc copy;
+
+		mutex_lock(&vdev->vdev_mutex);
+		ret = vop_vdev_inited(vdev);
+		if (ret)
+			goto _unlock_ret;
+
+		if (copy_from_user(&copy, argp, sizeof(copy))) {
+			ret = -EFAULT;
+			goto _unlock_ret;
+		}
+
+		ret = vop_virtio_copy_desc(vdev, &copy);
+		if (ret < 0)
+			goto _unlock_ret;
+		if (copy_to_user(
+			&((struct mic_copy_desc __user *)argp)->out_len,
+			&copy.out_len, sizeof(copy.out_len)))
+			ret = -EFAULT;
+_unlock_ret:
+		mutex_unlock(&vdev->vdev_mutex);
+		return ret;
+	}
+	case MIC_VIRTIO_CONFIG_CHANGE:
+	{
+		void *buf;
+
+		mutex_lock(&vdev->vdev_mutex);
+		ret = vop_vdev_inited(vdev);
+		if (ret)
+			goto __unlock_ret;
+		buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
+		if (!buf) {
+			ret = -ENOMEM;
+			goto __unlock_ret;
+		}
+		if (copy_from_user(buf, argp, vdev->dd->config_len)) {
+			ret = -EFAULT;
+			goto done;
+		}
+		ret = vop_virtio_config_change(vdev, buf);
+done:
+		kfree(buf);
+__unlock_ret:
+		mutex_unlock(&vdev->vdev_mutex);
+		return ret;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	};
+	return 0;
+}
+
+/*
+ * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * not when previously enqueued buffers may be available. This means that
+ * in the card->host (TX) path, when userspace is unblocked by poll it
+ * must drain all available descriptors or it can stall.
+ */
+static unsigned int vop_poll(struct file *f, poll_table *wait)
+{
+	struct vop_vdev *vdev = f->private_data;
+	int mask = 0;
+
+	mutex_lock(&vdev->vdev_mutex);
+	if (vop_vdev_inited(vdev)) {
+		mask = POLLERR;
+		goto done;
+	}
+	poll_wait(f, &vdev->waitq, wait);
+	if (vop_vdev_inited(vdev)) {
+		mask = POLLERR;
+	} else if (vdev->poll_wake) {
+		vdev->poll_wake = 0;
+		mask = POLLIN | POLLOUT;
+	}
+done:
+	mutex_unlock(&vdev->vdev_mutex);
+	return mask;
+}
+
+static inline int
+vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
+		 unsigned long *size, unsigned long *pa)
+{
+	struct vop_device *vpdev = vdev->vpdev;
+	unsigned long start = MIC_DP_SIZE;
+	int i;
+
+	/*
+	 * MMAP interface is as follows:
+	 * offset				region
+	 * 0x0					virtio device_page
+	 * 0x1000				first vring
+	 * 0x1000 + size of 1st vring		second vring
+	 * ....
+	 */
+	if (!offset) {
+		*pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
+		*size = MIC_DP_SIZE;
+		return 0;
+	}
+
+	for (i = 0; i < vdev->dd->num_vq; i++) {
+		struct vop_vringh *vvr = &vdev->vvr[i];
+
+		if (offset == start) {
+			*pa = virt_to_phys(vvr->vring.va);
+			*size = vvr->vring.len;
+			return 0;
+		}
+		start += vvr->vring.len;
+	}
+	return -1;
+}
+
+/*
+ * Maps the device page and virtio rings to user space for readonly access.
+ */
+static int vop_mmap(struct file *f, struct vm_area_struct *vma)
+{
+	struct vop_vdev *vdev = f->private_data;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
+	int i, err;
+
+	err = vop_vdev_inited(vdev);
+	if (err)
+		goto ret;
+	if (vma->vm_flags & VM_WRITE) {
+		err = -EACCES;
+		goto ret;
+	}
+	while (size_rem) {
+		i = vop_query_offset(vdev, offset, &size, &pa);
+		if (i < 0) {
+			err = -EINVAL;
+			goto ret;
+		}
+		err = remap_pfn_range(vma, vma->vm_start + offset,
+				      pa >> PAGE_SHIFT, size,
+				      vma->vm_page_prot);
+		if (err)
+			goto ret;
+		size_rem -= size;
+		offset += size;
+	}
+ret:
+	return err;
+}
+
+static const struct file_operations vop_fops = {
+	.open = vop_open,
+	.release = vop_release,
+	.unlocked_ioctl = vop_ioctl,
+	.poll = vop_poll,
+	.mmap = vop_mmap,
+	.owner = THIS_MODULE,
+};
+
+int vop_host_init(struct vop_info *vi)
+{
+	int rc;
+	struct miscdevice *mdev;
+	struct vop_device *vpdev = vi->vpdev;
+
+	INIT_LIST_HEAD(&vi->vdev_list);
+	vi->dma_ch = vpdev->dma_ch;
+	mdev = &vi->miscdev;
+	mdev->minor = MISC_DYNAMIC_MINOR;
+	snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index);
+	mdev->name = vi->name;
+	mdev->fops = &vop_fops;
+	mdev->parent = &vpdev->dev;
+
+	rc = misc_register(mdev);
+	if (rc)
+		dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc);
+	return rc;
+}
+
+void vop_host_uninit(struct vop_info *vi)
+{
+	struct list_head *pos, *tmp;
+	struct vop_vdev *vdev;
+
+	mutex_lock(&vi->vop_mutex);
+	vop_virtio_reset_devices(vi);
+	list_for_each_safe(pos, tmp, &vi->vdev_list) {
+		vdev = list_entry(pos, struct vop_vdev, list);
+		list_del(pos);
+		reinit_completion(&vdev->destroy);
+		mutex_unlock(&vi->vop_mutex);
+		mutex_lock(&vdev->vdev_mutex);
+		vop_virtio_del_device(vdev);
+		vdev->deleted = true;
+		mutex_unlock(&vdev->vdev_mutex);
+		complete(&vdev->destroy);
+		mutex_lock(&vi->vop_mutex);
+	}
+	mutex_unlock(&vi->vop_mutex);
+	misc_deregister(&vi->miscdev);
+}
diff --git a/drivers/staging/panel/panel.c b/drivers/misc/panel.c
similarity index 96%
rename from drivers/staging/panel/panel.c
rename to drivers/misc/panel.c
index 70b8f4f..6030ac5 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/misc/panel.c
@@ -172,8 +172,6 @@
 /* logical or of the input bits involved in the scan matrix */
 static __u8 scan_mask_i;
 
-typedef __u64 pmask_t;
-
 enum input_type {
 	INPUT_TYPE_STD,
 	INPUT_TYPE_KBD,
@@ -188,8 +186,8 @@
 
 struct logical_input {
 	struct list_head list;
-	pmask_t mask;
-	pmask_t value;
+	__u64 mask;
+	__u64 value;
 	enum input_type type;
 	enum input_state state;
 	__u8 rise_time, fall_time;
@@ -219,19 +217,19 @@
  * corresponds to the ground.
  * Within each group, bits are stored in the same order as read on the port :
  * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0).
- * So, each __u64 (or pmask_t) is represented like this :
+ * So, each __u64 is represented like this :
  * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
  * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
  */
 
 /* what has just been read from the I/O ports */
-static pmask_t phys_read;
+static __u64 phys_read;
 /* previous phys_read */
-static pmask_t phys_read_prev;
+static __u64 phys_read_prev;
 /* stabilized phys_read (phys_read|phys_read_prev) */
-static pmask_t phys_curr;
+static __u64 phys_curr;
 /* previous phys_curr */
-static pmask_t phys_prev;
+static __u64 phys_prev;
 /* 0 means that at least one logical signal needs be computed */
 static char inputs_stable;
 
@@ -650,34 +648,28 @@
 
 static const char (*keypad_profile)[4][9] = old_keypad_profile;
 
-/* FIXME: this should be converted to a bit array containing signals states */
-static struct {
-	unsigned char e;  /* parallel LCD E (data latch on falling edge) */
-	unsigned char rs; /* parallel LCD RS  (0 = cmd, 1 = data) */
-	unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */
-	unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */
-	unsigned char cl; /* serial LCD clock (latch on rising edge) */
-	unsigned char da; /* serial LCD data */
-} bits;
+static DECLARE_BITMAP(bits, LCD_BITS);
+
+static void lcd_get_bits(unsigned int port, int *val)
+{
+	unsigned int bit, state;
+
+	for (bit = 0; bit < LCD_BITS; bit++) {
+		state = test_bit(bit, bits) ? BIT_SET : BIT_CLR;
+		*val &= lcd_bits[port][bit][BIT_MSK];
+		*val |= lcd_bits[port][bit][state];
+	}
+}
 
 static void init_scan_timer(void);
 
 /* sets data port bits according to current signals values */
 static int set_data_bits(void)
 {
-	int val, bit;
+	int val;
 
 	val = r_dtr(pprt);
-	for (bit = 0; bit < LCD_BITS; bit++)
-		val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK];
-
-	val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e]
-	    | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs]
-	    | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw]
-	    | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl]
-	    | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl]
-	    | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da];
-
+	lcd_get_bits(LCD_PORT_D, &val);
 	w_dtr(pprt, val);
 	return val;
 }
@@ -685,19 +677,10 @@
 /* sets ctrl port bits according to current signals values */
 static int set_ctrl_bits(void)
 {
-	int val, bit;
+	int val;
 
 	val = r_ctr(pprt);
-	for (bit = 0; bit < LCD_BITS; bit++)
-		val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK];
-
-	val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e]
-	    | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs]
-	    | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw]
-	    | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl]
-	    | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl]
-	    | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da];
-
+	lcd_get_bits(LCD_PORT_C, &val);
 	w_ctr(pprt, val);
 	return val;
 }
@@ -793,12 +776,17 @@
 	 * LCD reads D0 on STROBE's rising edge.
 	 */
 	for (bit = 0; bit < 8; bit++) {
-		bits.cl = BIT_CLR;	/* CLK low */
+		clear_bit(LCD_BIT_CL, bits);	/* CLK low */
 		panel_set_bits();
-		bits.da = byte & 1;
+		if (byte & 1) {
+			set_bit(LCD_BIT_DA, bits);
+		} else {
+			clear_bit(LCD_BIT_DA, bits);
+		}
+
 		panel_set_bits();
 		udelay(2);  /* maintain the data during 2 us before CLK up */
-		bits.cl = BIT_SET;	/* CLK high */
+		set_bit(LCD_BIT_CL, bits);	/* CLK high */
 		panel_set_bits();
 		udelay(1);  /* maintain the strobe during 1 us */
 		byte >>= 1;
@@ -813,7 +801,10 @@
 
 	/* The backlight is activated by setting the AUTOFEED line to +5V  */
 	spin_lock_irq(&pprt_lock);
-	bits.bl = on;
+	if (on)
+		set_bit(LCD_BIT_BL, bits);
+	else
+		clear_bit(LCD_BIT_BL, bits);
 	panel_set_bits();
 	spin_unlock_irq(&pprt_lock);
 }
@@ -848,14 +839,14 @@
 	w_dtr(pprt, cmd);
 	udelay(20);	/* maintain the data during 20 us before the strobe */
 
-	bits.e = BIT_SET;
-	bits.rs = BIT_CLR;
-	bits.rw = BIT_CLR;
+	set_bit(LCD_BIT_E, bits);
+	clear_bit(LCD_BIT_RS, bits);
+	clear_bit(LCD_BIT_RW, bits);
 	set_ctrl_bits();
 
 	udelay(40);	/* maintain the strobe during 40 us */
 
-	bits.e = BIT_CLR;
+	clear_bit(LCD_BIT_E, bits);
 	set_ctrl_bits();
 
 	udelay(120);	/* the shortest command takes at least 120 us */
@@ -870,14 +861,14 @@
 	w_dtr(pprt, data);
 	udelay(20);	/* maintain the data during 20 us before the strobe */
 
-	bits.e = BIT_SET;
-	bits.rs = BIT_SET;
-	bits.rw = BIT_CLR;
+	set_bit(LCD_BIT_E, bits);
+	set_bit(LCD_BIT_RS, bits);
+	clear_bit(LCD_BIT_RW, bits);
 	set_ctrl_bits();
 
 	udelay(40);	/* maintain the strobe during 40 us */
 
-	bits.e = BIT_CLR;
+	clear_bit(LCD_BIT_E, bits);
 	set_ctrl_bits();
 
 	udelay(45);	/* the shortest data takes at least 45 us */
@@ -943,7 +934,8 @@
 		lcd_send_serial(0x5F);	/* R/W=W, RS=1 */
 		lcd_send_serial(' ' & 0x0F);
 		lcd_send_serial((' ' >> 4) & 0x0F);
-		udelay(40);	/* the shortest data takes at least 40 us */
+		/* the shortest data takes at least 40 us */
+		udelay(40);
 	}
 	spin_unlock_irq(&pprt_lock);
 
@@ -969,15 +961,15 @@
 		/* maintain the data during 20 us before the strobe */
 		udelay(20);
 
-		bits.e = BIT_SET;
-		bits.rs = BIT_SET;
-		bits.rw = BIT_CLR;
+		set_bit(LCD_BIT_E, bits);
+		set_bit(LCD_BIT_RS, bits);
+		clear_bit(LCD_BIT_RW, bits);
 		set_ctrl_bits();
 
 		/* maintain the strobe during 40 us */
 		udelay(40);
 
-		bits.e = BIT_CLR;
+		clear_bit(LCD_BIT_E, bits);
 		set_ctrl_bits();
 
 		/* the shortest data takes at least 45 us */
@@ -1784,7 +1776,7 @@
 	gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
 
 	/* grounded inputs are signals 40-44 */
-	phys_read |= (pmask_t) gndmask << 40;
+	phys_read |= (__u64)gndmask << 40;
 
 	if (bitmask != gndmask) {
 		/*
@@ -1800,7 +1792,7 @@
 
 			w_dtr(pprt, oldval & ~bitval);	/* enable this output */
 			bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask;
-			phys_read |= (pmask_t) bitmask << (5 * bit);
+			phys_read |= (__u64)bitmask << (5 * bit);
 		}
 		w_dtr(pprt, oldval);	/* disable all outputs */
 	}
@@ -2037,32 +2029,32 @@
  * corresponding to out and in bits respectively.
  * returns 1 if ok, 0 if error (in which case, nothing is written).
  */
-static int input_name2mask(const char *name, pmask_t *mask, pmask_t *value,
-			   char *imask, char *omask)
+static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value,
+			  u8 *imask, u8 *omask)
 {
-	static char sigtab[10] = "EeSsPpAaBb";
-	char im, om;
-	pmask_t m, v;
+	const char sigtab[] = "EeSsPpAaBb";
+	u8 im, om;
+	__u64 m, v;
 
-	om = 0ULL;
-	im = 0ULL;
+	om = 0;
+	im = 0;
 	m = 0ULL;
 	v = 0ULL;
 	while (*name) {
 		int in, out, bit, neg;
+		const char *idx;
 
-		for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name);
-		     in++)
-			;
-
-		if (in >= sizeof(sigtab))
+		idx = strchr(sigtab, *name);
+		if (!idx)
 			return 0;	/* input name not found */
+
+		in = idx - sigtab;
 		neg = (in & 1);	/* odd (lower) names are negated */
 		in >>= 1;
 		im |= BIT(in);
 
 		name++;
-		if (isdigit(*name)) {
+		if (*name >= '0' && *name <= '7') {
 			out = *name - '0';
 			om |= BIT(out);
 		} else if (*name == '-') {
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 9a17a9ba..15bb0c8 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -503,8 +503,7 @@
 	int err;
 	ssize_t rom_size;
 
-	struct pch_phub_reg *chip =
-		dev_get_drvdata(container_of(kobj, struct device, kobj));
+	struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
 
 	ret = mutex_lock_interruptible(&pch_phub_mutex);
 	if (ret) {
@@ -567,8 +566,7 @@
 	unsigned int addr_offset;
 	int ret;
 	ssize_t rom_size;
-	struct pch_phub_reg *chip =
-		dev_get_drvdata(container_of(kobj, struct device, kobj));
+	struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
 
 	ret = mutex_lock_interruptible(&pch_phub_mutex);
 	if (ret)
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 6e3af8b..dcdbd58 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -632,7 +632,6 @@
 		spin_unlock_irqrestore(&st_gdata->lock, flags);
 		return err;
 	}
-	pr_debug("done %s(%d) ", __func__, new_proto->chnl_id);
 }
 EXPORT_SYMBOL_GPL(st_register);
 
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index b823f9a..896be150 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.3.0-k");
+MODULE_VERSION("1.1.4.0-k");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index bc4ea58..5bd18cc 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -25,6 +25,15 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem-imx-ocotp.
 
+config NVMEM_LPC18XX_EEPROM
+	tristate "NXP LPC18XX EEPROM Memory Support"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	help
+	  Say Y here to include support for NXP LPC18xx EEPROM memory found in
+	  NXP LPC185x/3x and LPC435x/3x/2x/1x devices.
+	  To compile this driver as a module, choose M here: the module
+	  will be called nvmem_lpc18xx_eeprom.
+
 config NVMEM_MXS_OCOTP
 	tristate "Freescale MXS On-Chip OTP Memory Support"
 	depends on ARCH_MXS || COMPILE_TEST
@@ -36,6 +45,17 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem-mxs-ocotp.
 
+config MTK_EFUSE
+	tristate "Mediatek SoCs EFUSE support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select REGMAP_MMIO
+	help
+	  This is a driver to access hardware-related data such as sensor
+	  calibration, HDMI impedance, etc.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called efuse-mtk.
+
 config QCOM_QFPROM
 	tristate "QCOM QFPROM Support"
 	depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 95dde3f..45ab1ae 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -8,8 +8,12 @@
 # Devices
 obj-$(CONFIG_NVMEM_IMX_OCOTP)	+= nvmem-imx-ocotp.o
 nvmem-imx-ocotp-y		:= imx-ocotp.o
+obj-$(CONFIG_NVMEM_LPC18XX_EEPROM)	+= nvmem_lpc18xx_eeprom.o
+nvmem_lpc18xx_eeprom-y	:= lpc18xx_eeprom.o
 obj-$(CONFIG_NVMEM_MXS_OCOTP)	+= nvmem-mxs-ocotp.o
 nvmem-mxs-ocotp-y		:= mxs-ocotp.o
+obj-$(CONFIG_MTK_EFUSE)		+= nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y		:= mtk-efuse.o
 obj-$(CONFIG_QCOM_QFPROM)	+= nvmem_qfprom.o
 nvmem_qfprom-y			:= qfprom.o
 obj-$(CONFIG_ROCKCHIP_EFUSE)	+= nvmem_rockchip_efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9d11d98..de14fae 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -294,9 +294,11 @@
 
 	return 0;
 err:
-	while (--i)
+	while (i--)
 		nvmem_cell_drop(cells[i]);
 
+	kfree(cells);
+
 	return rval;
 }
 
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
new file mode 100644
index 0000000..878fce7
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -0,0 +1,330 @@
+/*
+ * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* Registers */
+#define LPC18XX_EEPROM_AUTOPROG			0x00c
+#define LPC18XX_EEPROM_AUTOPROG_WORD		0x1
+
+#define LPC18XX_EEPROM_CLKDIV			0x014
+
+#define LPC18XX_EEPROM_PWRDWN			0x018
+#define LPC18XX_EEPROM_PWRDWN_NO		0x0
+#define LPC18XX_EEPROM_PWRDWN_YES		0x1
+
+#define LPC18XX_EEPROM_INTSTAT			0xfe0
+#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG	BIT(2)
+
+#define LPC18XX_EEPROM_INTSTATCLR		0xfe8
+#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST	BIT(2)
+
+/* Fixed page size (bytes) */
+#define LPC18XX_EEPROM_PAGE_SIZE		0x80
+
+/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */
+#define LPC18XX_EEPROM_CLOCK_HZ			1500000
+
+/* EEPROM requires 3 ms of erase/program time between writes */
+#define LPC18XX_EEPROM_PROGRAM_TIME		3
+
+struct lpc18xx_eeprom_dev {
+	struct clk *clk;
+	void __iomem *reg_base;
+	void __iomem *mem_base;
+	struct nvmem_device *nvmem;
+	unsigned reg_bytes;
+	unsigned val_bytes;
+};
+
+static struct regmap_config lpc18xx_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+};
+
+static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
+					 u32 reg, u32 val)
+{
+	writel(val, eeprom->reg_base + reg);
+}
+
+static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom,
+				       u32 reg)
+{
+	return readl(eeprom->reg_base + reg);
+}
+
+static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
+{
+	unsigned long end;
+	u32 val;
+
+	/* Wait until EEPROM program operation has finished */
+	end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10);
+
+	while (time_is_after_jiffies(end)) {
+		val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT);
+
+		if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) {
+			lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR,
+					LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST);
+			return 0;
+		}
+
+		usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC,
+			     (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
+				       size_t reg_size, const void *val,
+				       size_t val_size)
+{
+	struct lpc18xx_eeprom_dev *eeprom = context;
+	unsigned int offset = *(u32 *)reg;
+	int ret;
+
+	if (offset % lpc18xx_regmap_config.reg_stride)
+		return -EINVAL;
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_NO);
+
+	/* Wait 100 us while the EEPROM wakes up */
+	usleep_range(100, 200);
+
+	while (val_size) {
+		writel(*(u32 *)val, eeprom->mem_base + offset);
+		ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
+		if (ret < 0)
+			return ret;
+
+		val_size -= eeprom->val_bytes;
+		val += eeprom->val_bytes;
+		offset += eeprom->val_bytes;
+	}
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_YES);
+
+	return 0;
+}
+
+static int lpc18xx_eeprom_write(void *context, const void *data, size_t count)
+{
+	struct lpc18xx_eeprom_dev *eeprom = context;
+	unsigned int offset = eeprom->reg_bytes;
+
+	if (count <= offset)
+		return -EINVAL;
+
+	return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes,
+					   data + offset, count - offset);
+}
+
+static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
+			       void *val, size_t val_size)
+{
+	struct lpc18xx_eeprom_dev *eeprom = context;
+	unsigned int offset = *(u32 *)reg;
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_NO);
+
+	/* Wait 100 us while the EEPROM wakes up */
+	usleep_range(100, 200);
+
+	while (val_size) {
+		*(u32 *)val = readl(eeprom->mem_base + offset);
+		val_size -= eeprom->val_bytes;
+		val += eeprom->val_bytes;
+		offset += eeprom->val_bytes;
+	}
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_YES);
+
+	return 0;
+}
+
+static struct regmap_bus lpc18xx_eeprom_bus = {
+	.write = lpc18xx_eeprom_write,
+	.gather_write = lpc18xx_eeprom_gather_write,
+	.read = lpc18xx_eeprom_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg)
+{
+	/*
+	 * The last page contains the EEPROM initialization data and is not
+	 * writable.
+	 */
+	return reg <= lpc18xx_regmap_config.max_register -
+						LPC18XX_EEPROM_PAGE_SIZE;
+}
+
+static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg)
+{
+	return reg <= lpc18xx_regmap_config.max_register;
+}
+
+static struct nvmem_config lpc18xx_nvmem_config = {
+	.name = "lpc18xx-eeprom",
+	.owner = THIS_MODULE,
+};
+
+static int lpc18xx_eeprom_probe(struct platform_device *pdev)
+{
+	struct lpc18xx_eeprom_dev *eeprom;
+	struct device *dev = &pdev->dev;
+	struct reset_control *rst;
+	unsigned long clk_rate;
+	struct regmap *regmap;
+	struct resource *res;
+	int ret;
+
+	eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
+	if (!eeprom)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
+	eeprom->reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(eeprom->reg_base))
+		return PTR_ERR(eeprom->reg_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+	eeprom->mem_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(eeprom->mem_base))
+		return PTR_ERR(eeprom->mem_base);
+
+	eeprom->clk = devm_clk_get(&pdev->dev, "eeprom");
+	if (IS_ERR(eeprom->clk)) {
+		dev_err(&pdev->dev, "failed to get eeprom clock\n");
+		return PTR_ERR(eeprom->clk);
+	}
+
+	ret = clk_prepare_enable(eeprom->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret);
+		return ret;
+	}
+
+	rst = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(rst)) {
+		dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
+		ret = PTR_ERR(rst);
+		goto err_clk;
+	}
+
+	ret = reset_control_assert(rst);
+	if (ret < 0) {
+		dev_err(dev, "failed to assert reset: %d\n", ret);
+		goto err_clk;
+	}
+
+	eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE;
+	eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE;
+
+	/*
+	 * The EEPROM clock is derived by dividing the system bus clock by the
+	 * division factor programmed in the divider register, which holds the
+	 * factor minus one.
+	 */
+	clk_rate = clk_get_rate(eeprom->clk);
+	clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
+
+	/*
+	 * Writing a single word to the page will start the erase/program cycle
+	 * automatically
+	 */
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
+			      LPC18XX_EEPROM_AUTOPROG_WORD);
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_YES);
+
+	lpc18xx_regmap_config.max_register = resource_size(res) - 1;
+	lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg;
+	lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg;
+
+	regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom,
+				  &lpc18xx_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap));
+		ret = PTR_ERR(regmap);
+		goto err_clk;
+	}
+
+	lpc18xx_nvmem_config.dev = dev;
+
+	eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
+	if (IS_ERR(eeprom->nvmem)) {
+		ret = PTR_ERR(eeprom->nvmem);
+		goto err_clk;
+	}
+
+	platform_set_drvdata(pdev, eeprom);
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(eeprom->clk);
+
+	return ret;
+}
+
+static int lpc18xx_eeprom_remove(struct platform_device *pdev)
+{
+	struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = nvmem_unregister(eeprom->nvmem);
+	if (ret < 0)
+		return ret;
+
+	clk_disable_unprepare(eeprom->clk);
+
+	return 0;
+}
+
+static const struct of_device_id lpc18xx_eeprom_of_match[] = {
+	{ .compatible = "nxp,lpc1857-eeprom" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
+
+static struct platform_driver lpc18xx_eeprom_driver = {
+	.probe = lpc18xx_eeprom_probe,
+	.remove = lpc18xx_eeprom_remove,
+	.driver = {
+		.name = "lpc18xx-eeprom",
+		.of_match_table = lpc18xx_eeprom_of_match,
+	},
+};
+
+module_platform_driver(lpc18xx_eeprom_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
new file mode 100644
index 0000000..7b35f5b
--- /dev/null
+++ b/drivers/nvmem/mtk-efuse.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config mtk_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
+static int mtk_efuse_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct nvmem_device *nvmem;
+	struct nvmem_config *econfig;
+	struct regmap *regmap;
+	void __iomem *base;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
+	if (!econfig)
+		return -ENOMEM;
+
+	mtk_regmap_config.max_register = resource_size(res) - 1;
+
+	regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(regmap);
+	}
+
+	econfig->dev = dev;
+	econfig->owner = THIS_MODULE;
+	nvmem = nvmem_register(econfig);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static int mtk_efuse_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id mtk_efuse_of_match[] = {
+	{ .compatible = "mediatek,mt8173-efuse",},
+	{ .compatible = "mediatek,efuse",},
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
+
+static struct platform_driver mtk_efuse_driver = {
+	.probe = mtk_efuse_probe,
+	.remove = mtk_efuse_remove,
+	.driver = {
+		.name = "mediatek,efuse",
+		.of_match_table = mtk_efuse_of_match,
+	},
+};
+module_platform_driver(mtk_efuse_driver);
+MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek EFUSE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index f552134..a009795 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -14,16 +14,16 @@
  * more details.
  */
 
-#include <linux/platform_device.h>
-#include <linux/nvmem-provider.h>
-#include <linux/slab.h>
-#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/delay.h>
+#include <linux/nvmem-provider.h>
+#include <linux/slab.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
 
 #define EFUSE_A_SHIFT			6
 #define EFUSE_A_MASK			0x3ff
@@ -35,10 +35,10 @@
 #define REG_EFUSE_CTRL			0x0000
 #define REG_EFUSE_DOUT			0x0004
 
-struct rockchip_efuse_context {
+struct rockchip_efuse_chip {
 	struct device *dev;
 	void __iomem *base;
-	struct clk *efuse_clk;
+	struct clk *clk;
 };
 
 static int rockchip_efuse_write(void *context, const void *data, size_t count)
@@ -52,34 +52,32 @@
 			       void *val, size_t val_size)
 {
 	unsigned int offset = *(u32 *)reg;
-	struct rockchip_efuse_context *_context = context;
-	void __iomem *base = _context->base;
-	struct clk *clk = _context->efuse_clk;
+	struct rockchip_efuse_chip *efuse = context;
 	u8 *buf = val;
 	int ret;
 
-	ret = clk_prepare_enable(clk);
+	ret = clk_prepare_enable(efuse->clk);
 	if (ret < 0) {
-		dev_err(_context->dev, "failed to prepare/enable efuse clk\n");
+		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
 		return ret;
 	}
 
-	writel(EFUSE_LOAD | EFUSE_PGENB, base + REG_EFUSE_CTRL);
+	writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL);
 	udelay(1);
 	while (val_size) {
-		writel(readl(base + REG_EFUSE_CTRL) &
+		writel(readl(efuse->base + REG_EFUSE_CTRL) &
 			     (~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
-			     base + REG_EFUSE_CTRL);
-		writel(readl(base + REG_EFUSE_CTRL) |
+			     efuse->base + REG_EFUSE_CTRL);
+		writel(readl(efuse->base + REG_EFUSE_CTRL) |
 			     ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT),
-			     base + REG_EFUSE_CTRL);
+			     efuse->base + REG_EFUSE_CTRL);
 		udelay(1);
-		writel(readl(base + REG_EFUSE_CTRL) |
-			     EFUSE_STROBE, base + REG_EFUSE_CTRL);
+		writel(readl(efuse->base + REG_EFUSE_CTRL) |
+			     EFUSE_STROBE, efuse->base + REG_EFUSE_CTRL);
 		udelay(1);
-		*buf++ = readb(base + REG_EFUSE_DOUT);
-		writel(readl(base + REG_EFUSE_CTRL) &
-		     (~EFUSE_STROBE), base + REG_EFUSE_CTRL);
+		*buf++ = readb(efuse->base + REG_EFUSE_DOUT);
+		writel(readl(efuse->base + REG_EFUSE_CTRL) &
+		     (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL);
 		udelay(1);
 
 		val_size -= 1;
@@ -87,9 +85,9 @@
 	}
 
 	/* Switch to standby mode */
-	writel(EFUSE_PGENB | EFUSE_CSB, base + REG_EFUSE_CTRL);
+	writel(EFUSE_PGENB | EFUSE_CSB, efuse->base + REG_EFUSE_CTRL);
 
-	clk_disable_unprepare(clk);
+	clk_disable_unprepare(efuse->clk);
 
 	return 0;
 }
@@ -114,48 +112,44 @@
 };
 
 static const struct of_device_id rockchip_efuse_match[] = {
-	{ .compatible = "rockchip,rockchip-efuse",},
+	{ .compatible = "rockchip,rockchip-efuse", },
 	{ /* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
 
 static int rockchip_efuse_probe(struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
 	struct resource *res;
 	struct nvmem_device *nvmem;
 	struct regmap *regmap;
-	void __iomem *base;
-	struct clk *clk;
-	struct rockchip_efuse_context *context;
+	struct rockchip_efuse_chip *efuse;
+
+	efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip),
+			     GFP_KERNEL);
+	if (!efuse)
+		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
+	efuse->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(efuse->base))
+		return PTR_ERR(efuse->base);
 
-	context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context),
-			       GFP_KERNEL);
-	if (IS_ERR(context))
-		return PTR_ERR(context);
+	efuse->clk = devm_clk_get(&pdev->dev, "pclk_efuse");
+	if (IS_ERR(efuse->clk))
+		return PTR_ERR(efuse->clk);
 
-	clk = devm_clk_get(dev, "pclk_efuse");
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
-
-	context->dev = dev;
-	context->base = base;
-	context->efuse_clk = clk;
+	efuse->dev = &pdev->dev;
 
 	rockchip_efuse_regmap_config.max_register = resource_size(res) - 1;
 
-	regmap = devm_regmap_init(dev, &rockchip_efuse_bus,
-				  context, &rockchip_efuse_regmap_config);
+	regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus,
+				  efuse, &rockchip_efuse_regmap_config);
 	if (IS_ERR(regmap)) {
-		dev_err(dev, "regmap init failed\n");
+		dev_err(efuse->dev, "regmap init failed\n");
 		return PTR_ERR(regmap);
 	}
-	econfig.dev = dev;
+
+	econfig.dev = efuse->dev;
 	nvmem = nvmem_register(&econfig);
 	if (IS_ERR(nvmem))
 		return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index cfa3b85..bc88b408 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -13,10 +13,8 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
  */
 
-
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -27,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-
 static struct nvmem_config econfig = {
 	.name = "sunxi-sid",
 	.read_only = true,
@@ -55,8 +52,8 @@
 }
 
 static int sunxi_sid_read(void *context,
-			    const void *reg, size_t reg_size,
-			    void *val, size_t val_size)
+			  const void *reg, size_t reg_size,
+			  void *val, size_t val_size)
 {
 	struct sunxi_sid *sid = context;
 	unsigned int offset = *(u32 *)reg;
@@ -130,7 +127,7 @@
 	if (IS_ERR(nvmem))
 		return PTR_ERR(nvmem);
 
-	randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+	randomness = kzalloc(sizeof(u8) * (size), GFP_KERNEL);
 	if (!randomness) {
 		ret = -EINVAL;
 		goto err_unreg_nvmem;
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 0adccbf..c11db8b 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -4,8 +4,7 @@
 if MIPS
 source "drivers/platform/mips/Kconfig"
 endif
-if GOLDFISH
+
 source "drivers/platform/goldfish/Kconfig"
-endif
 
 source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 635ef25..50331e3e 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,5 +1,23 @@
+menuconfig GOLDFISH
+	bool "Platform support for Goldfish virtual devices"
+	depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
+	---help---
+	  Say Y here to get to see options for the Goldfish virtual platform.
+	  This option alone does not add any kernel code.
+
+	  Unless you are building for the Android Goldfish emulator say N here.
+
+if GOLDFISH
+
+config GOLDFISH_BUS
+	bool "Goldfish platform bus"
+	---help---
+	  This is a virtual bus to host Goldfish Android Virtual Devices.
+
 config GOLDFISH_PIPE
 	tristate "Goldfish virtual device for QEMU pipes"
 	---help---
 	  This is a virtual device to drive the QEMU pipe interface used by
 	  the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index a002239..d348712 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -1,5 +1,5 @@
 #
 # Makefile for Goldfish platform specific drivers
 #
-obj-$(CONFIG_GOLDFISH)	+=	pdev_bus.o
+obj-$(CONFIG_GOLDFISH_BUS)	+= pdev_bus.o
 obj-$(CONFIG_GOLDFISH_PIPE)	+= goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index e7a29e2..9973ceb 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -2,6 +2,7 @@
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2012 Intel, Inc.
  * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -57,6 +58,9 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/goldfish.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
 
 /*
  * IMPORTANT: The following constants must match the ones used and defined
@@ -75,6 +79,7 @@
 #define PIPE_REG_PARAMS_ADDR_LOW	0x18  /* read/write: batch data address */
 #define PIPE_REG_PARAMS_ADDR_HIGH	0x1c  /* read/write: batch data address */
 #define PIPE_REG_ACCESS_PARAMS		0x20  /* write: batch access */
+#define PIPE_REG_VERSION		0x24  /* read: device version */
 
 /* list of commands for PIPE_REG_COMMAND */
 #define CMD_OPEN			1  /* open new channel */
@@ -90,12 +95,6 @@
 #define CMD_WRITE_BUFFER	4  /* send a user buffer to the emulator */
 #define CMD_WAKE_ON_WRITE	5  /* tell the emulator to wake us when writing
 				     is possible */
-
-/* The following commands are related to read operations, they must be
- * listed in the same order than the corresponding write ones, since we
- * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
- * in goldfish_pipe_read_write() below.
- */
 #define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
 #define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
 				   * is possible */
@@ -130,6 +129,7 @@
 	unsigned char __iomem *base;
 	struct access_params *aps;
 	int irq;
+	u32 version;
 };
 
 static struct goldfish_pipe_dev   pipe_dev[1];
@@ -217,17 +217,16 @@
 static int setup_access_params_addr(struct platform_device *pdev,
 					struct goldfish_pipe_dev *dev)
 {
-	u64 paddr;
+	dma_addr_t dma_handle;
 	struct access_params *aps;
 
-	aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
+	aps = dmam_alloc_coherent(&pdev->dev, sizeof(struct access_params),
+				  &dma_handle, GFP_KERNEL);
 	if (!aps)
-		return -1;
+		return -ENOMEM;
 
-	/* FIXME */
-	paddr = __pa(aps);
-	writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
-	writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
+	writel(upper_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
+	writel(lower_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_LOW);
 
 	if (valid_batchbuffer_addr(dev, aps)) {
 		dev->aps = aps;
@@ -263,19 +262,14 @@
 	return 0;
 }
 
-/* This function is used for both reading from and writing to a given
- * pipe.
- */
 static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
-				    size_t bufflen, int is_write)
+				       size_t bufflen, int is_write)
 {
 	unsigned long irq_flags;
 	struct goldfish_pipe *pipe = filp->private_data;
 	struct goldfish_pipe_dev *dev = pipe->dev;
-	const int cmd_offset = is_write ? 0
-					: (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
 	unsigned long address, address_end;
-	int ret = 0;
+	int count = 0, ret = -EINVAL;
 
 	/* If the emulator already closed the pipe, no need to go further */
 	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -298,79 +292,107 @@
 	address_end = address + bufflen;
 
 	while (address < address_end) {
-		unsigned long  page_end = (address & PAGE_MASK) + PAGE_SIZE;
-		unsigned long  next     = page_end < address_end ? page_end
-								 : address_end;
-		unsigned long  avail    = next - address;
+		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+		unsigned long next     = page_end < address_end ? page_end
+								: address_end;
+		unsigned long avail    = next - address;
 		int status, wakeBit;
+		struct page *page;
 
-		/* Ensure that the corresponding page is properly mapped */
-		/* FIXME: this isn't safe or sufficient - use get_user_pages */
-		if (is_write) {
-			char c;
-			/* Ensure that the page is mapped and readable */
-			if (__get_user(c, (char __user *)address)) {
-				if (!ret)
-					ret = -EFAULT;
-				break;
-			}
+		/* Either vaddr or paddr depending on the device version */
+		unsigned long xaddr;
+
+		/*
+		 * We grab the pages one page at a time: if user space hands
+		 * us a potentially huge buffer but the read only returns a
+		 * small amount, there is no need to pin that much memory to
+		 * the process.
+		 */
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, address, 1,
+				     !is_write, 0, &page, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (ret < 0)
+			break;
+
+		if (dev->version) {
+			/* Device version 1 or newer (qemu-android) expects the
+			 * physical address.
+			 */
+			xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
 		} else {
-			/* Ensure that the page is mapped and writable */
-			if (__put_user(0, (char __user *)address)) {
-				if (!ret)
-					ret = -EFAULT;
-				break;
-			}
+			/* Device version 0 (classic emulator) expects the
+			 * virtual address.
+			 */
+			xaddr = address;
 		}
 
 		/* Now, try to transfer the bytes in the current page */
 		spin_lock_irqsave(&dev->lock, irq_flags);
-		if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
-				address, avail, pipe, &status)) {
+		if (access_with_param(dev,
+				is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+				xaddr, avail, pipe, &status)) {
 			gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
 				     dev->base + PIPE_REG_CHANNEL_HIGH);
 			writel(avail, dev->base + PIPE_REG_SIZE);
-			gf_write_ptr((void *)address,
+			gf_write_ptr((void *)xaddr,
 				     dev->base + PIPE_REG_ADDRESS,
 				     dev->base + PIPE_REG_ADDRESS_HIGH);
-			writel(CMD_WRITE_BUFFER + cmd_offset,
+			writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
 					dev->base + PIPE_REG_COMMAND);
 			status = readl(dev->base + PIPE_REG_STATUS);
 		}
 		spin_unlock_irqrestore(&dev->lock, irq_flags);
 
+		if (status > 0 && !is_write)
+			set_page_dirty(page);
+		put_page(page);
+
 		if (status > 0) { /* Correct transfer */
-			ret += status;
+			count += status;
 			address += status;
 			continue;
+		} else if (status == 0) { /* EOF */
+			ret = 0;
+			break;
+		} else if (status < 0 && count > 0) {
+			/*
+			 * An error occurred and we already transferred
+			 * something on one of the previous pages.
+			 * Just return what we already copied and log the
+			 * error.
+			 *
+			 * Note: this seems like an incorrect approach, but we
+			 * cannot change it until we check whether any user
+			 * space ABI relies on this behavior.
+			 */
+			if (status != PIPE_ERROR_AGAIN)
+				pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+					status, is_write ? "write" : "read");
+			ret = 0;
+			break;
 		}
 
-		if (status == 0)  /* EOF */
-			break;
-
-		/* An error occured. If we already transfered stuff, just
-		* return with its count. We expect the next call to return
-		* an error code */
-		if (ret > 0)
-			break;
-
-		/* If the error is not PIPE_ERROR_AGAIN, or if we are not in
-		* non-blocking mode, just return the error code.
-		*/
+		/*
+		 * If the error is not PIPE_ERROR_AGAIN, or if we are not in
+		 * non-blocking mode, just return the error code.
+		 */
 		if (status != PIPE_ERROR_AGAIN ||
 			(filp->f_flags & O_NONBLOCK) != 0) {
 			ret = goldfish_pipe_error_convert(status);
 			break;
 		}
 
-		/* We will have to wait until more data/space is available.
-		* First, mark the pipe as waiting for a specific wake signal.
-		*/
+		/*
+		 * The backend blocked the read/write; wait until the backend
+		 * tells us it's ready to process more data.
+		 */
 		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
 		set_bit(wakeBit, &pipe->flags);
 
 		/* Tell the emulator we're going to wait for a wake event */
-		goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
+		goldfish_cmd(pipe,
+			is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
 
 		/* Unlock the pipe, then wait for the wake signal */
 		mutex_unlock(&pipe->lock);
@@ -388,12 +410,13 @@
 		/* Try to re-acquire the lock */
 		if (mutex_lock_interruptible(&pipe->lock))
 			return -ERESTARTSYS;
-
-		/* Try the transfer again */
-		continue;
 	}
 	mutex_unlock(&pipe->lock);
-	return ret;
+
+	if (ret < 0)
+		return ret;
+	else
+		return count;
 }
 
 static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
@@ -446,10 +469,11 @@
 	unsigned long irq_flags;
 	int count = 0;
 
-	/* We're going to read from the emulator a list of (channel,flags)
-	* pairs corresponding to the wake events that occured on each
-	* blocked pipe (i.e. channel).
-	*/
+	/*
+	 * We're going to read from the emulator a list of (channel,flags)
+	 * pairs corresponding to the wake events that occurred on each
+	 * blocked pipe (i.e. channel).
+	 */
 	spin_lock_irqsave(&dev->lock, irq_flags);
 	for (;;) {
 		/* First read the channel, 0 means the end of the list */
@@ -600,6 +624,12 @@
 		goto error;
 	}
 	setup_access_params_addr(pdev, dev);
+
+	/* Although the pipe device in the classic Android emulator does not
+	 * recognize the 'version' register, it won't treat this as an error
+	 * either and will simply return 0, which is fine.
+	 */
+	dev->version = readl(dev->base + PIPE_REG_VERSION);
 	return 0;
 
 error:
@@ -615,11 +645,26 @@
 	return 0;
 }
 
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+	{ "GFSH0003", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+	{ .compatible = "google,android-pipe", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
 static struct platform_driver goldfish_pipe = {
 	.probe = goldfish_pipe_probe,
 	.remove = goldfish_pipe_remove,
 	.driver = {
-		.name = "goldfish_pipe"
+		.name = "goldfish_pipe",
+		.owner = THIS_MODULE,
+		.of_match_table = goldfish_pipe_of_match,
+		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
 	}
 };
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index be822f7..aca282d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -47,9 +48,9 @@
 #define SPMI_MAPPING_BIT_IS_1_FLAG(X)	(((X) >> 8) & 0x1)
 #define SPMI_MAPPING_BIT_IS_1_RESULT(X)	(((X) >> 0) & 0xFF)
 
-#define SPMI_MAPPING_TABLE_LEN		255
 #define SPMI_MAPPING_TABLE_TREE_DEPTH	16	/* Maximum of 16-bits */
-#define PPID_TO_CHAN_TABLE_SZ		BIT(12)	/* PPID is 12bit chan is 1byte*/
+#define PMIC_ARB_MAX_PPID		BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID		BIT(15)
 
 /* Ownership Table */
 #define SPMI_OWNERSHIP_TABLE_REG(N)	(0x0700 + (4 * (N)))
@@ -85,9 +86,7 @@
 };
 
 /* Maximum number of support PMIC peripherals */
-#define PMIC_ARB_MAX_PERIPHS		256
-#define PMIC_ARB_MAX_CHNL		128
-#define PMIC_ARB_PERIPH_ID_VALID	(1 << 15)
+#define PMIC_ARB_MAX_PERIPHS		512
 #define PMIC_ARB_TIMEOUT_US		100
 #define PMIC_ARB_MAX_TRANS_BYTES	(8)
 
@@ -125,18 +124,22 @@
 	void __iomem		*wr_base;
 	void __iomem		*intr;
 	void __iomem		*cnfg;
+	void __iomem		*core;
+	resource_size_t		core_size;
 	raw_spinlock_t		lock;
 	u8			channel;
 	int			irq;
 	u8			ee;
-	u8			min_apid;
-	u8			max_apid;
-	u32			mapping_table[SPMI_MAPPING_TABLE_LEN];
+	u16			min_apid;
+	u16			max_apid;
+	u32			*mapping_table;
+	DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
 	struct irq_domain	*domain;
 	struct spmi_controller	*spmic;
-	u16			apid_to_ppid[256];
+	u16			*apid_to_ppid;
 	const struct pmic_arb_ver_ops *ver_ops;
-	u8			*ppid_to_chan;
+	u16			*ppid_to_chan;
+	u16			last_channel;
 };
 
 /**
@@ -158,7 +161,8 @@
  */
 struct pmic_arb_ver_ops {
 	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
-	u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+	int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+		      u32 *offset);
 	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
 	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
 	/* Interrupts controller functionality (offset of PIC registers) */
@@ -212,7 +216,14 @@
 	struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
 	u32 status = 0;
 	u32 timeout = PMIC_ARB_TIMEOUT_US;
-	u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
+	u32 offset;
+	int rc;
+
+	rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+	if (rc)
+		return rc;
+
+	offset += PMIC_ARB_STATUS;
 
 	while (timeout--) {
 		status = readl_relaxed(base + offset);
@@ -257,7 +268,11 @@
 	unsigned long flags;
 	u32 cmd;
 	int rc;
-	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
+	u32 offset;
+
+	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+	if (rc)
+		return rc;
 
 	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
@@ -297,7 +312,11 @@
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
-	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+	u32 offset;
+
+	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+	if (rc)
+		return rc;
 
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
@@ -344,7 +363,11 @@
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
-	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+	u32 offset;
+
+	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+	if (rc)
+		return rc;
 
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
@@ -614,6 +637,10 @@
 	u32 data;
 
 	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+		if (!test_and_set_bit(index, pa->mapping_table_valid))
+			mapping_table[index] = readl_relaxed(pa->cnfg +
+						SPMI_MAPPING_TABLE_REG(index));
+
 		data = mapping_table[index];
 
 		if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
@@ -701,18 +728,61 @@
 }
 
 /* v1 offset per ee */
-static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
-	return 0x800 + 0x80 * pa->channel;
+	*offset = 0x800 + 0x80 * pa->channel;
+	return 0;
 }
 
+static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+{
+	u32 regval, offset;
+	u16 chan;
+	u16 id;
+
+	/*
+	 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+	 * ppid_to_chan is an in-memory inverse of that table.
+	 */
+	for (chan = pa->last_channel; ; chan++) {
+		offset = PMIC_ARB_REG_CHNL(chan);
+		if (offset >= pa->core_size)
+			break;
+
+		regval = readl_relaxed(pa->core + offset);
+		if (!regval)
+			continue;
+
+		id = (regval >> 8) & PMIC_ARB_PPID_MASK;
+		pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+		if (id == ppid) {
+			chan |= PMIC_ARB_CHAN_VALID;
+			break;
+		}
+	}
+	pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+
+	return chan;
+}
+
+
 /* v2 offset per ppid (chan) and per ee */
-static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
 	u16 ppid = (sid << 8) | (addr >> 8);
-	u8  chan = pa->ppid_to_chan[ppid];
+	u16 chan;
 
-	return 0x1000 * pa->ee + 0x8000 * chan;
+	chan = pa->ppid_to_chan[ppid];
+	if (!(chan & PMIC_ARB_CHAN_VALID))
+		chan = pmic_arb_find_chan(pa, ppid);
+	if (!(chan & PMIC_ARB_CHAN_VALID))
+		return -ENODEV;
+	chan &= ~PMIC_ARB_CHAN_VALID;
+
+	*offset = 0x1000 * pa->ee + 0x8000 * chan;
+	return 0;
 }
 
 static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
@@ -797,7 +867,7 @@
 	struct resource *res;
 	void __iomem *core;
 	u32 channel, ee, hw_ver;
-	int err, i;
+	int err;
 	bool is_v1;
 
 	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
@@ -808,6 +878,7 @@
 	pa->spmic = ctrl;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+	pa->core_size = resource_size(res);
 	core = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(core)) {
 		err = PTR_ERR(core);
@@ -825,10 +896,7 @@
 		pa->wr_base = core;
 		pa->rd_base = core;
 	} else {
-		u8  chan;
-		u16 ppid;
-		u32 regval;
-
+		pa->core = core;
 		pa->ver_ops = &pmic_arb_v2;
 
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -847,24 +915,14 @@
 			goto err_put_ctrl;
 		}
 
-		pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
-					PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
+		pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+						PMIC_ARB_MAX_PPID,
+						sizeof(*pa->ppid_to_chan),
+						GFP_KERNEL);
 		if (!pa->ppid_to_chan) {
 			err = -ENOMEM;
 			goto err_put_ctrl;
 		}
-		/*
-		 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-		 * ppid_to_chan is an in-memory invert of that table.
-		 */
-		for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
-			regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
-			if (!regval)
-				continue;
-
-			ppid = (regval >> 8) & 0xFFF;
-			pa->ppid_to_chan[ppid] = chan;
-		}
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
@@ -915,9 +973,20 @@
 
 	pa->ee = ee;
 
-	for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
-		pa->mapping_table[i] = readl_relaxed(
-				pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+	pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
+					    sizeof(*pa->apid_to_ppid),
+					    GFP_KERNEL);
+	if (!pa->apid_to_ppid) {
+		err = -ENOMEM;
+		goto err_put_ctrl;
+	}
+
+	pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+					sizeof(*pa->mapping_table), GFP_KERNEL);
+	if (!pa->mapping_table) {
+		err = -ENOMEM;
+		goto err_put_ctrl;
+	}
 
 	/* Initialize max_apid/min_apid to the opposite bounds, during
 	 * the irq domain translation, we are sure to update these */
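
A minimal sketch of the read-once caching pattern adopted above for the SPMI
mapping table (a validity bitmap plus an on-demand register read); the names
below are hypothetical and only illustrate the idea, they are not taken from
the patch:

	#include <linux/bitmap.h>
	#include <linux/io.h>

	#define TBL_LEN	512			/* hypothetical table size */

	struct cached_tbl {
		void __iomem *regs;		/* base of the hardware table */
		u32 val[TBL_LEN];		/* cached entries */
		DECLARE_BITMAP(valid, TBL_LEN);	/* marks entries already read */
	};

	static u32 cached_tbl_read(struct cached_tbl *t, unsigned int idx)
	{
		/* First caller for idx pays the MMIO read; later callers hit the cache. */
		if (!test_and_set_bit(idx, t->valid))
			t->val[idx] = readl_relaxed(t->regs + 4 * idx);

		return t->val[idx];
	}
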
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5d3b86a..5f9a97a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,10 +30,6 @@
 
 source "drivers/staging/comedi/Kconfig"
 
-source "drivers/staging/olpc_dcon/Kconfig"
-
-source "drivers/staging/panel/Kconfig"
-
 source "drivers/staging/rtl8192u/Kconfig"
 
 source "drivers/staging/rtl8192e/Kconfig"
@@ -92,8 +88,6 @@
 
 source "drivers/staging/dgnc/Kconfig"
 
-source "drivers/staging/dgap/Kconfig"
-
 source "drivers/staging/gs_fpgaboot/Kconfig"
 
 source "drivers/staging/skein/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 30918ed..b3920c2 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,14 +1,9 @@
 # Makefile for staging directory
 
-# fix for build system bug...
-obj-$(CONFIG_STAGING)		+= staging.o
-
 obj-y				+= media/
 obj-$(CONFIG_SLICOSS)		+= slicoss/
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_COMEDI)		+= comedi/
-obj-$(CONFIG_FB_OLPC_DCON)	+= olpc_dcon/
-obj-$(CONFIG_PANEL)		+= panel/
 obj-$(CONFIG_RTL8192U)		+= rtl8192u/
 obj-$(CONFIG_RTL8192E)		+= rtl8192e/
 obj-$(CONFIG_R8712U)		+= rtl8712/
@@ -37,7 +32,6 @@
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_LUSTRE_FS)		+= lustre/
 obj-$(CONFIG_DGNC)			+= dgnc/
-obj-$(CONFIG_DGAP)			+= dgap/
 obj-$(CONFIG_MTD_SPINAND_MT29F)	+= mt29f_spinand/
 obj-$(CONFIG_GS_FPGABOOT)	+= gs_fpgaboot/
 obj-$(CONFIG_CRYPTO_SKEIN)	+= skein/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 42b1512..bd90d20 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -57,15 +57,6 @@
 	  synchronization.  Useful when there is no hardware primitive backing
 	  the synchronization.
 
-config SW_SYNC_USER
-	bool "Userspace API for SW_SYNC"
-	default n
-	depends on SW_SYNC
-	---help---
-	  Provides a user space API to the sw sync object.
-	  *WARNING* improper use of this can result in deadlocking kernel
-	  drivers from userspace.
-
 source "drivers/staging/android/ion/Kconfig"
 
 endif # if ANDROID
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 5bb1283..8ae13082 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -441,7 +441,9 @@
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	mutex_lock(&ashmem_mutex);
+	if (!mutex_trylock(&ashmem_mutex))
+		return -1;
+
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e237e9f..7ff2a7e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -251,8 +251,10 @@
 	 * memory coming from the heaps is ready for dma, ie if it has a
 	 * cached mapping that mapping has been invalidated
 	 */
-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
 		sg_dma_address(sg) = sg_phys(sg);
+		sg_dma_len(sg) = sg->length;
+	}
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
@@ -675,6 +677,34 @@
 }
 EXPORT_SYMBOL(ion_unmap_kernel);
 
+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
+{
+	struct rb_node *node;
+	struct ion_client *tmp;
+	struct ion_device *dev;
+
+	node = ion_root_client->rb_node;
+	dev = container_of(ion_root_client, struct ion_device, clients);
+
+	down_read(&dev->lock);
+	while (node) {
+		tmp = rb_entry(node, struct ion_client, node);
+		if (client < tmp) {
+			node = node->rb_left;
+		} else if (client > tmp) {
+			node = node->rb_right;
+		} else {
+			up_read(&dev->lock);
+			return 1;
+		}
+	}
+
+	up_read(&dev->lock);
+	return 0;
+}
+
 static int ion_debug_client_show(struct seq_file *s, void *unused)
 {
 	struct ion_client *client = s->private;
@@ -683,6 +713,14 @@
 	const char *names[ION_NUM_HEAP_IDS] = {NULL};
 	int i;
 
+	mutex_lock(&debugfs_mutex);
+	if (!is_client_alive(client)) {
+		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
+			   client);
+		mutex_unlock(&debugfs_mutex);
+		return 0;
+	}
+
 	mutex_lock(&client->lock);
 	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
@@ -694,6 +732,7 @@
 		sizes[id] += handle->buffer->size;
 	}
 	mutex_unlock(&client->lock);
+	mutex_unlock(&debugfs_mutex);
 
 	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
 	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
@@ -830,6 +869,7 @@
 	struct rb_node *n;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&debugfs_mutex);
 	while ((n = rb_first(&client->handles))) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
@@ -848,6 +888,7 @@
 	kfree(client->display_name);
 	kfree(client->name);
 	kfree(client);
+	mutex_unlock(&debugfs_mutex);
 }
 EXPORT_SYMBOL(ion_client_destroy);
 
@@ -1151,22 +1192,18 @@
 }
 EXPORT_SYMBOL(ion_share_dma_buf_fd);
 
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+				      struct dma_buf *dmabuf)
 {
-	struct dma_buf *dmabuf;
 	struct ion_buffer *buffer;
 	struct ion_handle *handle;
 	int ret;
 
-	dmabuf = dma_buf_get(fd);
-	if (IS_ERR(dmabuf))
-		return ERR_CAST(dmabuf);
 	/* if this memory came from ion */
 
 	if (dmabuf->ops != &dma_buf_ops) {
 		pr_err("%s: can not import dmabuf from another exporter\n",
 		       __func__);
-		dma_buf_put(dmabuf);
 		return ERR_PTR(-EINVAL);
 	}
 	buffer = dmabuf->priv;
@@ -1194,11 +1231,25 @@
 	}
 
 end:
-	dma_buf_put(dmabuf);
 	return handle;
 }
 EXPORT_SYMBOL(ion_import_dma_buf);
 
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_handle *handle;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return ERR_CAST(dmabuf);
+
+	handle = ion_import_dma_buf(client, dmabuf);
+	dma_buf_put(dmabuf);
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf_fd);
+
 static int ion_sync_for_device(struct ion_client *client, int fd)
 {
 	struct dma_buf *dmabuf;
@@ -1306,7 +1357,7 @@
 	{
 		struct ion_handle *handle;
 
-		handle = ion_import_dma_buf(client, data.fd.fd);
+		handle = ion_import_dma_buf_fd(client, data.fd.fd);
 		if (IS_ERR(handle))
 			ret = PTR_ERR(handle);
 		else
@@ -1403,6 +1454,7 @@
 	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
 	seq_puts(s, "----------------------------------------------------\n");
 
+	mutex_lock(&debugfs_mutex);
 	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
 						     node);
@@ -1421,6 +1473,8 @@
 				   client->pid, size);
 		}
 	}
+	mutex_unlock(&debugfs_mutex);
+
 	seq_puts(s, "----------------------------------------------------\n");
 	seq_puts(s, "orphaned allocations (info is from last known client):\n");
 	mutex_lock(&dev->buffer_lock);
@@ -1605,6 +1659,8 @@
 	init_rwsem(&idev->lock);
 	plist_head_init(&idev->heaps);
 	idev->clients = RB_ROOT;
+	ion_root_client = &idev->clients;
+	mutex_init(&debugfs_mutex);
 	return idev;
 }
 EXPORT_SYMBOL(ion_device_create);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index b860c5f..a1331fc 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -192,14 +192,26 @@
 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
 
 /**
- * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
+ * ion_import_dma_buf() - get ion_handle from dma-buf
+ * @client:	the client
+ * @dmabuf:	the dma-buf
+ *
+ * Get the ion_buffer associated with the dma-buf and return the ion_handle.
+ * If no ion_handle exists for this buffer, return a newly created ion_handle.
+ * If a dma-buf from another exporter is passed, return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+				      struct dma_buf *dmabuf);
+
+/**
+ * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
  * @client:	the client
  * @fd:		the dma-buf fd
  *
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
- * import that fd and return a handle representing it.  If a dma-buf from
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
+ * import that fd and return a handle representing it. If a dma-buf from
  * another exporter is passed in this function will return ERR_PTR(-EINVAL)
  */
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
 
 #endif /* _LINUX_ION_H */
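
With the split above, ion_import_dma_buf() now takes a struct dma_buf *
directly and ion_import_dma_buf_fd() keeps the old fd-based behaviour by
doing the dma_buf_get()/dma_buf_put() internally. A rough caller sketch, with
hypothetical function names that are not part of the patch:

	/* Import from a dma_buf the driver already holds: no fd round-trip. */
	static struct ion_handle *import_existing(struct ion_client *client,
						  struct dma_buf *dmabuf)
	{
		return ion_import_dma_buf(client, dmabuf);
	}

	/* Import from a userspace-provided fd, as the ioctl import path above does. */
	static struct ion_handle *import_from_user(struct ion_client *client, int fd)
	{
		return ion_import_dma_buf_fd(client, fd);
	}

Both return an ERR_PTR() on failure, e.g. -EINVAL when the dma-buf comes from
another exporter.
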
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 9156d82..e702ce6 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -167,7 +167,7 @@
 	if (!carveout_heap)
 		return ERR_PTR(-ENOMEM);
 
-	carveout_heap->pool = gen_pool_create(12, -1);
+	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
 	if (!carveout_heap->pool) {
 		kfree(carveout_heap);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index fd7e23e..1fe8016 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -149,8 +149,8 @@
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
-	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
-					     GFP_KERNEL);
+	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+
 	if (!pool)
 		return NULL;
 	pool->high_count = 0;
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index d4c3e55..b69dfc7 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -27,7 +27,7 @@
 #include "ion_priv.h"
 
 static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
-				     __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
+				     __GFP_NORETRY) & ~__GFP_RECLAIM;
 static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
 static const unsigned int orders[] = {8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 8b5a4a8..4b8a56c 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -84,6 +84,7 @@
 	int tasksize;
 	int i;
 	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+	int minfree = 0;
 	int selected_tasksize = 0;
 	short selected_oom_score_adj;
 	int array_size = ARRAY_SIZE(lowmem_adj);
@@ -97,8 +98,8 @@
 	if (lowmem_minfree_size < array_size)
 		array_size = lowmem_minfree_size;
 	for (i = 0; i < array_size; i++) {
-		if (other_free < lowmem_minfree[i] &&
-		    other_file < lowmem_minfree[i]) {
+		minfree = lowmem_minfree[i];
+		if (other_free < minfree && other_file < minfree) {
 			min_score_adj = lowmem_adj[i];
 			break;
 		}
@@ -153,8 +154,8 @@
 		selected = p;
 		selected_tasksize = tasksize;
 		selected_oom_score_adj = oom_score_adj;
-		lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
-			     p->pid, p->comm, oom_score_adj, tasksize);
+		lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+			     p->comm, p->pid, oom_score_adj, tasksize);
 	}
 	if (selected) {
 		task_lock(selected);
@@ -167,9 +168,18 @@
 		if (selected->mm)
 			mark_oom_victim(selected);
 		task_unlock(selected);
-		lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
-			     selected->pid, selected->comm,
-			     selected_oom_score_adj, selected_tasksize);
+		lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
+				 "   to free %ldkB on behalf of '%s' (%d) because\n"
+				 "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
+				 "   Free memory is %ldkB above reserved\n",
+			     selected->comm, selected->pid,
+			     selected_oom_score_adj,
+			     selected_tasksize * (long)(PAGE_SIZE / 1024),
+			     current->comm, current->pid,
+			     other_file * (long)(PAGE_SIZE / 1024),
+			     minfree * (long)(PAGE_SIZE / 1024),
+			     min_score_adj,
+			     other_free * (long)(PAGE_SIZE / 1024));
 		lowmem_deathpending_timeout = jiffies + HZ;
 		rem += selected_tasksize;
 	}
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index c4ff167..af39ff5 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -25,15 +25,7 @@
 
 #include "sw_sync.h"
 
-static int sw_sync_cmp(u32 a, u32 b)
-{
-	if (a == b)
-		return 0;
-
-	return ((s32)a - (s32)b) < 0 ? -1 : 1;
-}
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
 {
 	struct sw_sync_pt *pt;
 
@@ -42,47 +34,17 @@
 
 	pt->value = value;
 
-	return (struct sync_pt *)pt;
+	return (struct fence *)pt;
 }
 EXPORT_SYMBOL(sw_sync_pt_create);
 
-static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
+static int sw_sync_fence_has_signaled(struct fence *fence)
 {
-	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
+		(struct sw_sync_timeline *)fence_parent(fence);
 
-	return (struct sync_pt *)sw_sync_pt_create(obj, pt->value);
-}
-
-static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
-{
-	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
-	return sw_sync_cmp(obj->value, pt->value) >= 0;
-}
-
-static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
-	struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
-	struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
-
-	return sw_sync_cmp(pt_a->value, pt_b->value);
-}
-
-static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
-				    void *data, int size)
-{
-	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
-	if (size < sizeof(pt->value))
-		return -ENOMEM;
-
-	memcpy(data, &pt->value, sizeof(pt->value));
-
-	return sizeof(pt->value);
+	return (pt->value > obj->value) ? 0 : 1;
 }
 
 static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
@@ -93,22 +55,18 @@
 	snprintf(str, size, "%d", timeline->value);
 }
 
-static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
-				 char *str, int size)
+static void sw_sync_fence_value_str(struct fence *fence, char *str, int size)
 {
-	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
 
 	snprintf(str, size, "%d", pt->value);
 }
 
 static struct sync_timeline_ops sw_sync_timeline_ops = {
 	.driver_name = "sw_sync",
-	.dup = sw_sync_pt_dup,
-	.has_signaled = sw_sync_pt_has_signaled,
-	.compare = sw_sync_pt_compare,
-	.fill_driver_data = sw_sync_fill_driver_data,
+	.has_signaled = sw_sync_fence_has_signaled,
 	.timeline_value_str = sw_sync_timeline_value_str,
-	.pt_value_str = sw_sync_pt_value_str,
+	.fence_value_str = sw_sync_fence_value_str,
 };
 
 struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
@@ -129,132 +87,3 @@
 	sync_timeline_signal(&obj->obj);
 }
 EXPORT_SYMBOL(sw_sync_timeline_inc);
-
-#ifdef CONFIG_SW_SYNC_USER
-/* *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_open(struct inode *inode, struct file *file)
-{
-	struct sw_sync_timeline *obj;
-	char task_comm[TASK_COMM_LEN];
-
-	get_task_comm(task_comm, current);
-
-	obj = sw_sync_timeline_create(task_comm);
-	if (!obj)
-		return -ENOMEM;
-
-	file->private_data = obj;
-
-	return 0;
-}
-
-static int sw_sync_release(struct inode *inode, struct file *file)
-{
-	struct sw_sync_timeline *obj = file->private_data;
-
-	sync_timeline_destroy(&obj->obj);
-	return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
-				       unsigned long arg)
-{
-	int fd = get_unused_fd_flags(O_CLOEXEC);
-	int err;
-	struct sync_pt *pt;
-	struct sync_fence *fence;
-	struct sw_sync_create_fence_data data;
-
-	if (fd < 0)
-		return fd;
-
-	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-		err = -EFAULT;
-		goto err;
-	}
-
-	pt = sw_sync_pt_create(obj, data.value);
-	if (!pt) {
-		err = -ENOMEM;
-		goto err;
-	}
-
-	data.name[sizeof(data.name) - 1] = '\0';
-	fence = sync_fence_create(data.name, pt);
-	if (!fence) {
-		sync_pt_free(pt);
-		err = -ENOMEM;
-		goto err;
-	}
-
-	data.fence = fd;
-	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-		sync_fence_put(fence);
-		err = -EFAULT;
-		goto err;
-	}
-
-	sync_fence_install(fence, fd);
-
-	return 0;
-
-err:
-	put_unused_fd(fd);
-	return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
-	u32 value;
-
-	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
-		return -EFAULT;
-
-	sw_sync_timeline_inc(obj, value);
-
-	return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
-			  unsigned long arg)
-{
-	struct sw_sync_timeline *obj = file->private_data;
-
-	switch (cmd) {
-	case SW_SYNC_IOC_CREATE_FENCE:
-		return sw_sync_ioctl_create_fence(obj, arg);
-
-	case SW_SYNC_IOC_INC:
-		return sw_sync_ioctl_inc(obj, arg);
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-static const struct file_operations sw_sync_fops = {
-	.owner = THIS_MODULE,
-	.open = sw_sync_open,
-	.release = sw_sync_release,
-	.unlocked_ioctl = sw_sync_ioctl,
-	.compat_ioctl = sw_sync_ioctl,
-};
-
-static struct miscdevice sw_sync_dev = {
-	.minor	= MISC_DYNAMIC_MINOR,
-	.name	= "sw_sync",
-	.fops	= &sw_sync_fops,
-};
-
-static int __init sw_sync_device_init(void)
-{
-	return misc_register(&sw_sync_dev);
-}
-device_initcall(sw_sync_device_init);
-
-#endif /* CONFIG_SW_SYNC_USER */
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index c87ae9e..e18667b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -29,7 +29,7 @@
 };
 
 struct sw_sync_pt {
-	struct sync_pt		pt;
+	struct fence		pt;
 
 	u32			value;
 };
@@ -38,7 +38,7 @@
 struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
 void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
 
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
 #else
 static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
 {
@@ -49,8 +49,8 @@
 {
 }
 
-static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
-						u32 value)
+static inline struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj,
+					      u32 value)
 {
 	return NULL;
 }
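
After the conversion above, sw_sync_pt_create() hands back a plain struct
fence instead of a sync_pt, and the SW_SYNC_USER device interface is removed,
so sw_sync timelines are now exercised from kernel code. A rough in-kernel
usage sketch, assuming CONFIG_SW_SYNC, with illustrative names and minimal
error handling:

	#include <linux/errno.h>
	#include "sw_sync.h"

	static int sw_sync_smoke_test(void)
	{
		struct sw_sync_timeline *tl;
		struct fence *fence;

		tl = sw_sync_timeline_create("example");
		if (!tl)
			return -ENOMEM;

		/* The fence signals once the timeline counter reaches 1. */
		fence = sw_sync_pt_create(tl, 1);
		if (!fence) {
			sync_timeline_destroy(&tl->obj);
			return -ENOMEM;
		}

		sw_sync_timeline_inc(tl, 1);	/* signals the fence */

		fence_put(fence);		/* drop our fence reference */
		sync_timeline_destroy(&tl->obj);
		return 0;
	}
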
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index ed43796..3a8f210 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -32,7 +32,7 @@
 #include "trace/sync.h"
 
 static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_fence_fops;
+static const struct file_operations sync_file_fops;
 
 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
 					   int size, const char *name)
@@ -68,9 +68,6 @@
 
 	sync_timeline_debug_remove(obj);
 
-	if (obj->ops->release_obj)
-		obj->ops->release_obj(obj);
-
 	kfree(obj);
 }
 
@@ -93,10 +90,6 @@
 	 */
 	smp_wmb();
 
-	/*
-	 * signal any children that their parent is going away.
-	 */
-	sync_timeline_signal(obj);
 	sync_timeline_put(obj);
 }
 EXPORT_SYMBOL(sync_timeline_destroy);
@@ -104,126 +97,115 @@
 void sync_timeline_signal(struct sync_timeline *obj)
 {
 	unsigned long flags;
-	LIST_HEAD(signaled_pts);
-	struct sync_pt *pt, *next;
+	struct fence *fence, *next;
 
 	trace_sync_timeline(obj);
 
 	spin_lock_irqsave(&obj->child_list_lock, flags);
 
-	list_for_each_entry_safe(pt, next, &obj->active_list_head,
+	list_for_each_entry_safe(fence, next, &obj->active_list_head,
 				 active_list) {
-		if (fence_is_signaled_locked(&pt->base))
-			list_del_init(&pt->active_list);
+		if (fence_is_signaled_locked(fence))
+			list_del_init(&fence->active_list);
 	}
 
 	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 }
 EXPORT_SYMBOL(sync_timeline_signal);
 
-struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
+struct fence *sync_pt_create(struct sync_timeline *obj, int size)
 {
 	unsigned long flags;
-	struct sync_pt *pt;
+	struct fence *fence;
 
-	if (size < sizeof(struct sync_pt))
+	if (size < sizeof(*fence))
 		return NULL;
 
-	pt = kzalloc(size, GFP_KERNEL);
-	if (!pt)
-		return NULL;
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	sync_timeline_get(obj);
-	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
-		   obj->context, ++obj->value);
-	list_add_tail(&pt->child_list, &obj->child_list_head);
-	INIT_LIST_HEAD(&pt->active_list);
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-	return pt;
-}
-EXPORT_SYMBOL(sync_pt_create);
-
-void sync_pt_free(struct sync_pt *pt)
-{
-	fence_put(&pt->base);
-}
-EXPORT_SYMBOL(sync_pt_free);
-
-static struct sync_fence *sync_fence_alloc(int size, const char *name)
-{
-	struct sync_fence *fence;
-
 	fence = kzalloc(size, GFP_KERNEL);
 	if (!fence)
 		return NULL;
 
-	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
-					 fence, 0);
-	if (IS_ERR(fence->file))
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	sync_timeline_get(obj);
+	fence_init(fence, &android_fence_ops, &obj->child_list_lock,
+		   obj->context, ++obj->value);
+	list_add_tail(&fence->child_list, &obj->child_list_head);
+	INIT_LIST_HEAD(&fence->active_list);
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+	return fence;
+}
+EXPORT_SYMBOL(sync_pt_create);
+
+static struct sync_file *sync_file_alloc(int size, const char *name)
+{
+	struct sync_file *sync_file;
+
+	sync_file = kzalloc(size, GFP_KERNEL);
+	if (!sync_file)
+		return NULL;
+
+	sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
+					     sync_file, 0);
+	if (IS_ERR(sync_file->file))
 		goto err;
 
-	kref_init(&fence->kref);
-	strlcpy(fence->name, name, sizeof(fence->name));
+	kref_init(&sync_file->kref);
+	strlcpy(sync_file->name, name, sizeof(sync_file->name));
 
-	init_waitqueue_head(&fence->wq);
+	init_waitqueue_head(&sync_file->wq);
 
-	return fence;
+	return sync_file;
 
 err:
-	kfree(fence);
+	kfree(sync_file);
 	return NULL;
 }
 
 static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
 {
-	struct sync_fence_cb *check;
-	struct sync_fence *fence;
+	struct sync_file_cb *check;
+	struct sync_file *sync_file;
 
-	check = container_of(cb, struct sync_fence_cb, cb);
-	fence = check->fence;
+	check = container_of(cb, struct sync_file_cb, cb);
+	sync_file = check->sync_file;
 
-	if (atomic_dec_and_test(&fence->status))
-		wake_up_all(&fence->wq);
+	if (atomic_dec_and_test(&sync_file->status))
+		wake_up_all(&sync_file->wq);
 }
 
-/* TODO: implement a create which takes more that one sync_pt */
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt)
+/* TODO: implement a create which takes more than one fence */
+struct sync_file *sync_file_create(const char *name, struct fence *fence)
 {
-	struct sync_fence *fence;
+	struct sync_file *sync_file;
 
-	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
-	if (!fence)
+	sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]),
+				    name);
+	if (!sync_file)
 		return NULL;
 
-	fence->num_fences = 1;
-	atomic_set(&fence->status, 1);
+	sync_file->num_fences = 1;
+	atomic_set(&sync_file->status, 1);
 
-	fence->cbs[0].sync_pt = pt;
-	fence->cbs[0].fence = fence;
-	if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func))
-		atomic_dec(&fence->status);
+	sync_file->cbs[0].fence = fence;
+	sync_file->cbs[0].sync_file = sync_file;
+	if (fence_add_callback(fence, &sync_file->cbs[0].cb,
+			       fence_check_cb_func))
+		atomic_dec(&sync_file->status);
 
-	sync_fence_debug_add(fence);
+	sync_file_debug_add(sync_file);
 
-	return fence;
+	return sync_file;
 }
-EXPORT_SYMBOL(sync_fence_create_dma);
+EXPORT_SYMBOL(sync_file_create);
 
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
-{
-	return sync_fence_create_dma(name, &pt->base);
-}
-EXPORT_SYMBOL(sync_fence_create);
-
-struct sync_fence *sync_fence_fdget(int fd)
+struct sync_file *sync_file_fdget(int fd)
 {
 	struct file *file = fget(fd);
 
 	if (!file)
 		return NULL;
 
-	if (file->f_op != &sync_fence_fops)
+	if (file->f_op != &sync_file_fops)
 		goto err;
 
 	return file->private_data;
@@ -232,70 +214,71 @@
 	fput(file);
 	return NULL;
 }
-EXPORT_SYMBOL(sync_fence_fdget);
+EXPORT_SYMBOL(sync_file_fdget);
 
-void sync_fence_put(struct sync_fence *fence)
+void sync_file_put(struct sync_file *sync_file)
 {
-	fput(fence->file);
+	fput(sync_file->file);
 }
-EXPORT_SYMBOL(sync_fence_put);
+EXPORT_SYMBOL(sync_file_put);
 
-void sync_fence_install(struct sync_fence *fence, int fd)
+void sync_file_install(struct sync_file *sync_file, int fd)
 {
-	fd_install(fd, fence->file);
+	fd_install(fd, sync_file->file);
 }
-EXPORT_SYMBOL(sync_fence_install);
+EXPORT_SYMBOL(sync_file_install);
 
-static void sync_fence_add_pt(struct sync_fence *fence,
-			      int *i, struct fence *pt)
+static void sync_file_add_pt(struct sync_file *sync_file, int *i,
+			     struct fence *fence)
 {
-	fence->cbs[*i].sync_pt = pt;
-	fence->cbs[*i].fence = fence;
+	sync_file->cbs[*i].fence = fence;
+	sync_file->cbs[*i].sync_file = sync_file;
 
-	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
-		fence_get(pt);
+	if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
+				fence_check_cb_func)) {
+		fence_get(fence);
 		(*i)++;
 	}
 }
 
-struct sync_fence *sync_fence_merge(const char *name,
-				    struct sync_fence *a, struct sync_fence *b)
+struct sync_file *sync_file_merge(const char *name,
+				  struct sync_file *a, struct sync_file *b)
 {
 	int num_fences = a->num_fences + b->num_fences;
-	struct sync_fence *fence;
+	struct sync_file *sync_file;
 	int i, i_a, i_b;
-	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
+	unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
 
-	fence = sync_fence_alloc(size, name);
-	if (!fence)
+	sync_file = sync_file_alloc(size, name);
+	if (!sync_file)
 		return NULL;
 
-	atomic_set(&fence->status, num_fences);
+	atomic_set(&sync_file->status, num_fences);
 
 	/*
-	 * Assume sync_fence a and b are both ordered and have no
+	 * Assume sync_file a and b are both ordered and have no
 	 * duplicates with the same context.
 	 *
-	 * If a sync_fence can only be created with sync_fence_merge
-	 * and sync_fence_create, this is a reasonable assumption.
+	 * If a sync_file can only be created with sync_file_merge
+	 * and sync_file_create, this is a reasonable assumption.
 	 */
 	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
-		struct fence *pt_a = a->cbs[i_a].sync_pt;
-		struct fence *pt_b = b->cbs[i_b].sync_pt;
+		struct fence *pt_a = a->cbs[i_a].fence;
+		struct fence *pt_b = b->cbs[i_b].fence;
 
 		if (pt_a->context < pt_b->context) {
-			sync_fence_add_pt(fence, &i, pt_a);
+			sync_file_add_pt(sync_file, &i, pt_a);
 
 			i_a++;
 		} else if (pt_a->context > pt_b->context) {
-			sync_fence_add_pt(fence, &i, pt_b);
+			sync_file_add_pt(sync_file, &i, pt_b);
 
 			i_b++;
 		} else {
 			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
-				sync_fence_add_pt(fence, &i, pt_a);
+				sync_file_add_pt(sync_file, &i, pt_a);
 			else
-				sync_fence_add_pt(fence, &i, pt_b);
+				sync_file_add_pt(sync_file, &i, pt_b);
 
 			i_a++;
 			i_b++;
@@ -303,156 +286,55 @@
 	}
 
 	for (; i_a < a->num_fences; i_a++)
-		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
+		sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
 
 	for (; i_b < b->num_fences; i_b++)
-		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
+		sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
 
 	if (num_fences > i)
-		atomic_sub(num_fences - i, &fence->status);
-	fence->num_fences = i;
+		atomic_sub(num_fences - i, &sync_file->status);
+	sync_file->num_fences = i;
 
-	sync_fence_debug_add(fence);
-	return fence;
+	sync_file_debug_add(sync_file);
+	return sync_file;
 }
-EXPORT_SYMBOL(sync_fence_merge);
-
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
-			  int wake_flags, void *key)
-{
-	struct sync_fence_waiter *wait;
-
-	wait = container_of(curr, struct sync_fence_waiter, work);
-	list_del_init(&wait->work.task_list);
-
-	wait->callback(wait->work.private, wait);
-	return 1;
-}
-
-int sync_fence_wait_async(struct sync_fence *fence,
-			  struct sync_fence_waiter *waiter)
-{
-	int err = atomic_read(&fence->status);
-	unsigned long flags;
-
-	if (err < 0)
-		return err;
-
-	if (!err)
-		return 1;
-
-	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
-	waiter->work.private = fence;
-
-	spin_lock_irqsave(&fence->wq.lock, flags);
-	err = atomic_read(&fence->status);
-	if (err > 0)
-		__add_wait_queue_tail(&fence->wq, &waiter->work);
-	spin_unlock_irqrestore(&fence->wq.lock, flags);
-
-	if (err < 0)
-		return err;
-
-	return !err;
-}
-EXPORT_SYMBOL(sync_fence_wait_async);
-
-int sync_fence_cancel_async(struct sync_fence *fence,
-			    struct sync_fence_waiter *waiter)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&fence->wq.lock, flags);
-	if (!list_empty(&waiter->work.task_list))
-		list_del_init(&waiter->work.task_list);
-	else
-		ret = -ENOENT;
-	spin_unlock_irqrestore(&fence->wq.lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(sync_fence_cancel_async);
-
-int sync_fence_wait(struct sync_fence *fence, long timeout)
-{
-	long ret;
-	int i;
-
-	if (timeout < 0)
-		timeout = MAX_SCHEDULE_TIMEOUT;
-	else
-		timeout = msecs_to_jiffies(timeout);
-
-	trace_sync_wait(fence, 1);
-	for (i = 0; i < fence->num_fences; ++i)
-		trace_sync_pt(fence->cbs[i].sync_pt);
-	ret = wait_event_interruptible_timeout(fence->wq,
-					       atomic_read(&fence->status) <= 0,
-					       timeout);
-	trace_sync_wait(fence, 0);
-
-	if (ret < 0) {
-		return ret;
-	} else if (ret == 0) {
-		if (timeout) {
-			pr_info("fence timeout on [%p] after %dms\n", fence,
-				jiffies_to_msecs(timeout));
-			sync_dump();
-		}
-		return -ETIME;
-	}
-
-	ret = atomic_read(&fence->status);
-	if (ret) {
-		pr_info("fence error %ld on [%p]\n", ret, fence);
-		sync_dump();
-	}
-	return ret;
-}
-EXPORT_SYMBOL(sync_fence_wait);
+EXPORT_SYMBOL(sync_file_merge);
 
 static const char *android_fence_get_driver_name(struct fence *fence)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 
 	return parent->ops->driver_name;
 }
 
 static const char *android_fence_get_timeline_name(struct fence *fence)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 
 	return parent->name;
 }
 
 static void android_fence_release(struct fence *fence)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 	unsigned long flags;
 
 	spin_lock_irqsave(fence->lock, flags);
-	list_del(&pt->child_list);
-	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
-		list_del(&pt->active_list);
+	list_del(&fence->child_list);
+	if (WARN_ON_ONCE(!list_empty(&fence->active_list)))
+		list_del(&fence->active_list);
 	spin_unlock_irqrestore(fence->lock, flags);
 
-	if (parent->ops->free_pt)
-		parent->ops->free_pt(pt);
-
 	sync_timeline_put(parent);
-	fence_free(&pt->base);
+	fence_free(fence);
 }
 
 static bool android_fence_signaled(struct fence *fence)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 	int ret;
 
-	ret = parent->ops->has_signaled(pt);
+	ret = parent->ops->has_signaled(fence);
 	if (ret < 0)
 		fence->status = ret;
 	return ret;
@@ -460,46 +342,32 @@
 
 static bool android_fence_enable_signaling(struct fence *fence)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 
 	if (android_fence_signaled(fence))
 		return false;
 
-	list_add_tail(&pt->active_list, &parent->active_list_head);
+	list_add_tail(&fence->active_list, &parent->active_list_head);
 	return true;
 }
 
-static int android_fence_fill_driver_data(struct fence *fence,
-					  void *data, int size)
-{
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
-
-	if (!parent->ops->fill_driver_data)
-		return 0;
-	return parent->ops->fill_driver_data(pt, data, size);
-}
-
 static void android_fence_value_str(struct fence *fence,
 				    char *str, int size)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 
-	if (!parent->ops->pt_value_str) {
+	if (!parent->ops->fence_value_str) {
 		if (size)
 			*str = 0;
 		return;
 	}
-	parent->ops->pt_value_str(pt, str, size);
+	parent->ops->fence_value_str(fence, str, size);
 }
 
 static void android_fence_timeline_value_str(struct fence *fence,
 					     char *str, int size)
 {
-	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
-	struct sync_timeline *parent = sync_pt_parent(pt);
+	struct sync_timeline *parent = fence_parent(fence);
 
 	if (!parent->ops->timeline_value_str) {
 		if (size)
@@ -516,65 +384,57 @@
 	.signaled = android_fence_signaled,
 	.wait = fence_default_wait,
 	.release = android_fence_release,
-	.fill_driver_data = android_fence_fill_driver_data,
 	.fence_value_str = android_fence_value_str,
 	.timeline_value_str = android_fence_timeline_value_str,
 };
 
-static void sync_fence_free(struct kref *kref)
+static void sync_file_free(struct kref *kref)
 {
-	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+	struct sync_file *sync_file = container_of(kref, struct sync_file,
+						     kref);
 	int i;
 
-	for (i = 0; i < fence->num_fences; ++i) {
-		fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
-		fence_put(fence->cbs[i].sync_pt);
+	for (i = 0; i < sync_file->num_fences; ++i) {
+		fence_remove_callback(sync_file->cbs[i].fence,
+				      &sync_file->cbs[i].cb);
+		fence_put(sync_file->cbs[i].fence);
 	}
 
-	kfree(fence);
+	kfree(sync_file);
 }
 
-static int sync_fence_release(struct inode *inode, struct file *file)
+static int sync_file_release(struct inode *inode, struct file *file)
 {
-	struct sync_fence *fence = file->private_data;
+	struct sync_file *sync_file = file->private_data;
 
-	sync_fence_debug_remove(fence);
+	sync_file_debug_remove(sync_file);
 
-	kref_put(&fence->kref, sync_fence_free);
+	kref_put(&sync_file->kref, sync_file_free);
 	return 0;
 }
 
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 {
-	struct sync_fence *fence = file->private_data;
+	struct sync_file *sync_file = file->private_data;
 	int status;
 
-	poll_wait(file, &fence->wq, wait);
+	poll_wait(file, &sync_file->wq, wait);
 
-	status = atomic_read(&fence->status);
+	status = atomic_read(&sync_file->status);
 
 	if (!status)
 		return POLLIN;
-	else if (status < 0)
+	if (status < 0)
 		return POLLERR;
 	return 0;
 }
 
-static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
-{
-	__s32 value;
-
-	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
-		return -EFAULT;
-
-	return sync_fence_wait(fence, value);
-}
-
-static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+static long sync_file_ioctl_merge(struct sync_file *sync_file,
+				   unsigned long arg)
 {
 	int fd = get_unused_fd_flags(O_CLOEXEC);
 	int err;
-	struct sync_fence *fence2, *fence3;
+	struct sync_file *fence2, *fence3;
 	struct sync_merge_data data;
 
 	if (fd < 0)
@@ -585,14 +445,14 @@
 		goto err_put_fd;
 	}
 
-	fence2 = sync_fence_fdget(data.fd2);
+	fence2 = sync_file_fdget(data.fd2);
 	if (!fence2) {
 		err = -ENOENT;
 		goto err_put_fd;
 	}
 
 	data.name[sizeof(data.name) - 1] = '\0';
-	fence3 = sync_fence_merge(data.name, fence, fence2);
+	fence3 = sync_file_merge(data.name, sync_file, fence2);
 	if (!fence3) {
 		err = -ENOMEM;
 		goto err_put_fence2;
@@ -604,40 +464,28 @@
 		goto err_put_fence3;
 	}
 
-	sync_fence_install(fence3, fd);
-	sync_fence_put(fence2);
+	sync_file_install(fence3, fd);
+	sync_file_put(fence2);
 	return 0;
 
 err_put_fence3:
-	sync_fence_put(fence3);
+	sync_file_put(fence3);
 
 err_put_fence2:
-	sync_fence_put(fence2);
+	sync_file_put(fence2);
 
 err_put_fd:
 	put_unused_fd(fd);
 	return err;
 }
 
-static int sync_fill_pt_info(struct fence *fence, void *data, int size)
+static int sync_fill_fence_info(struct fence *fence, void *data, int size)
 {
-	struct sync_pt_info *info = data;
-	int ret;
+	struct sync_fence_info *info = data;
 
-	if (size < sizeof(struct sync_pt_info))
+	if (size < sizeof(*info))
 		return -ENOMEM;
 
-	info->len = sizeof(struct sync_pt_info);
-
-	if (fence->ops->fill_driver_data) {
-		ret = fence->ops->fill_driver_data(fence, info->driver_data,
-						   size - sizeof(*info));
-		if (ret < 0)
-			return ret;
-
-		info->len += ret;
-	}
-
 	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
 		sizeof(info->obj_name));
 	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
@@ -648,13 +496,13 @@
 		info->status = 0;
 	info->timestamp_ns = ktime_to_ns(fence->timestamp);
 
-	return info->len;
+	return sizeof(*info);
 }
 
-static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 					unsigned long arg)
 {
-	struct sync_fence_info_data *data;
+	struct sync_file_info *info;
 	__u32 size;
 	__u32 len = 0;
 	int ret, i;
@@ -662,27 +510,27 @@
 	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
 		return -EFAULT;
 
-	if (size < sizeof(struct sync_fence_info_data))
+	if (size < sizeof(struct sync_file_info))
 		return -EINVAL;
 
 	if (size > 4096)
 		size = 4096;
 
-	data = kzalloc(size, GFP_KERNEL);
-	if (!data)
+	info = kzalloc(size, GFP_KERNEL);
+	if (!info)
 		return -ENOMEM;
 
-	strlcpy(data->name, fence->name, sizeof(data->name));
-	data->status = atomic_read(&fence->status);
-	if (data->status >= 0)
-		data->status = !data->status;
+	strlcpy(info->name, sync_file->name, sizeof(info->name));
+	info->status = atomic_read(&sync_file->status);
+	if (info->status >= 0)
+		info->status = !info->status;
 
-	len = sizeof(struct sync_fence_info_data);
+	len = sizeof(struct sync_file_info);
 
-	for (i = 0; i < fence->num_fences; ++i) {
-		struct fence *pt = fence->cbs[i].sync_pt;
+	for (i = 0; i < sync_file->num_fences; ++i) {
+		struct fence *fence = sync_file->cbs[i].fence;
 
-		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+		ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len);
 
 		if (ret < 0)
 			goto out;
@@ -690,43 +538,40 @@
 		len += ret;
 	}
 
-	data->len = len;
+	info->len = len;
 
-	if (copy_to_user((void __user *)arg, data, len))
+	if (copy_to_user((void __user *)arg, info, len))
 		ret = -EFAULT;
 	else
 		ret = 0;
 
 out:
-	kfree(data);
+	kfree(info);
 
 	return ret;
 }
 
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+static long sync_file_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
-	struct sync_fence *fence = file->private_data;
+	struct sync_file *sync_file = file->private_data;
 
 	switch (cmd) {
-	case SYNC_IOC_WAIT:
-		return sync_fence_ioctl_wait(fence, arg);
-
 	case SYNC_IOC_MERGE:
-		return sync_fence_ioctl_merge(fence, arg);
+		return sync_file_ioctl_merge(sync_file, arg);
 
 	case SYNC_IOC_FENCE_INFO:
-		return sync_fence_ioctl_fence_info(fence, arg);
+		return sync_file_ioctl_fence_info(sync_file, arg);
 
 	default:
 		return -ENOTTY;
 	}
 }
 
-static const struct file_operations sync_fence_fops = {
-	.release = sync_fence_release,
-	.poll = sync_fence_poll,
-	.unlocked_ioctl = sync_fence_ioctl,
-	.compat_ioctl = sync_fence_ioctl,
+static const struct file_operations sync_file_fops = {
+	.release = sync_file_release,
+	.poll = sync_file_poll,
+	.unlocked_ioctl = sync_file_ioctl,
+	.compat_ioctl = sync_file_ioctl,
 };
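
A minimal userspace sketch of the merge path kept by this patch, assuming the legacy staging uapi layout of struct sync_merge_data (fd2, name[32], fence) and that the ioctl writes the new sync_file's fd back into the fence field; the header path is illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include "uapi/sync.h"	/* staging uapi header; path is illustrative */

/* Merge two fence fds; returns the fd of the new sync_file, or -1. */
static int sync_merge_fds(int fd1, int fd2, const char *name)
{
	struct sync_merge_data data;

	memset(&data, 0, sizeof(data));
	data.fd2 = fd2;
	strncpy(data.name, name, sizeof(data.name) - 1);

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
		return -1;

	return data.fence;	/* written back by the ioctl */
}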
 
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index afa0752..d2a1734 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -18,63 +18,35 @@
 #include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/wait.h>
 #include <linux/fence.h>
 
 #include "uapi/sync.h"
 
 struct sync_timeline;
-struct sync_pt;
-struct sync_fence;
+struct sync_file;
 
 /**
  * struct sync_timeline_ops - sync object implementation ops
  * @driver_name:	name of the implementation
- * @dup:		duplicate a sync_pt
  * @has_signaled:	returns:
  *			  1 if pt has signaled
  *			  0 if pt has not signaled
  *			 <0 on error
- * @compare:		returns:
- *			  1 if b will signal before a
- *			  0 if a and b will signal at the same time
- *			 -1 if a will signal before b
- * @free_pt:		called before sync_pt is freed
- * @release_obj:	called before sync_timeline is freed
- * @fill_driver_data:	write implementation specific driver data to data.
- *			  should return an error if there is not enough room
- *			  as specified by size.  This information is returned
- *			  to userspace by SYNC_IOC_FENCE_INFO.
  * @timeline_value_str: fill str with the value of the sync_timeline's counter
- * @pt_value_str:	fill str with the value of the sync_pt
+ * @fence_value_str:	fill str with the value of the fence
  */
 struct sync_timeline_ops {
 	const char *driver_name;
 
 	/* required */
-	struct sync_pt * (*dup)(struct sync_pt *pt);
-
-	/* required */
-	int (*has_signaled)(struct sync_pt *pt);
-
-	/* required */
-	int (*compare)(struct sync_pt *a, struct sync_pt *b);
-
-	/* optional */
-	void (*free_pt)(struct sync_pt *sync_pt);
-
-	/* optional */
-	void (*release_obj)(struct sync_timeline *sync_timeline);
-
-	/* optional */
-	int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+	int (*has_signaled)(struct fence *fence);
 
 	/* optional */
 	void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
 				   int size);
 
 	/* optional */
-	void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+	void (*fence_value_str)(struct fence *fence, char *str, int size);
 };
 
 /**
@@ -85,7 +57,7 @@
  * @destroyed:		set when sync_timeline is destroyed
  * @child_list_head:	list of children sync_pts for this sync_timeline
  * @child_list_lock:	lock protecting @child_list_head, destroyed, and
- *			  sync_pt.status
+ *			fence.status
  * @active_list_head:	list of active (unsignaled/errored) sync_pts
  * @sync_timeline_list:	membership in global sync_timeline_list
  */
@@ -108,86 +80,44 @@
 #endif
 };
 
-/**
- * struct sync_pt - sync point
- * @fence:		base fence class
- * @child_list:		membership in sync_timeline.child_list_head
- * @active_list:	membership in sync_timeline.active_list_head
- * @signaled_list:	membership in temporary signaled_list on stack
- * @fence:		sync_fence to which the sync_pt belongs
- * @pt_list:		membership in sync_fence.pt_list_head
- * @status:		1: signaled, 0:active, <0: error
- * @timestamp:		time which sync_pt status transitioned from active to
- *			  signaled or error.
- */
-struct sync_pt {
-	struct fence base;
-
-	struct list_head	child_list;
-	struct list_head	active_list;
-};
-
-static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+static inline struct sync_timeline *fence_parent(struct fence *fence)
 {
-	return container_of(pt->base.lock, struct sync_timeline,
+	return container_of(fence->lock, struct sync_timeline,
 			    child_list_lock);
 }
 
-struct sync_fence_cb {
+struct sync_file_cb {
 	struct fence_cb cb;
-	struct fence *sync_pt;
-	struct sync_fence *fence;
+	struct fence *fence;
+	struct sync_file *sync_file;
 };
 
 /**
- * struct sync_fence - sync fence
+ * struct sync_file - sync file to export to userspace
  * @file:		file representing this fence
  * @kref:		reference count on fence.
- * @name:		name of sync_fence.  Useful for debugging
- * @pt_list_head:	list of sync_pts in the fence.  immutable once fence
- *			  is created
- * @status:		0: signaled, >0:active, <0: error
- *
+ * @name:		name of sync_file.  Useful for debugging
+ * @sync_file_list:	membership in global file list
+ * @num_fences:	number of fences in the sync_file
  * @wq:			wait queue for fence signaling
- * @sync_fence_list:	membership in global fence list
+ * @status:		0: signaled, >0:active, <0: error
+ * @cbs:		fence callback information
  */
-struct sync_fence {
+struct sync_file {
 	struct file		*file;
 	struct kref		kref;
 	char			name[32];
 #ifdef CONFIG_DEBUG_FS
-	struct list_head	sync_fence_list;
+	struct list_head	sync_file_list;
 #endif
 	int num_fences;
 
 	wait_queue_head_t	wq;
 	atomic_t		status;
 
-	struct sync_fence_cb	cbs[];
+	struct sync_file_cb	cbs[];
 };
 
-struct sync_fence_waiter;
-typedef void (*sync_callback_t)(struct sync_fence *fence,
-				struct sync_fence_waiter *waiter);
-
-/**
- * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
- * @waiter_list:	membership in sync_fence.waiter_list_head
- * @callback:		function pointer to call when fence signals
- * @callback_data:	pointer to pass to @callback
- */
-struct sync_fence_waiter {
-	wait_queue_t work;
-	sync_callback_t callback;
-};
-
-static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
-					  sync_callback_t callback)
-{
-	INIT_LIST_HEAD(&waiter->work.task_list);
-	waiter->callback = callback;
-}
-
 /*
  * API for sync_timeline implementers
  */
@@ -200,7 +130,8 @@
  *
  * Creates a new sync_timeline which will use the implementation specified by
  * @ops.  @size bytes will be allocated allowing for implementation specific
- * data to be kept after the generic sync_timeline struct.
+ * data to be kept after the generic sync_timeline struct. Returns the
+ * sync_timeline object or NULL in case of error.
  */
 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
 					   int size, const char *name);
@@ -211,7 +142,7 @@
  *
  * A sync implementation should call this when the @obj is going away
  * (i.e. module unload.)  @obj won't actually be freed until all its children
- * sync_pts are freed.
+ * fences are freed.
  */
 void sync_timeline_destroy(struct sync_timeline *obj);
 
@@ -219,148 +150,92 @@
  * sync_timeline_signal() - signal a status change on a sync_timeline
  * @obj:	sync_timeline to signal
  *
- * A sync implementation should call this any time one of it's sync_pts
+ * A sync implementation should call this any time one of its fences
  * has signaled or has an error condition.
  */
 void sync_timeline_signal(struct sync_timeline *obj);
 
 /**
  * sync_pt_create() - creates a sync pt
- * @parent:	sync_pt's parent sync_timeline
+ * @parent:	fence's parent sync_timeline
  * @size:	size to allocate for this pt
  *
- * Creates a new sync_pt as a child of @parent.  @size bytes will be
+ * Creates a new fence as a child of @parent.  @size bytes will be
  * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct.
+ * the generic sync_timeline struct. Returns the fence object or
+ * NULL in case of error.
  */
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
-
-/**
- * sync_pt_free() - frees a sync pt
- * @pt:		sync_pt to free
- *
- * This should only be called on sync_pts which have been created but
- * not added to a fence.
- */
-void sync_pt_free(struct sync_pt *pt);
+struct fence *sync_pt_create(struct sync_timeline *parent, int size);
 
 /**
  * sync_fence_create() - creates a sync fence
  * @name:	name of fence to create
- * @pt:		sync_pt to add to the fence
+ * @fence:	fence to add to the sync_file
  *
- * Creates a fence containg @pt.  Once this is called, the fence takes
- * ownership of @pt.
+ * Creates a sync_file containing @fence. Once this is called, the sync_file
+ * takes ownership of @fence.
  */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
-
-/**
- * sync_fence_create_dma() - creates a sync fence from dma-fence
- * @name:	name of fence to create
- * @pt:	dma-fence to add to the fence
- *
- * Creates a fence containg @pt.  Once this is called, the fence takes
- * ownership of @pt.
- */
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt);
+struct sync_file *sync_file_create(const char *name, struct fence *fence);
 
 /*
- * API for sync_fence consumers
+ * API for sync_file consumers
  */
 
 /**
- * sync_fence_merge() - merge two fences
+ * sync_file_merge() - merge two sync_files
  * @name:	name of new fence
- * @a:		fence a
- * @b:		fence b
+ * @a:		sync_file a
+ * @b:		sync_file b
  *
- * Creates a new fence which contains copies of all the sync_pts in both
- * @a and @b.  @a and @b remain valid, independent fences.
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b.  @a and @b remain valid, independent sync_files. Returns the
+ * new merged sync_file or NULL in case of error.
  */
-struct sync_fence *sync_fence_merge(const char *name,
-				    struct sync_fence *a, struct sync_fence *b);
+struct sync_file *sync_file_merge(const char *name,
+				    struct sync_file *a, struct sync_file *b);
 
 /**
- * sync_fence_fdget() - get a fence from an fd
+ * sync_file_fdget() - get a sync_file from an fd
  * @fd:		fd referencing a fence
  *
- * Ensures @fd references a valid fence, increments the refcount of the backing
- * file, and returns the fence.
+ * Ensures @fd references a valid sync_file, increments the refcount of the
+ * backing file. Returns the sync_file or NULL in case of error.
  */
-struct sync_fence *sync_fence_fdget(int fd);
+struct sync_file *sync_file_fdget(int fd);
 
 /**
- * sync_fence_put() - puts a reference of a sync fence
- * @fence:	fence to put
+ * sync_file_put() - puts a reference of a sync_file
+ * @sync_file:	sync_file to put
  *
- * Puts a reference on @fence.  If this is the last reference, the fence and
- * all it's sync_pts will be freed
+ * Puts a reference on @sync_file.  If this is the last reference, the
+ * sync_file and all its fences will be freed.
  */
-void sync_fence_put(struct sync_fence *fence);
+void sync_file_put(struct sync_file *sync_file);
 
 /**
- * sync_fence_install() - installs a fence into a file descriptor
- * @fence:	fence to install
+ * sync_file_install() - installs a sync_file into a file descriptor
+ * @sync_file:	sync_file to install
  * @fd:		file descriptor in which to install the fence
  *
- * Installs @fence into @fd.  @fd's should be acquired through
+ * Installs @sync_file into @fd.  @fd should be acquired through
  * get_unused_fd_flags(O_CLOEXEC).
  */
-void sync_fence_install(struct sync_fence *fence, int fd);
-
-/**
- * sync_fence_wait_async() - registers and async wait on the fence
- * @fence:		fence to wait on
- * @waiter:		waiter callback struck
- *
- * Returns 1 if @fence has already signaled.
- *
- * Registers a callback to be called when @fence signals or has an error.
- * @waiter should be initialized with sync_fence_waiter_init().
- */
-int sync_fence_wait_async(struct sync_fence *fence,
-			  struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_cancel_async() - cancels an async wait
- * @fence:		fence to wait on
- * @waiter:		waiter callback struck
- *
- * returns 0 if waiter was removed from fence's async waiter list.
- * returns -ENOENT if waiter was not found on fence's async waiter list.
- *
- * Cancels a previously registered async wait.  Will fail gracefully if
- * @waiter was never registered or if @fence has already signaled @waiter.
- */
-int sync_fence_cancel_async(struct sync_fence *fence,
-			    struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_wait() - wait on fence
- * @fence:	fence to wait on
- * @tiemout:	timeout in ms
- *
- * Wait for @fence to be signaled or have an error.  Waits indefinitely
- * if @timeout < 0
- */
-int sync_fence_wait(struct sync_fence *fence, long timeout);
+void sync_file_install(struct sync_file *sync_file, int fd);
 
 #ifdef CONFIG_DEBUG_FS
 
 void sync_timeline_debug_add(struct sync_timeline *obj);
 void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_fence_debug_add(struct sync_fence *fence);
-void sync_fence_debug_remove(struct sync_fence *fence);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
 void sync_dump(void);
 
 #else
 # define sync_timeline_debug_add(obj)
 # define sync_timeline_debug_remove(obj)
-# define sync_fence_debug_add(fence)
-# define sync_fence_debug_remove(fence)
+# define sync_file_debug_add(fence)
+# define sync_file_debug_remove(fence)
 # define sync_dump()
 #endif
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
-				 int wake_flags, void *key);
 
 #endif /* _LINUX_SYNC_H */
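
A hypothetical driver-side sketch of the renamed API declared above, mirroring the sw_sync ioctl later in this patch; my_driver_export_fence, my_timeline and my_pt_size are placeholders, and error handling is trimmed to the essentials:

#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fence.h>
#include "sync.h"

static int my_driver_export_fence(struct sync_timeline *my_timeline,
				  int my_pt_size, const char *name)
{
	struct fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	/* create a fence on the timeline, wrap it, hand the fd out */
	fence = sync_pt_create(my_timeline, my_pt_size);
	if (!fence) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	sync_file = sync_file_create(name, fence);
	if (!sync_file) {
		fence_put(fence);
		put_unused_fd(fd);
		return -ENOMEM;
	}

	sync_file_install(sync_file, fd);
	return fd;
}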
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index f45d13c..5a7ec58 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -26,14 +26,16 @@
 #include <linux/uaccess.h>
 #include <linux/anon_inodes.h>
 #include <linux/time64.h>
-#include "sync.h"
+#include "sw_sync.h"
 
 #ifdef CONFIG_DEBUG_FS
 
+static struct dentry *dbgfs;
+
 static LIST_HEAD(sync_timeline_list_head);
 static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
+static LIST_HEAD(sync_file_list_head);
+static DEFINE_SPINLOCK(sync_file_list_lock);
 
 void sync_timeline_debug_add(struct sync_timeline *obj)
 {
@@ -53,22 +55,22 @@
 	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
 }
 
-void sync_fence_debug_add(struct sync_fence *fence)
+void sync_file_debug_add(struct sync_file *sync_file)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+	spin_lock_irqsave(&sync_file_list_lock, flags);
+	list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
+	spin_unlock_irqrestore(&sync_file_list_lock, flags);
 }
 
-void sync_fence_debug_remove(struct sync_fence *fence)
+void sync_file_debug_remove(struct sync_file *sync_file)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_del(&fence->sync_fence_list);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+	spin_lock_irqsave(&sync_file_list_lock, flags);
+	list_del(&sync_file->sync_file_list);
+	spin_unlock_irqrestore(&sync_file_list_lock, flags);
 }
 
 static const char *sync_status_str(int status)
@@ -82,39 +84,40 @@
 	return "error";
 }
 
-static void sync_print_pt(struct seq_file *s, struct fence *pt, bool fence)
+static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
 {
 	int status = 1;
+	struct sync_timeline *parent = fence_parent(fence);
 
-	if (fence_is_signaled_locked(pt))
-		status = pt->status;
+	if (fence_is_signaled_locked(fence))
+		status = fence->status;
 
-	seq_printf(s, "  %s%spt %s",
-		   fence && pt->ops->get_timeline_name ?
-		   pt->ops->get_timeline_name(pt) : "",
-		   fence ? "_" : "",
+	seq_printf(s, "  %s%sfence %s",
+		   show ? parent->name : "",
+		   show ? "_" : "",
 		   sync_status_str(status));
 
 	if (status <= 0) {
 		struct timespec64 ts64 =
-			ktime_to_timespec64(pt->timestamp);
+			ktime_to_timespec64(fence->timestamp);
 
 		seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
 	}
 
-	if ((!fence || pt->ops->timeline_value_str) &&
-	    pt->ops->fence_value_str) {
+	if ((!fence || fence->ops->timeline_value_str) &&
+		fence->ops->fence_value_str) {
 		char value[64];
 		bool success;
 
-		pt->ops->fence_value_str(pt, value, sizeof(value));
+		fence->ops->fence_value_str(fence, value, sizeof(value));
 		success = strlen(value);
 
 		if (success)
 			seq_printf(s, ": %s", value);
 
 		if (success && fence) {
-			pt->ops->timeline_value_str(pt, value, sizeof(value));
+			fence->ops->timeline_value_str(fence, value,
+						       sizeof(value));
 
 			if (strlen(value))
 				seq_printf(s, " / %s", value);
@@ -142,38 +145,23 @@
 
 	spin_lock_irqsave(&obj->child_list_lock, flags);
 	list_for_each(pos, &obj->child_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, child_list);
-		sync_print_pt(s, &pt->base, false);
+		struct fence *fence =
+			container_of(pos, struct fence, child_list);
+		sync_print_fence(s, fence, false);
 	}
 	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 }
 
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+static void sync_print_sync_file(struct seq_file *s,
+				  struct sync_file *sync_file)
 {
-	wait_queue_t *pos;
-	unsigned long flags;
 	int i;
 
-	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
-		   sync_status_str(atomic_read(&fence->status)));
+	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
+		   sync_status_str(atomic_read(&sync_file->status)));
 
-	for (i = 0; i < fence->num_fences; ++i) {
-		sync_print_pt(s, fence->cbs[i].sync_pt, true);
-	}
-
-	spin_lock_irqsave(&fence->wq.lock, flags);
-	list_for_each_entry(pos, &fence->wq.task_list, task_list) {
-		struct sync_fence_waiter *waiter;
-
-		if (pos->func != &sync_fence_wake_up_wq)
-			continue;
-
-		waiter = container_of(pos, struct sync_fence_waiter, work);
-
-		seq_printf(s, "waiter %pF\n", waiter->callback);
-	}
-	spin_unlock_irqrestore(&fence->wq.lock, flags);
+	for (i = 0; i < sync_file->num_fences; ++i)
+		sync_print_fence(s, sync_file->cbs[i].fence, true);
 }
 
 static int sync_debugfs_show(struct seq_file *s, void *unused)
@@ -196,33 +184,152 @@
 
 	seq_puts(s, "fences:\n--------------\n");
 
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_for_each(pos, &sync_fence_list_head) {
-		struct sync_fence *fence =
-			container_of(pos, struct sync_fence, sync_fence_list);
+	spin_lock_irqsave(&sync_file_list_lock, flags);
+	list_for_each(pos, &sync_file_list_head) {
+		struct sync_file *sync_file =
+			container_of(pos, struct sync_file, sync_file_list);
 
-		sync_print_fence(s, fence);
+		sync_print_sync_file(s, sync_file);
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+	spin_unlock_irqrestore(&sync_file_list_lock, flags);
 	return 0;
 }
 
-static int sync_debugfs_open(struct inode *inode, struct file *file)
+static int sync_info_debugfs_open(struct inode *inode, struct file *file)
 {
 	return single_open(file, sync_debugfs_show, inode->i_private);
 }
 
-static const struct file_operations sync_debugfs_fops = {
-	.open           = sync_debugfs_open,
+static const struct file_operations sync_info_debugfs_fops = {
+	.open           = sync_info_debugfs_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
 	.release        = single_release,
 };
 
+/*
+ * *WARNING*
+ *
+ * Improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct sw_sync_timeline *obj;
+	char task_comm[TASK_COMM_LEN];
+
+	get_task_comm(task_comm, current);
+
+	obj = sw_sync_timeline_create(task_comm);
+	if (!obj)
+		return -ENOMEM;
+
+	file->private_data = obj;
+
+	return 0;
+}
+
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct sw_sync_timeline *obj = file->private_data;
+
+	sync_timeline_destroy(&obj->obj);
+	return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
+				       unsigned long arg)
+{
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	int err;
+	struct fence *fence;
+	struct sync_file *sync_file;
+	struct sw_sync_create_fence_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	fence = sw_sync_pt_create(obj, data.value);
+	if (!fence) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	data.name[sizeof(data.name) - 1] = '\0';
+	sync_file = sync_file_create(data.name, fence);
+	if (!sync_file) {
+		fence_put(fence);
+		err = -ENOMEM;
+		goto err;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		sync_file_put(sync_file);
+		err = -EFAULT;
+		goto err;
+	}
+
+	sync_file_install(sync_file, fd);
+
+	return 0;
+
+err:
+	put_unused_fd(fd);
+	return err;
+}
+
+static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+	u32 value;
+
+	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+		return -EFAULT;
+
+	sw_sync_timeline_inc(obj, value);
+
+	return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct sw_sync_timeline *obj = file->private_data;
+
+	switch (cmd) {
+	case SW_SYNC_IOC_CREATE_FENCE:
+		return sw_sync_ioctl_create_fence(obj, arg);
+
+	case SW_SYNC_IOC_INC:
+		return sw_sync_ioctl_inc(obj, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations sw_sync_debugfs_fops = {
+	.open           = sw_sync_debugfs_open,
+	.release        = sw_sync_debugfs_release,
+	.unlocked_ioctl = sw_sync_ioctl,
+	.compat_ioctl = sw_sync_ioctl,
+};
+
 static __init int sync_debugfs_init(void)
 {
-	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+	dbgfs = debugfs_create_dir("sync", NULL);
+
+	debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops);
+	debugfs_create_file("sw_sync", 0644, dbgfs, NULL,
+			    &sw_sync_debugfs_fops);
+
 	return 0;
 }
 late_initcall(sync_debugfs_init);
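
A short userspace sketch of driving the debugfs node added above, assuming the sw_sync uapi (SW_SYNC_IOC_CREATE_FENCE, SW_SYNC_IOC_INC, struct sw_sync_create_fence_data) and debugfs mounted at /sys/kernel/debug; paths and names are illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "uapi/sw_sync.h"	/* staging uapi header; path is illustrative */

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int timeline;

	/* opening the node creates a timeline; closing it destroys it */
	timeline = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	if (timeline < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "test_fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* advance the timeline by one; the fence created above signals */
	ioctl(timeline, SW_SYNC_IOC_INC, &inc);

	close(data.fence);
	close(timeline);
	return 0;
}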
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
index 77edb97..a0f80f4 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/staging/android/trace/sync.h
@@ -32,50 +32,6 @@
 	TP_printk("name=%s value=%s", __get_str(name), __entry->value)
 );
 
-TRACE_EVENT(sync_wait,
-	TP_PROTO(struct sync_fence *fence, int begin),
-
-	TP_ARGS(fence, begin),
-
-	TP_STRUCT__entry(
-			__string(name, fence->name)
-			__field(s32, status)
-			__field(u32, begin)
-	),
-
-	TP_fast_assign(
-			__assign_str(name, fence->name);
-			__entry->status = atomic_read(&fence->status);
-			__entry->begin = begin;
-	),
-
-	TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
-			__get_str(name), __entry->status)
-);
-
-TRACE_EVENT(sync_pt,
-	TP_PROTO(struct fence *pt),
-
-	TP_ARGS(pt),
-
-	TP_STRUCT__entry(
-		__string(timeline, pt->ops->get_timeline_name(pt))
-		__array(char, value, 32)
-	),
-
-	TP_fast_assign(
-		__assign_str(timeline, pt->ops->get_timeline_name(pt));
-		if (pt->ops->fence_value_str) {
-			pt->ops->fence_value_str(pt, __entry->value,
-							sizeof(__entry->value));
-		} else {
-			__entry->value[0] = '\0';
-		}
-	),
-
-	TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
-);
-
 #endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
 
 /* This part must be outside protection */
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
index ba4743c..13df42d 100644
--- a/drivers/staging/android/uapi/ashmem.h
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -13,6 +13,7 @@
 #define _UAPI_LINUX_ASHMEM_H
 
 #include <linux/ioctl.h>
+#include <linux/types.h>
 
 #define ASHMEM_NAME_LEN		256
 
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
index e964c75..a0cf357 100644
--- a/drivers/staging/android/uapi/sync.h
+++ b/drivers/staging/android/uapi/sync.h
@@ -27,51 +27,39 @@
 };
 
 /**
- * struct sync_pt_info - detailed sync_pt information
- * @len:		length of sync_pt_info including any driver_data
+ * struct sync_fence_info - detailed fence information
  * @obj_name:		name of parent sync_timeline
  * @driver_name:	name of driver implementing the parent
- * @status:		status of the sync_pt 0:active 1:signaled <0:error
+ * @status:		status of the fence 0:active 1:signaled <0:error
  * @timestamp_ns:	timestamp of status change in nanoseconds
- * @driver_data:	any driver dependent data
  */
-struct sync_pt_info {
-	__u32	len;
+struct sync_fence_info {
 	char	obj_name[32];
 	char	driver_name[32];
 	__s32	status;
 	__u64	timestamp_ns;
-
-	__u8	driver_data[0];
 };
 
 /**
- * struct sync_fence_info_data - data returned from fence info ioctl
+ * struct sync_file_info - data returned from fence info ioctl
  * @len:	ioctl caller writes the size of the buffer its passing in.
- *		ioctl returns length of sync_fence_data returned to userspace
- *		including pt_info.
+ *		ioctl returns length of sync_file_info returned to
+ *		userspace including sync_fence_info.
  * @name:	name of fence
  * @status:	status of fence. 1: signaled 0:active <0:error
- * @pt_info:	a sync_pt_info struct for every sync_pt in the fence
+ * @sync_fence_info: array of sync_fence_info for every fence in the sync_file
  */
-struct sync_fence_info_data {
+struct sync_file_info {
 	__u32	len;
 	char	name[32];
 	__s32	status;
 
-	__u8	pt_info[0];
+	__u8	sync_fence_info[0];
 };
 
 #define SYNC_IOC_MAGIC		'>'
 
 /**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds.  Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
  * DOC: SYNC_IOC_MERGE - merge two fences
  *
  * Takes a struct sync_merge_data.  Creates a new fence containing copies of
@@ -83,15 +71,14 @@
 /**
  * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
  *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Takes a struct sync_file_info with extra space allocated for pt_info.
  * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
+ * updated to reflect the total size of the sync_file_info including
  * pt_info.
  *
  * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
  * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
  */
-#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
-	struct sync_fence_info_data)
+#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info)
 
 #endif /* _UAPI_LINUX_SYNC_H */
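
A hedged userspace sketch of walking the reshaped info ioctl: since driver_data is gone, every record after struct sync_file_info is a fixed-size struct sync_fence_info, so stepping by sizeof(struct sync_fence_info) is enough; buffer size and header path are illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "uapi/sync.h"	/* staging uapi header; path is illustrative */

static void dump_sync_file(int fd)
{
	char buf[4096] __attribute__((aligned(8)));
	struct sync_file_info *info = (struct sync_file_info *)buf;
	struct sync_fence_info *fence;
	__u32 pos;

	memset(buf, 0, sizeof(buf));
	info->len = sizeof(buf);	/* tell the kernel how big the buffer is */
	if (ioctl(fd, SYNC_IOC_FENCE_INFO, info) < 0)
		return;

	printf("%s: status %d\n", info->name, info->status);

	/* info->len now covers the header plus every sync_fence_info */
	for (pos = sizeof(*info); pos + sizeof(*fence) <= info->len;
	     pos += sizeof(*fence)) {
		fence = (struct sync_fence_info *)(buf + pos);
		printf("  %s (%s): status %d\n", fence->obj_name,
		       fence->driver_name, fence->status);
	}
}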
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 912c96b..bb63ece 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -27,7 +27,6 @@
 
 #include "board.h"
 
-
 static struct fb_videomode lcdc0_mode = {
 	.name		= "AMPIER/AM-800480",
 	.xres		= 800,
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 965afc7..45807d8 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -155,7 +155,6 @@
 	if (IS_ERR(pd)) {
 		pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
 		return PTR_ERR(pd);
-
 	}
 	pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
 
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index b8e2f61..7b8be52 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -32,8 +32,8 @@
 
 #define WZRD_CLK_CFG_REG(n)	(0x200 + 4 * (n))
 
-#define WZRD_CLkOUT0_FRAC_EN	BIT(18)
-#define WZRD_CLkFBOUT_FRAC_EN	BIT(26)
+#define WZRD_CLKOUT0_FRAC_EN	BIT(18)
+#define WZRD_CLKFBOUT_FRAC_EN	BIT(26)
 
 #define WZRD_CLKFBOUT_MULT_SHIFT	8
 #define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
@@ -71,6 +71,7 @@
 	int speed_grade;
 	bool suspended;
 };
+
 #define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
 
 /* maximum frequencies for input/output clocks per speed grade */
@@ -195,9 +196,9 @@
 
 	/* we don't support fractional div/mul yet */
 	reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
-		    WZRD_CLkFBOUT_FRAC_EN;
+		    WZRD_CLKFBOUT_FRAC_EN;
 	reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
-		     WZRD_CLkOUT0_FRAC_EN;
+		     WZRD_CLKOUT0_FRAC_EN;
 	if (reg)
 		dev_warn(&pdev->dev, "fractional div/mul not supported\n");
 
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 83bd309..f1415cb 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1,6 +1,6 @@
 /*
- * include/comedi.h (installed as /usr/include/comedi.h)
- * header file for comedi
+ * comedi.h
+ * header file for COMEDI user API
  *
  * COMEDI - Linux Control and Measurement Device Interface
  * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
@@ -72,12 +72,12 @@
 #define CR_AREF(a)	(((a) >> 24) & 0x03)
 
 #define CR_FLAGS_MASK	0xfc000000
-#define CR_ALT_FILTER	(1 << 26)
+#define CR_ALT_FILTER	BIT(26)
 #define CR_DITHER	CR_ALT_FILTER
 #define CR_DEGLITCH	CR_ALT_FILTER
-#define CR_ALT_SOURCE	(1 << 27)
-#define CR_EDGE		(1 << 30)
-#define CR_INVERT	(1 << 31)
+#define CR_ALT_SOURCE	BIT(27)
+#define CR_EDGE		BIT(30)
+#define CR_INVERT	BIT(31)
 
 #define AREF_GROUND	0x00	/* analog ref = analog ground */
 #define AREF_COMMON	0x01	/* analog ref = analog common */
@@ -120,13 +120,6 @@
 #define INSN_WAIT		(5 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
 #define INSN_INTTRIG		(6 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
 
-/* trigger flags */
-/* These flags are used in comedi_trig structures */
-
-#define TRIG_DITHER	0x0002	/* enable dithering */
-#define TRIG_DEGLITCH	0x0004	/* enable deglitching */
-#define TRIG_CONFIG	0x0010	/* perform configuration, not triggering */
-
 /* command flags */
 /* These flags are used in comedi_cmd structures */
 
@@ -190,11 +183,8 @@
 #define SDF_MAXDATA	0x0010	/* maxdata depends on channel */
 #define SDF_FLAGS	0x0020	/* flags depend on channel */
 #define SDF_RANGETYPE	0x0040	/* range type depends on channel */
-#define SDF_MODE0	0x0080	/* can do mode 0 */
-#define SDF_MODE1	0x0100	/* can do mode 1 */
-#define SDF_MODE2	0x0200	/* can do mode 2 */
-#define SDF_MODE3	0x0400	/* can do mode 3 */
-#define SDF_MODE4	0x0800	/* can do mode 4 */
+#define SDF_PWM_COUNTER 0x0080	/* PWM can automatically switch off */
+#define SDF_PWM_HBRIDGE 0x0100	/* PWM is signed (H-bridge) */
 #define SDF_CMD		0x1000	/* can do commands (deprecated) */
 #define SDF_SOFT_CALIBRATED	0x2000 /* subdevice uses software calibration */
 #define SDF_CMD_WRITE		0x4000 /* can do output commands */
@@ -217,30 +207,94 @@
 #define SDF_RUNNING	0x08000000	/* subdevice is acquiring data */
 #define SDF_LSAMPL	0x10000000	/* subdevice uses 32-bit samples */
 #define SDF_PACKED	0x20000000	/* subdevice can do packed DIO */
-/* re recycle these flags for PWM */
-#define SDF_PWM_COUNTER SDF_MODE0	/* PWM can automatically switch off */
-#define SDF_PWM_HBRIDGE SDF_MODE1	/* PWM is signed (H-bridge) */
 
 /* subdevice types */
 
+/**
+ * enum comedi_subdevice_type - COMEDI subdevice types
+ * @COMEDI_SUBD_UNUSED:		Unused subdevice.
+ * @COMEDI_SUBD_AI:		Analog input.
+ * @COMEDI_SUBD_AO:		Analog output.
+ * @COMEDI_SUBD_DI:		Digital input.
+ * @COMEDI_SUBD_DO:		Digital output.
+ * @COMEDI_SUBD_DIO:		Digital input/output.
+ * @COMEDI_SUBD_COUNTER:	Counter.
+ * @COMEDI_SUBD_TIMER:		Timer.
+ * @COMEDI_SUBD_MEMORY:		Memory, EEPROM, DPRAM.
+ * @COMEDI_SUBD_CALIB:		Calibration DACs.
+ * @COMEDI_SUBD_PROC:		Processor, DSP.
+ * @COMEDI_SUBD_SERIAL:		Serial I/O.
+ * @COMEDI_SUBD_PWM:		Pulse-Width Modulation output.
+ */
 enum comedi_subdevice_type {
-	COMEDI_SUBD_UNUSED,	/* unused by driver */
-	COMEDI_SUBD_AI,		/* analog input */
-	COMEDI_SUBD_AO,		/* analog output */
-	COMEDI_SUBD_DI,		/* digital input */
-	COMEDI_SUBD_DO,		/* digital output */
-	COMEDI_SUBD_DIO,	/* digital input/output */
-	COMEDI_SUBD_COUNTER,	/* counter */
-	COMEDI_SUBD_TIMER,	/* timer */
-	COMEDI_SUBD_MEMORY,	/* memory, EEPROM, DPRAM */
-	COMEDI_SUBD_CALIB,	/* calibration DACs */
-	COMEDI_SUBD_PROC,	/* processor, DSP */
-	COMEDI_SUBD_SERIAL,	/* serial IO */
-	COMEDI_SUBD_PWM		/* PWM */
+	COMEDI_SUBD_UNUSED,
+	COMEDI_SUBD_AI,
+	COMEDI_SUBD_AO,
+	COMEDI_SUBD_DI,
+	COMEDI_SUBD_DO,
+	COMEDI_SUBD_DIO,
+	COMEDI_SUBD_COUNTER,
+	COMEDI_SUBD_TIMER,
+	COMEDI_SUBD_MEMORY,
+	COMEDI_SUBD_CALIB,
+	COMEDI_SUBD_PROC,
+	COMEDI_SUBD_SERIAL,
+	COMEDI_SUBD_PWM
 };
 
 /* configuration instructions */
 
+/**
+ * enum configuration_ids - COMEDI configuration instruction codes
+ * @INSN_CONFIG_DIO_INPUT:	Configure digital I/O as input.
+ * @INSN_CONFIG_DIO_OUTPUT:	Configure digital I/O as output.
+ * @INSN_CONFIG_DIO_OPENDRAIN:	Configure digital I/O as open-drain (or open
+ *				collector) output.
+ * @INSN_CONFIG_ANALOG_TRIG:	Configure analog trigger.
+ * @INSN_CONFIG_ALT_SOURCE:	Configure alternate input source.
+ * @INSN_CONFIG_DIGITAL_TRIG:	Configure digital trigger.
+ * @INSN_CONFIG_BLOCK_SIZE:	Configure block size for DMA transfers.
+ * @INSN_CONFIG_TIMER_1:	Configure divisor for external clock.
+ * @INSN_CONFIG_FILTER:		Configure a filter.
+ * @INSN_CONFIG_CHANGE_NOTIFY:	Configure change notification for digital
+ *				inputs.  (New drivers should use
+ *				%INSN_CONFIG_DIGITAL_TRIG instead.)
+ * @INSN_CONFIG_SERIAL_CLOCK:	Configure clock for serial I/O.
+ * @INSN_CONFIG_BIDIRECTIONAL_DATA: Send and receive byte over serial I/O.
+ * @INSN_CONFIG_DIO_QUERY:	Query direction of digital I/O channel.
+ * @INSN_CONFIG_PWM_OUTPUT:	Configure pulse-width modulator output.
+ * @INSN_CONFIG_GET_PWM_OUTPUT:	Get pulse-width modulator output configuration.
+ * @INSN_CONFIG_ARM:		Arm a subdevice or channel.
+ * @INSN_CONFIG_DISARM:		Disarm a subdevice or channel.
+ * @INSN_CONFIG_GET_COUNTER_STATUS: Get counter status.
+ * @INSN_CONFIG_RESET:		Reset a subdevice or channel.
+ * @INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR: Configure counter/timer as
+ *				single pulse generator.
+ * @INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR: Configure counter/timer as
+ *				pulse train generator.
+ * @INSN_CONFIG_GPCT_QUADRATURE_ENCODER: Configure counter as a quadrature
+ *				encoder.
+ * @INSN_CONFIG_SET_GATE_SRC:	Set counter/timer gate source.
+ * @INSN_CONFIG_GET_GATE_SRC:	Get counter/timer gate source.
+ * @INSN_CONFIG_SET_CLOCK_SRC:	Set counter/timer master clock source.
+ * @INSN_CONFIG_GET_CLOCK_SRC:	Get counter/timer master clock source.
+ * @INSN_CONFIG_SET_OTHER_SRC:	Set counter/timer "other" source.
+ * @INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: Get size (in bytes) of subdevice's
+ *				on-board FIFOs used during streaming
+ *				input/output.
+ * @INSN_CONFIG_SET_COUNTER_MODE: Set counter/timer mode.
+ * @INSN_CONFIG_8254_SET_MODE:	(Deprecated) Same as
+ *				%INSN_CONFIG_SET_COUNTER_MODE.
+ * @INSN_CONFIG_8254_READ_STATUS: Read status of 8254 counter channel.
+ * @INSN_CONFIG_SET_ROUTING:	Set routing for a channel.
+ * @INSN_CONFIG_GET_ROUTING:	Get routing for a channel.
+ * @INSN_CONFIG_PWM_SET_PERIOD: Set PWM period in nanoseconds.
+ * @INSN_CONFIG_PWM_GET_PERIOD: Get PWM period in nanoseconds.
+ * @INSN_CONFIG_GET_PWM_STATUS: Get PWM status.
+ * @INSN_CONFIG_PWM_SET_H_BRIDGE: Set PWM H bridge duty cycle and polarity for
+ *				a relay simultaneously.
+ * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
+ */
 enum configuration_ids {
 	INSN_CONFIG_DIO_INPUT = 0,
 	INSN_CONFIG_DIO_OUTPUT = 1,
@@ -265,72 +319,76 @@
 	INSN_CONFIG_DISARM = 32,
 	INSN_CONFIG_GET_COUNTER_STATUS = 33,
 	INSN_CONFIG_RESET = 34,
-	/* Use CTR as single pulsegenerator */
 	INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,
-	/* Use CTR as pulsetraingenerator */
 	INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,
-	/* Use the counter as encoder */
 	INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,
-	INSN_CONFIG_SET_GATE_SRC = 2001,	/* Set gate source */
-	INSN_CONFIG_GET_GATE_SRC = 2002,	/* Get gate source */
-	/* Set master clock source */
+	INSN_CONFIG_SET_GATE_SRC = 2001,
+	INSN_CONFIG_GET_GATE_SRC = 2002,
 	INSN_CONFIG_SET_CLOCK_SRC = 2003,
-	INSN_CONFIG_GET_CLOCK_SRC = 2004, /* Get master clock source */
-	INSN_CONFIG_SET_OTHER_SRC = 2005, /* Set other source */
-	/* INSN_CONFIG_GET_OTHER_SRC = 2006,*//* Get other source */
-	/* Get size in bytes of subdevice's on-board fifos used during
-	 * streaming input/output
-	 */
+	INSN_CONFIG_GET_CLOCK_SRC = 2004,
+	INSN_CONFIG_SET_OTHER_SRC = 2005,
 	INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006,
 	INSN_CONFIG_SET_COUNTER_MODE = 4097,
-	/* INSN_CONFIG_8254_SET_MODE is deprecated */
 	INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,
 	INSN_CONFIG_8254_READ_STATUS = 4098,
 	INSN_CONFIG_SET_ROUTING = 4099,
 	INSN_CONFIG_GET_ROUTING = 4109,
-	/* PWM */
-	INSN_CONFIG_PWM_SET_PERIOD = 5000,	/* sets frequency */
-	INSN_CONFIG_PWM_GET_PERIOD = 5001,	/* gets frequency */
-	INSN_CONFIG_GET_PWM_STATUS = 5002,	/* is it running? */
-	/* sets H bridge: duty cycle and sign bit for a relay at the
-	 * same time
-	 */
+	INSN_CONFIG_PWM_SET_PERIOD = 5000,
+	INSN_CONFIG_PWM_GET_PERIOD = 5001,
+	INSN_CONFIG_GET_PWM_STATUS = 5002,
 	INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,
-	/* gets H bridge data: duty cycle and the sign bit */
 	INSN_CONFIG_PWM_GET_H_BRIDGE = 5004
 };
 
-/*
- * Settings for INSN_CONFIG_DIGITAL_TRIG:
- * data[0] = INSN_CONFIG_DIGITAL_TRIG
- * data[1] = trigger ID
- * data[2] = configuration operation
- * data[3] = configuration parameter 1
- * data[4] = configuration parameter 2
- * data[5] = configuration parameter 3
+/**
+ * enum comedi_digital_trig_op - operations for configuring a digital trigger
+ * @COMEDI_DIGITAL_TRIG_DISABLE:	Return digital trigger to its default,
+ *					inactive, unconfigured state.
+ * @COMEDI_DIGITAL_TRIG_ENABLE_EDGES:	Set rising and/or falling edge inputs
+ *					that each can fire the trigger.
+ * @COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:	Set a combination of high and/or low
+ *					level inputs that can fire the trigger.
  *
- * operation                           parameter 1   parameter 2   parameter 3
- * ---------------------------------   -----------   -----------   -----------
- * COMEDI_DIGITAL_TRIG_DISABLE
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES    left-shift    rising-edges  falling-edges
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS   left-shift    high-levels   low-levels
+ * These are used with the %INSN_CONFIG_DIGITAL_TRIG configuration instruction.
+ * The data for the configuration instruction is as follows...
  *
- * COMEDI_DIGITAL_TRIG_DISABLE returns the trigger to its default, inactive,
- * unconfigured state.
+ *   data[%0] = %INSN_CONFIG_DIGITAL_TRIG
  *
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES sets the rising and/or falling edge inputs
- * that each can fire the trigger.
+ *   data[%1] = trigger ID
  *
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS sets a combination of high and/or low
- * level inputs that can fire the trigger.
+ *   data[%2] = configuration operation
  *
- * "left-shift" is useful if the trigger has more than 32 inputs to specify the
- * first input for this configuration.
+ *   data[%3] = configuration parameter 1
  *
- * Some sequences of INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
+ *   data[%4] = configuration parameter 2
+ *
+ *   data[%5] = configuration parameter 3
+ *
+ * The trigger ID (data[%1]) is used to differentiate multiple digital triggers
+ * belonging to the same subdevice.  The configuration operation (data[%2]) is
+ * one of the enum comedi_digital_trig_op values.  The configuration
+ * parameters (data[%3], data[%4], and data[%5]) depend on the operation; they
+ * are not used with %COMEDI_DIGITAL_TRIG_DISABLE.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES and %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS,
+ * configuration parameter 1 (data[%3]) contains a "left-shift" value that
+ * specifies the input corresponding to bit 0 of configuration parameters 2
+ * and 3.  This is useful if the trigger has more than 32 inputs.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES, configuration parameter 2 (data[%4])
+ * specifies which of up to 32 inputs have rising-edge sensitivity, and
+ * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
+ * have falling-edge sensitivity that can fire the trigger.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, configuration parameter 2 (data[%4])
+ * specifies which of up to 32 inputs must be at a high level, and
+ * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
+ * must be at a low level for the trigger to fire.
+ *
+ * Some sequences of %INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
  * accumulative effect, depending on the low-level driver.  This is useful
- * when setting up a trigger that has more than 32 inputs or has a combination
- * of edge and level triggered inputs.
+ * when setting up a trigger that has more than 32 inputs, or has a combination
+ * of edge- and level-triggered inputs.
  */
 enum comedi_digital_trig_op {
 	COMEDI_DIGITAL_TRIG_DISABLE = 0,
@@ -338,18 +396,49 @@
 	COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2
 };
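
A sketch of the data[] layout documented above for %INSN_CONFIG_DIGITAL_TRIG, sent as a COMEDI instruction; subdevice index, trigger ID and edge mask are illustrative, and the example assumes a userspace copy of this header (e.g. comedilib's) providing struct comedi_insn and the COMEDI_INSN ioctl:

#include <string.h>
#include <sys/ioctl.h>
#include <comedi.h>	/* userspace copy of this header; path is illustrative */

static int enable_rising_edge_trigger(int dev_fd, unsigned int subdev,
				      unsigned int trig_id,
				      unsigned int rising_mask)
{
	unsigned int data[6];
	struct comedi_insn insn;

	data[0] = INSN_CONFIG_DIGITAL_TRIG;
	data[1] = trig_id;		/* which trigger on the subdevice */
	data[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES;
	data[3] = 0;			/* left-shift: parameters cover inputs 0..31 */
	data[4] = rising_mask;		/* rising-edge sensitive inputs */
	data[5] = 0;			/* no falling-edge inputs */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 6;
	insn.data = data;
	insn.subdev = subdev;

	return ioctl(dev_fd, COMEDI_INSN, &insn);
}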
 
+/**
+ * enum comedi_io_direction - COMEDI I/O directions
+ * @COMEDI_INPUT:	Input.
+ * @COMEDI_OUTPUT:	Output.
+ * @COMEDI_OPENDRAIN:	Open-drain (or open-collector) output.
+ *
+ * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
+ * report a direction.  They may also be used in other places where a direction
+ * needs to be specified.
+ */
 enum comedi_io_direction {
 	COMEDI_INPUT = 0,
 	COMEDI_OUTPUT = 1,
 	COMEDI_OPENDRAIN = 2
 };
 
+/**
+ * enum comedi_support_level - support level for a COMEDI feature
+ * @COMEDI_UNKNOWN_SUPPORT:	Unspecified support for feature.
+ * @COMEDI_SUPPORTED:		Feature is supported.
+ * @COMEDI_UNSUPPORTED:		Feature is unsupported.
+ */
 enum comedi_support_level {
 	COMEDI_UNKNOWN_SUPPORT = 0,
 	COMEDI_SUPPORTED,
 	COMEDI_UNSUPPORTED
 };
 
+/**
+ * enum comedi_counter_status_flags - counter status bits
+ * @COMEDI_COUNTER_ARMED:		Counter is armed.
+ * @COMEDI_COUNTER_COUNTING:		Counter is counting.
+ * @COMEDI_COUNTER_TERMINAL_COUNT:	Counter reached terminal count.
+ *
+ * These bitwise values are used by the %INSN_CONFIG_GET_COUNTER_STATUS
+ * configuration instruction to report the status of a counter.
+ */
+enum comedi_counter_status_flags {
+	COMEDI_COUNTER_ARMED = 0x1,
+	COMEDI_COUNTER_COUNTING = 0x2,
+	COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
+};
+
 /* ioctls */
 
 #define CIO 'd'
@@ -357,7 +446,7 @@
 #define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo)
 #define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo)
 #define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo)
-#define COMEDI_TRIG _IOWR(CIO, 4, comedi_trig)
+/* _IOWR(CIO, 4, ...) is reserved */
 #define COMEDI_LOCK _IO(CIO, 5)
 #define COMEDI_UNLOCK _IO(CIO, 6)
 #define COMEDI_CANCEL _IO(CIO, 7)
@@ -374,21 +463,19 @@
 
 /* structures */
 
-struct comedi_trig {
-	unsigned int subdev;	/* subdevice */
-	unsigned int mode;	/* mode */
-	unsigned int flags;
-	unsigned int n_chan;	/* number of channels */
-	unsigned int *chanlist;	/* channel/range list */
-	short *data;		/* data list, size depends on subd flags */
-	unsigned int n;		/* number of scans */
-	unsigned int trigsrc;
-	unsigned int trigvar;
-	unsigned int trigvar1;
-	unsigned int data_len;
-	unsigned int unused[3];
-};
-
+/**
+ * struct comedi_insn - COMEDI instruction
+ * @insn:	COMEDI instruction type (%INSN_xxx).
+ * @n:		Length of @data[].
+ * @data:	Pointer to data array operated on by the instruction.
+ * @subdev:	Subdevice index.
+ * @chanspec:	A packed "chanspec" value consisting of channel number,
+ *		analog range index, analog reference type, and flags.
+ * @unused:	Reserved for future use.
+ *
+ * This is used with the %COMEDI_INSN ioctl, and indirectly with the
+ * %COMEDI_INSNLIST ioctl.
+ */
 struct comedi_insn {
 	unsigned int insn;
 	unsigned int n;
@@ -398,11 +485,95 @@
 	unsigned int unused[3];
 };
 
+/**
+ * struct comedi_insnlist - list of COMEDI instructions
+ * @n_insns:	Number of COMEDI instructions.
+ * @insns:	Pointer to array of COMEDI instructions.
+ *
+ * This is used with the %COMEDI_INSNLIST ioctl.
+ */
 struct comedi_insnlist {
 	unsigned int n_insns;
 	struct comedi_insn __user *insns;
 };
 
+/**
+ * struct comedi_cmd - COMEDI asynchronous acquisition command details
+ * @subdev:		Subdevice index.
+ * @flags:		Command flags (%CMDF_xxx).
+ * @start_src:		"Start acquisition" trigger source (%TRIG_xxx).
+ * @start_arg:		"Start acquisition" trigger argument.
+ * @scan_begin_src:	"Scan begin" trigger source.
+ * @scan_begin_arg:	"Scan begin" trigger argument.
+ * @convert_src:	"Convert" trigger source.
+ * @convert_arg:	"Convert" trigger argument.
+ * @scan_end_src:	"Scan end" trigger source.
+ * @scan_end_arg:	"Scan end" trigger argument.
+ * @stop_src:		"Stop acquisition" trigger source.
+ * @stop_arg:		"Stop acquisition" trigger argument.
+ * @chanlist:		Pointer to array of "chanspec" values, containing a
+ *			sequence of channel numbers packed with analog range
+ *			index, etc.
+ * @chanlist_len:	Number of channels in sequence.
+ * @data:		Pointer to miscellaneous set-up data (not used).
+ * @data_len:		Length of miscellaneous set-up data.
+ *
+ * This is used with the %COMEDI_CMD or %COMEDI_CMDTEST ioctl to set up
+ * or validate an asynchronous acquisition command.  The ioctl may modify
+ * the &struct comedi_cmd and copy it back to the caller.
+ *
+ * Optional command @flags values that can be ORed together...
+ *
+ * %CMDF_BOGUS - makes %COMEDI_CMD ioctl return error %EAGAIN instead of
+ * starting the command.
+ *
+ * %CMDF_PRIORITY - requests "hard real-time" processing (which is not
+ * supported in this version of COMEDI).
+ *
+ * %CMDF_WAKE_EOS - requests the command makes data available for reading
+ * after every "scan" period.
+ *
+ * %CMDF_WRITE - marks the command as being in the "write" (to device)
+ * direction.  This does not need to be specified by the caller unless the
+ * subdevice supports commands in either direction.
+ *
+ * %CMDF_RAWDATA - prevents the command from "munging" the data between the
+ * COMEDI sample format and the raw hardware sample format.
+ *
+ * %CMDF_ROUND_NEAREST - requests timing periods to be rounded to nearest
+ * supported values.
+ *
+ * %CMDF_ROUND_DOWN - requests timing periods to be rounded down to supported
+ * values (frequencies rounded up).
+ *
+ * %CMDF_ROUND_UP - requests timing periods to be rounded up to supported
+ * values (frequencies rounded down).
+ *
+ * Trigger source values for @start_src, @scan_begin_src, @convert_src,
+ * @scan_end_src, and @stop_src...
+ *
+ * %TRIG_ANY - "all ones" value used to test which trigger sources are
+ * supported.
+ *
+ * %TRIG_INVALID - "all zeroes" value used to indicate that all requested
+ * trigger sources are invalid.
+ *
+ * %TRIG_NONE - never trigger (often used as a @stop_src value).
+ *
+ * %TRIG_NOW - trigger after '_arg' nanoseconds.
+ *
+ * %TRIG_FOLLOW - trigger follows another event.
+ *
+ * %TRIG_TIMER - trigger every '_arg' nanoseconds.
+ *
+ * %TRIG_COUNT - trigger when count '_arg' is reached.
+ *
+ * %TRIG_EXT - trigger on external signal specified by '_arg'.
+ *
+ * %TRIG_INT - trigger on internal, software trigger specified by '_arg'.
+ *
+ * %TRIG_OTHER - trigger on other, driver-defined signal specified by '_arg'.
+ */
 struct comedi_cmd {
 	unsigned int subdev;
 	unsigned int flags;
@@ -422,13 +593,31 @@
 	unsigned int stop_src;
 	unsigned int stop_arg;
 
-	unsigned int *chanlist;	/* channel/range list */
+	unsigned int *chanlist;
 	unsigned int chanlist_len;
 
-	short __user *data; /* data list, size depends on subd flags */
+	short __user *data;
 	unsigned int data_len;
 };
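
A hedged sketch of filling the command structure documented above for a timed two-channel analog-input acquisition; all timing values and the channel packing are illustrative, and a real caller would loop on %COMEDI_CMDTEST, adjusting arguments, until it returns 0 before issuing %COMEDI_CMD:

#include <string.h>
#include <sys/ioctl.h>
#include <comedi.h>	/* userspace copy of this header; path is illustrative */

static int start_timed_ai(int dev_fd, unsigned int subdev)
{
	unsigned int chanlist[2];
	struct comedi_cmd cmd;

	/* channels 0 and 1, range index 0, ground reference */
	chanlist[0] = CR_PACK(0, 0, AREF_GROUND);
	chanlist[1] = CR_PACK(1, 0, AREF_GROUND);

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = subdev;
	cmd.start_src = TRIG_NOW;		/* start immediately */
	cmd.start_arg = 0;
	cmd.scan_begin_src = TRIG_TIMER;	/* one scan every millisecond */
	cmd.scan_begin_arg = 1000000;		/* nanoseconds */
	cmd.convert_src = TRIG_TIMER;		/* 10 us between conversions */
	cmd.convert_arg = 10000;
	cmd.scan_end_src = TRIG_COUNT;		/* scan ends after both channels */
	cmd.scan_end_arg = 2;
	cmd.stop_src = TRIG_COUNT;		/* stop after 1000 scans */
	cmd.stop_arg = 1000;
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 2;

	if (ioctl(dev_fd, COMEDI_CMDTEST, &cmd) != 0)
		return -1;	/* command was adjusted or rejected */

	return ioctl(dev_fd, COMEDI_CMD, &cmd);
}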
 
+/**
+ * struct comedi_chaninfo - used to retrieve per-channel information
+ * @subdev:		Subdevice index.
+ * @maxdata_list:	Optional pointer to per-channel maximum data values.
+ * @flaglist:		Optional pointer to per-channel flags.
+ * @rangelist:		Optional pointer to per-channel range types.
+ * @unused:		Reserved for future use.
+ *
+ * This is used with the %COMEDI_CHANINFO ioctl to get per-channel information
+ * for the subdevice.  Use of this requires knowledge of the number of channels
+ * and subdevice flags obtained using the %COMEDI_SUBDINFO ioctl.
+ *
+ * The @maxdata_list member must be %NULL unless the %SDF_MAXDATA subdevice
+ * flag is set.  The @flaglist member must be %NULL unless the %SDF_FLAGS
+ * subdevice flag is set.  The @rangelist member must be %NULL unless the
+ * %SDF_RANGETYPE subdevice flag is set.  Otherwise, the arrays they point to
+ * must be at least as long as the number of channels.
+ */
 struct comedi_chaninfo {
 	unsigned int subdev;
 	unsigned int __user *maxdata_list;
@@ -437,17 +626,149 @@
 	unsigned int unused[4];
 };
 
+/**
+ * struct comedi_rangeinfo - used to retrieve the range table for a channel
+ * @range_type:		Encodes subdevice index (bits 27:24), channel index
+ *			(bits 23:16) and range table length (bits 15:0).
+ * @range_ptr:		Pointer to array of &struct comedi_krange to be filled
+ *			in with the range table for the channel or subdevice.
+ *
+ * This is used with the %COMEDI_RANGEINFO ioctl to retrieve the range table
+ * for a specific channel (if the subdevice has the %SDF_RANGETYPE flag set to
+ * indicate that the range table depends on the channel), or for the subdevice
+ * as a whole (if the %SDF_RANGETYPE flag is clear, indicating the range table
+ * is shared by all channels).
+ *
+ * The @range_type value is an input to the ioctl and comes from a previous
+ * use of the %COMEDI_SUBDINFO ioctl (if the %SDF_RANGETYPE flag is clear),
+ * or the %COMEDI_CHANINFO ioctl (if the %SDF_RANGETYPE flag is set).
+ */
 struct comedi_rangeinfo {
 	unsigned int range_type;
 	void __user *range_ptr;
 };
 
+/**
+ * struct comedi_krange - describes a range in a range table
+ * @min:	Minimum value in millionths (1e-6) of a unit.
+ * @max:	Maximum value in millionths (1e-6) of a unit.
+ * @flags:	Indicates the units (in bits 7:0) OR'ed with optional flags.
+ *
+ * A range table is associated with a single channel, or with all channels in a
+ * subdevice, and a list of one or more ranges.  A %struct comedi_krange
+ * describes the physical range of units for one of those ranges.  Sample
+ * values in COMEDI are unsigned from %0 up to some 'maxdata' value.  The
+ * mapping from sample values to physical units is assumed to be nominally
+ * linear (for the purpose of describing the range), with sample value %0
+ * mapping to @min, and the 'maxdata' sample value mapping to @max.
+ *
+ * The currently defined units are %UNIT_volt (%0), %UNIT_mA (%1), and
+ * %UNIT_none (%2).  The @min and @max values are the physical range multiplied
+ * by 1e6, so a @max value of %1000000 (with %UNIT_volt) represents a maximal
+ * value of 1 volt.
+ *
+ * The only defined flag value is %RF_external (%1 << %8), indicating that
+ * the range needs to be multiplied by an external reference.
+ */
 struct comedi_krange {
-	int min;	/* fixed point, multiply by 1e-6 */
-	int max;	/* fixed point, multiply by 1e-6 */
+	int min;
+	int max;
 	unsigned int flags;
 };
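
Given the linear mapping just described (sample %0 maps to @min, 'maxdata' maps to @max, both in millionths of a unit), converting a raw sample to physical units is a small illustrative helper, assuming a userspace copy of this header for struct comedi_krange:

#include <comedi.h>	/* userspace copy of this header; path is illustrative */

/* Convert a raw sample to physical units using one comedi_krange entry. */
static double krange_to_phys(unsigned int sample, unsigned int maxdata,
			     const struct comedi_krange *range)
{
	double span = (double)range->max - (double)range->min;

	return 1e-6 * ((double)range->min + span * sample / maxdata);
}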
 
+/**
+ * struct comedi_subdinfo - used to retrieve information about a subdevice
+ * @type:		Type of subdevice from &enum comedi_subdevice_type.
+ * @n_chan:		Number of channels the subdevice supports.
+ * @subd_flags:		A mixture of static and dynamic flags describing
+ *			aspects of the subdevice and its current state.
+ * @timer_type:		Timer type.  Always set to %5 ("nanosecond timer").
+ * @len_chanlist:	Maximum length of a channel list if the subdevice
+ *			supports asynchronous acquisition commands.
+ * @maxdata:		Maximum sample value for all channels if the
+ *			%SDF_MAXDATA subdevice flag is clear.
+ * @flags:		Channel flags for all channels if the %SDF_FLAGS
+ *			subdevice flag is clear.
+ * @range_type:		The range type for all channels if the %SDF_RANGETYPE
+ *			subdevice flag is clear.  Encodes the subdevice index
+ *			(bits 27:24), a dummy channel index %0 (bits 23:16),
+ *			and the range table length (bits 15:0).
+ * @settling_time_0:	Not used.
+ * @insn_bits_support:	Set to %COMEDI_SUPPORTED if the subdevice supports the
+ *			%INSN_BITS instruction, or to %COMEDI_UNSUPPORTED if it
+ *			does not.
+ * @unused:		Reserved for future use.
+ *
+ * This is used with the %COMEDI_SUBDINFO ioctl which copies an array of
+ * &struct comedi_subdinfo back to user space, with one element per subdevice.
+ * Use of this requires knowledge of the number of subdevices obtained from
+ * the %COMEDI_DEVINFO ioctl.
+ *
+ * These are the @subd_flags values that may be ORed together...
+ *
+ * %SDF_BUSY - the subdevice is busy processing an asynchronous command or a
+ * synchronous instruction.
+ *
+ * %SDF_BUSY_OWNER - the subdevice is busy processing an asynchronous
+ * acquisition command started on the current file object (the file object
+ * issuing the %COMEDI_SUBDINFO ioctl).
+ *
+ * %SDF_LOCKED - the subdevice is locked by a %COMEDI_LOCK ioctl.
+ *
+ * %SDF_LOCK_OWNER - the subdevice is locked by a %COMEDI_LOCK ioctl from the
+ * current file object.
+ *
+ * %SDF_MAXDATA - maximum sample values are channel-specific.
+ *
+ * %SDF_FLAGS - channel flags are channel-specific.
+ *
+ * %SDF_RANGETYPE - range types are channel-specific.
+ *
+ * %SDF_PWM_COUNTER - PWM can switch off automatically.
+ *
+ * %SDF_PWM_HBRIDGE - PWM is signed (H-bridge).
+ *
+ * %SDF_CMD - the subdevice supports asynchronous commands.
+ *
+ * %SDF_SOFT_CALIBRATED - the subdevice uses software calibration.
+ *
+ * %SDF_CMD_WRITE - the subdevice supports asynchronous commands in the output
+ * ("write") direction.
+ *
+ * %SDF_CMD_READ - the subdevice supports asynchronous commands in the input
+ * ("read") direction.
+ *
+ * %SDF_READABLE - the subdevice is readable (e.g. analog input).
+ *
+ * %SDF_WRITABLE (aliased as %SDF_WRITEABLE) - the subdevice is writable (e.g.
+ * analog output).
+ *
+ * %SDF_INTERNAL - the subdevice has no externally visible lines.
+ *
+ * %SDF_GROUND - the subdevice can use ground as an analog reference.
+ *
+ * %SDF_COMMON - the subdevice can use a common analog reference.
+ *
+ * %SDF_DIFF - the subdevice can use differential inputs (or outputs).
+ *
+ * %SDF_OTHER - the subdevice can use some other analog reference.
+ *
+ * %SDF_DITHER - the subdevice can do dithering.
+ *
+ * %SDF_DEGLITCH - the subdevice can do deglitching.
+ *
+ * %SDF_MMAP - this is never set.
+ *
+ * %SDF_RUNNING - an asynchronous command is still running.
+ *
+ * %SDF_LSAMPL - the subdevice uses "long" (32-bit) samples (for asynchronous
+ * command data).
+ *
+ * %SDF_PACKED - the subdevice packs several DIO samples into a single sample
+ * (for asynchronous command data).
+ *
+ * No "channel flags" (@flags) values are currently defined.
+ */
 struct comedi_subdinfo {
 	unsigned int type;
 	unsigned int n_chan;
@@ -455,14 +776,26 @@
 	unsigned int timer_type;
 	unsigned int len_chanlist;
 	unsigned int maxdata;
-	unsigned int flags;		/* channel flags */
-	unsigned int range_type;	/* lookup in kernel */
+	unsigned int flags;
+	unsigned int range_type;
 	unsigned int settling_time_0;
-	/* see support_level enum for values */
 	unsigned insn_bits_support;
 	unsigned int unused[8];
 };
 
+/**
+ * struct comedi_devinfo - used to retrieve information about a COMEDI device
+ * @version_code:	COMEDI version code.
+ * @n_subdevs:		Number of subdevices the device has.
+ * @driver_name:	Null-terminated COMEDI driver name.
+ * @board_name:		Null-terminated COMEDI board name.
+ * @read_subdevice:	Index of the current "read" subdevice (%-1 if none).
+ * @write_subdevice:	Index of the current "write" subdevice (%-1 if none).
+ * @unused:		Reserved for future use.
+ *
+ * This is used with the %COMEDI_DEVINFO ioctl to get basic information about
+ * the device.
+ */
 struct comedi_devinfo {
 	unsigned int version_code;
 	unsigned int n_subdevs;
@@ -473,11 +806,45 @@
 	int unused[30];
 };
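For reference, the %COMEDI_DEVINFO ioctl documented above can be exercised from user space roughly as follows.  This is only a sketch: the /dev/comedi0 node, the <comedi.h> header location and the error handling are illustrative assumptions, not part of this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <comedi.h>	/* assumed install location of the comedi UAPI definitions */

int main(void)
{
	struct comedi_devinfo info;
	int fd = open("/dev/comedi0", O_RDWR);	/* illustrative device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, COMEDI_DEVINFO, &info) < 0) {
		perror("COMEDI_DEVINFO");
		close(fd);
		return 1;
	}
	/* driver_name and board_name are null-terminated per the description above */
	printf("driver=%s board=%s subdevices=%u read_subdev=%d write_subdev=%d\n",
	       info.driver_name, info.board_name, info.n_subdevs,
	       info.read_subdevice, info.write_subdevice);
	close(fd);
	return 0;
}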
 
+/**
+ * struct comedi_devconfig - used to configure a legacy COMEDI device
+ * @board_name:		Null-terminated string specifying the type of board
+ *			to configure.
+ * @options:		An array of integer configuration options.
+ *
+ * This is used with the %COMEDI_DEVCONFIG ioctl to configure a "legacy" COMEDI
+ * device, such as an ISA card.  Not all COMEDI drivers support this.  Those
+ * that do either expect the specified board name to match one of a list of
+ * names registered with the COMEDI core, or expect the specified board name
+ * to match the COMEDI driver name itself.  The configuration options are
+ * handled in a driver-specific manner.
+ */
 struct comedi_devconfig {
 	char board_name[COMEDI_NAMELEN];
 	int options[COMEDI_NDEVCONFOPTS];
 };
 
+/**
+ * struct comedi_bufconfig - used to set or get buffer size for a subdevice
+ * @subdevice:		Subdevice index.
+ * @flags:		Not used.
+ * @maximum_size:	Maximum allowed buffer size.
+ * @size:		Buffer size.
+ * @unused:		Reserved for future use.
+ *
+ * This is used with the %COMEDI_BUFCONFIG ioctl to get or configure the
+ * maximum buffer size and current buffer size for a COMEDI subdevice that
+ * supports asynchronous commands.  If the subdevice does not support
+ * asynchronous commands, @maximum_size and @size are ignored and set to 0.
+ *
+ * On ioctl input, non-zero values of @maximum_size and @size specify a
+ * new maximum size and new current size (in bytes), respectively.  These
+ * will be rounded up to a multiple of %PAGE_SIZE.  Specifying a new maximum
+ * size requires admin capabilities.
+ *
+ * On ioctl output, @maximum_size and @size are set to the current maximum
+ * buffer size and current buffer size, respectively.
+ */
 struct comedi_bufconfig {
 	unsigned int subdevice;
 	unsigned int flags;
@@ -488,6 +855,23 @@
 	unsigned int unused[4];
 };
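A similarly hedged sketch of the %COMEDI_BUFCONFIG usage described above, setting a new current buffer size for a streaming subdevice.  The subdevice index, the 64 KiB size and the device path are illustrative; as noted, the kernel rounds the sizes up to a multiple of PAGE_SIZE, and changing @maximum_size additionally requires admin capabilities.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <comedi.h>	/* assumed install location of the comedi UAPI definitions */

int main(void)
{
	struct comedi_bufconfig bc;
	int fd = open("/dev/comedi0", O_RDWR);	/* illustrative device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&bc, 0, sizeof(bc));
	bc.subdevice = 0;	/* illustrative subdevice index */
	bc.maximum_size = 0;	/* 0 = leave the maximum unchanged */
	bc.size = 64 * 1024;	/* new current size; rounded up to PAGE_SIZE */
	if (ioctl(fd, COMEDI_BUFCONFIG, &bc) < 0)
		perror("COMEDI_BUFCONFIG");
	else
		printf("size=%u maximum_size=%u\n", bc.size, bc.maximum_size);
	close(fd);
	return 0;
}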
 
+/**
+ * struct comedi_bufinfo - used to manipulate buffer position for a subdevice
+ * @subdevice:		Subdevice index.
+ * @bytes_read:		Specify amount to advance read position for an
+ *			asynchronous command in the input ("read") direction.
+ * @buf_write_ptr:	Current write position (index) within the buffer.
+ * @buf_read_ptr:	Current read position (index) within the buffer.
+ * @buf_write_count:	Total amount written, modulo 2^32.
+ * @buf_read_count:	Total amount read, modulo 2^32.
+ * @bytes_written:	Specify amount to advance write position for an
+ *			asynchronous command in the output ("write") direction.
+ * @unused:		Reserved for future use.
+ *
+ * This is used with the %COMEDI_BUFINFO ioctl to optionally advance the
+ * current read or write position in an asynchronous acquisition data buffer,
+ * and to get the current read and write positions in the buffer.
+ */
 struct comedi_bufinfo {
 	unsigned int subdevice;
 	unsigned int bytes_read;
@@ -510,13 +894,13 @@
 #define RANGE_LENGTH(b)		((b) & 0xffff)
 
 #define RF_UNIT(flags)		((flags) & 0xff)
-#define RF_EXTERNAL		(1 << 8)
+#define RF_EXTERNAL		BIT(8)
 
 #define UNIT_volt		0
 #define UNIT_mA			1
 #define UNIT_none		2
 
-#define COMEDI_MIN_SPEED	((unsigned int)0xffffffff)
+#define COMEDI_MIN_SPEED	0xffffffffu
 
 /**********************************************************/
 /* everything after this line is ALPHA */
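As a quick illustration of the @range_type packing documented for &struct comedi_subdinfo above (subdevice index in bits 27:24, dummy channel index in bits 23:16, range table length in bits 15:0) and of the RANGE_LENGTH() macro, here is a stand-alone sketch; the sample value is made up for illustration.

#include <assert.h>

#define RANGE_LENGTH(b)		((b) & 0xffff)	/* same arithmetic as the macro above */

int main(void)
{
	/* hypothetical encoding: subdevice 2, channel 0, 5 entries in the range table */
	unsigned int range_type = (2u << 24) | (0u << 16) | 5u;

	assert(((range_type >> 24) & 0xf) == 2);	/* subdevice index, bits 27:24 */
	assert(((range_type >> 16) & 0xff) == 0);	/* channel index, bits 23:16 */
	assert(RANGE_LENGTH(range_type) == 5);		/* table length, bits 15:0 */
	return 0;
}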
@@ -849,13 +1233,6 @@
 #define NI_EXT_PFI(x)			(NI_USUAL_PFI_SELECT(x) - 1)
 #define NI_EXT_RTSI(x)			(NI_USUAL_RTSI_SELECT(x) - 1)
 
-/* status bits for INSN_CONFIG_GET_COUNTER_STATUS */
-enum comedi_counter_status_flags {
-	COMEDI_COUNTER_ARMED = 0x1,
-	COMEDI_COUNTER_COUNTING = 0x2,
-	COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
-};
-
 /*
  * Clock sources for CDIO subdevice on NI m-series boards.  Used as the
  * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d57fade..d1cf6a1 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -686,13 +686,6 @@
 	return comedi_is_runflags_running(runflags);
 }
 
-static bool comedi_is_subdevice_idle(struct comedi_subdevice *s)
-{
-	unsigned runflags = comedi_get_subdevice_runflags(s);
-
-	return !(runflags & COMEDI_SRF_BUSY_MASK);
-}
-
 bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
 {
 	unsigned runflags = __comedi_get_subdevice_runflags(s);
@@ -1111,6 +1104,9 @@
 	struct comedi_bufinfo bi;
 	struct comedi_subdevice *s;
 	struct comedi_async *async;
+	unsigned int runflags;
+	int retval = 0;
+	bool become_nonbusy = false;
 
 	if (copy_from_user(&bi, arg, sizeof(bi)))
 		return -EFAULT;
@@ -1122,48 +1118,56 @@
 
 	async = s->async;
 
-	if (!async) {
-		dev_dbg(dev->class_dev,
-			"subdevice does not have async capability\n");
-		bi.buf_write_ptr = 0;
-		bi.buf_read_ptr = 0;
-		bi.buf_write_count = 0;
-		bi.buf_read_count = 0;
-		bi.bytes_read = 0;
-		bi.bytes_written = 0;
-		goto copyback;
-	}
-	if (!s->busy) {
-		bi.bytes_read = 0;
-		bi.bytes_written = 0;
-		goto copyback_position;
-	}
-	if (s->busy != file)
-		return -EACCES;
+	if (!async || s->busy != file)
+		return -EINVAL;
 
-	if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) {
-		bi.bytes_read = comedi_buf_read_alloc(s, bi.bytes_read);
-		comedi_buf_read_free(s, bi.bytes_read);
-
-		if (comedi_is_subdevice_idle(s) &&
-		    comedi_buf_read_n_available(s) == 0) {
-			do_become_nonbusy(dev, s);
+	runflags = comedi_get_subdevice_runflags(s);
+	if (!(async->cmd.flags & CMDF_WRITE)) {
+		/* command was set up in "read" direction */
+		if (bi.bytes_read) {
+			comedi_buf_read_alloc(s, bi.bytes_read);
+			bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read);
 		}
+		/*
+		 * If nothing left to read, and command has stopped, and
+		 * {"read" position not updated or command stopped normally},
+		 * then become non-busy.
+		 */
+		if (comedi_buf_read_n_available(s) == 0 &&
+		    !comedi_is_runflags_running(runflags) &&
+		    (bi.bytes_read == 0 ||
+		     !comedi_is_runflags_in_error(runflags))) {
+			become_nonbusy = true;
+			if (comedi_is_runflags_in_error(runflags))
+				retval = -EPIPE;
+		}
+		bi.bytes_written = 0;
+	} else {
+		/* command was set up in "write" direction */
+		if (!comedi_is_runflags_running(runflags)) {
+			bi.bytes_written = 0;
+			become_nonbusy = true;
+			if (comedi_is_runflags_in_error(runflags))
+				retval = -EPIPE;
+		} else if (bi.bytes_written) {
+			comedi_buf_write_alloc(s, bi.bytes_written);
+			bi.bytes_written =
+			    comedi_buf_write_free(s, bi.bytes_written);
+		}
+		bi.bytes_read = 0;
 	}
 
-	if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) {
-		bi.bytes_written =
-		    comedi_buf_write_alloc(s, bi.bytes_written);
-		comedi_buf_write_free(s, bi.bytes_written);
-	}
-
-copyback_position:
 	bi.buf_write_count = async->buf_write_count;
 	bi.buf_write_ptr = async->buf_write_ptr;
 	bi.buf_read_count = async->buf_read_count;
 	bi.buf_read_ptr = async->buf_read_ptr;
 
-copyback:
+	if (become_nonbusy)
+		do_become_nonbusy(dev, s);
+
+	if (retval)
+		return retval;
+
 	if (copy_to_user(arg, &bi, sizeof(bi)))
 		return -EFAULT;
 
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h
index 5d3db2b..5a572c2 100644
--- a/drivers/staging/comedi/comedi_pcmcia.h
+++ b/drivers/staging/comedi/comedi_pcmcia.h
@@ -39,7 +39,8 @@
 				     struct pcmcia_driver *);
 
 /**
- * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
+ * PCMCIA driver
  * @__comedi_driver: comedi_driver struct
  * @__pcmcia_driver: pcmcia_driver struct
  *
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 995096c..b6af3eb 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -496,7 +496,7 @@
 		switch (flags & CMDF_ROUND_MASK) {
 		case CMDF_ROUND_NEAREST:
 		default:
-			timer = (*ns + base / 2) / base;
+			timer = DIV_ROUND_CLOSEST(*ns, base);
 			break;
 		case CMDF_ROUND_DOWN:
 			timer = *ns / base;
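This hunk and the similar divisor hunks below replace the open-coded (*ns + base / 2) / base rounding with the kernel's DIV_ROUND_CLOSEST() helper; for the unsigned values involved the two forms compute the same result.  A stand-alone sketch of the rounding behaviour, using a local macro since the kernel header is not available outside the kernel:

#include <assert.h>

/* same arithmetic as DIV_ROUND_CLOSEST() for non-negative operands */
#define ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* 1000 ns against a 300 ns timebase: 3.33 rounds down to 3 */
	assert(ROUND_CLOSEST(1000u, 300u) == 3);
	/* 1050 ns against a 300 ns timebase: 3.5 rounds up to 4 */
	assert(ROUND_CLOSEST(1050u, 300u) == 4);
	return 0;
}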
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 4b39f69..d1fe88a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -637,7 +637,7 @@
 	switch (flags & CMDF_ROUND_MASK) {
 	default:
 	case CMDF_ROUND_NEAREST:
-		div += (rem + (timebase / 2)) / timebase;
+		div += DIV_ROUND_CLOSEST(rem, timebase);
 		break;
 	case CMDF_ROUND_DOWN:
 		break;
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index d33b8fe..e918d42 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1376,7 +1376,7 @@
 		num_entries = fifo->max_segment_length;
 
 	/*  1 == 256 entries, 2 == 512 entries, etc */
-	num_increments = (num_entries + increment_size / 2) / increment_size;
+	num_increments = DIV_ROUND_CLOSEST(num_entries, increment_size);
 
 	bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask;
 	devpriv->fifo_size_bits &= ~fifo->fifo_size_reg_mask;
@@ -2004,7 +2004,7 @@
 		break;
 	case CMDF_ROUND_NEAREST:
 	default:
-		divisor = (ns + TIMER_BASE / 2) / TIMER_BASE;
+		divisor = DIV_ROUND_CLOSEST(ns, TIMER_BASE);
 		break;
 	}
 	return divisor;
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index 6ba71d1..68ef9b1 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -132,8 +132,7 @@
 		result = result1;
 	if (result >= desc->size || result == 0)
 		return 0;
-	else
-		return desc->size - result;
+	return desc->size - result;
 }
 EXPORT_SYMBOL_GPL(comedi_isadma_poll);
 
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 80e38de..6c7b4d2 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -68,17 +68,17 @@
 /* Command modifiers (only used with read/write), EXTTRIG can be
    used with some other commands.
 */
-#define DT_MOD_DMA     (1<<4)
-#define DT_MOD_CONT    (1<<5)
-#define DT_MOD_EXTCLK  (1<<6)
-#define DT_MOD_EXTTRIG (1<<7)
+#define DT_MOD_DMA     BIT(4)
+#define DT_MOD_CONT    BIT(5)
+#define DT_MOD_EXTCLK  BIT(6)
+#define DT_MOD_EXTTRIG BIT(7)
 
 /* Bits in status register */
-#define DT_S_DATA_OUT_READY   (1<<0)
-#define DT_S_DATA_IN_FULL     (1<<1)
-#define DT_S_READY            (1<<2)
-#define DT_S_COMMAND          (1<<3)
-#define DT_S_COMPOSITE_ERROR  (1<<7)
+#define DT_S_DATA_OUT_READY   BIT(0)
+#define DT_S_DATA_IN_FULL     BIT(1)
+#define DT_S_READY            BIT(2)
+#define DT_S_COMMAND          BIT(3)
+#define DT_S_COMPOSITE_ERROR  BIT(7)
 
 /* registers */
 #define DT2801_DATA		0
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 5a536a0..6c26e09 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -371,7 +371,7 @@
 		switch (flags & CMDF_ROUND_MASK) {
 		case CMDF_ROUND_NEAREST:
 		default:
-			divider = (*ns + base / 2) / base;
+			divider = DIV_ROUND_CLOSEST(*ns, base);
 			break;
 		case CMDF_ROUND_DOWN:
 			divider = (*ns) / base;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index ab7a332..19e0b7b 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -361,7 +361,7 @@
 		switch (flags & CMDF_ROUND_MASK) {
 		case CMDF_ROUND_NEAREST:
 		default:
-			divider = (*nanosec + base / 2) / base;
+			divider = DIV_ROUND_CLOSEST(*nanosec, base);
 			break;
 		case CMDF_ROUND_DOWN:
 			divider = (*nanosec) / base;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index fa7ae2c..8f24702 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -297,7 +297,6 @@
 {
 	struct comedi_async *async = s->async;
 	unsigned int n_links;
-	int i;
 
 	if (ring->descriptors) {
 		dma_free_coherent(ring->hw_dev,
@@ -326,17 +325,58 @@
 	}
 	ring->n_links = n_links;
 
-	for (i = 0; i < n_links; i++) {
+	return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mite_buf_change);
+
+/*
+ * Initializes the ring buffer descriptors to provide correct DMA transfer
+ * links to the exact amount of memory required.  When the ring buffer is
+ * allocated in mite_buf_change, the default is to initialize the ring to
+ * refer to the entire DMA data buffer.  A command may call this function
+ * later to re-initialize and shorten the amount of memory that will be
+ * transferred.
+ */
+int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
+			       struct comedi_subdevice *s,
+			       unsigned int nbytes)
+{
+	struct comedi_async *async = s->async;
+	unsigned int n_full_links = nbytes >> PAGE_SHIFT;
+	unsigned int remainder = nbytes % PAGE_SIZE;
+	int i;
+
+	dev_dbg(s->device->class_dev,
+		"mite: init ring buffer to %u bytes\n", nbytes);
+
+	if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
+		dev_err(s->device->class_dev,
+			"mite: ring buffer too small for requested init\n");
+		return -ENOMEM;
+	}
+
+	/* We set the descriptors for all full links. */
+	for (i = 0; i < n_full_links; ++i) {
 		ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
 		ring->descriptors[i].addr =
 		    cpu_to_le32(async->buf_map->page_list[i].dma_addr);
 		ring->descriptors[i].next =
-		    cpu_to_le32(ring->descriptors_dma_addr + (i +
-							      1) *
-				sizeof(struct mite_dma_descriptor));
+		    cpu_to_le32(ring->descriptors_dma_addr +
+				(i + 1) * sizeof(struct mite_dma_descriptor));
 	}
-	ring->descriptors[n_links - 1].next =
-	    cpu_to_le32(ring->descriptors_dma_addr);
+
+	/* the last link is either a remainder or was a full link. */
+	if (remainder > 0) {
+		/* set the lesser count for the remainder link */
+		ring->descriptors[i].count = cpu_to_le32(remainder);
+		ring->descriptors[i].addr =
+		    cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+		/* increment i so that assignment below refs last link */
+		++i;
+	}
+
+	/* Assign the last link->next to point back to the head of the list. */
+	ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr);
+
 	/*
 	 * barrier is meant to insure that all the writes to the dma descriptors
 	 * have completed before the dma controller is commanded to read them
@@ -344,7 +384,7 @@
 	smp_wmb();
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mite_buf_change);
+EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
 
 void mite_prep_dma(struct mite_channel *mite_chan,
 		   unsigned int num_device_bits, unsigned int num_memory_bits)
@@ -552,6 +592,7 @@
 	unsigned int old_alloc_count = async->buf_read_alloc_count;
 	u32 nbytes_ub, nbytes_lb;
 	int count;
+	bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
 
 	/* read alloc as much as we can */
 	comedi_buf_read_alloc(s, async->prealloc_bufsz);
@@ -561,11 +602,24 @@
 	nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
 	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
 		nbytes_ub = stop_count;
-	if ((int)(nbytes_ub - old_alloc_count) > 0) {
+
+	if ((!finite_regen || stop_count > old_alloc_count) &&
+	    ((int)(nbytes_ub - old_alloc_count) > 0)) {
 		dev_warn(s->device->class_dev, "mite: DMA underrun\n");
 		async->events |= COMEDI_CB_OVERFLOW;
 		return -1;
 	}
+
+	if (finite_regen) {
+		/*
+		 * This is a special case where we continuously output a finite
+		 * buffer.  In this case, we do not free any of the memory,
+		 * hence we expect that old_alloc_count will reach a maximum of
+		 * stop_count bytes.
+		 */
+		return 0;
+	}
+
 	count = nbytes_lb - async->buf_read_count;
 	if (count <= 0)
 		return 0;
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index c32d4e4..87534b0 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -110,6 +110,9 @@
 		   unsigned int num_device_bits, unsigned int num_memory_bits);
 int mite_buf_change(struct mite_dma_descriptor_ring *ring,
 		    struct comedi_subdevice *s);
+int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
+			       struct comedi_subdevice *s,
+			       unsigned int nbytes);
 
 enum mite_registers {
 	/*
diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_mio_c_common.c
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5e8130a..1c2abb3 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1166,8 +1166,7 @@
 			comedi_buf_write_samples(s, &data, 1);
 		}
 	} else {
-		if (n > sizeof(devpriv->ai_fifo_buffer) /
-		    sizeof(devpriv->ai_fifo_buffer[0])) {
+		if (n > ARRAY_SIZE(devpriv->ai_fifo_buffer)) {
 			dev_err(dev->class_dev,
 				"bug! ai_fifo_buffer too small\n");
 			async->events |= COMEDI_CB_ERROR;
@@ -1242,9 +1241,7 @@
 			     NISTC_AI_STATUS1_FIFO_E;
 		while (fifo_empty == 0) {
 			for (i = 0;
-			     i <
-			     sizeof(devpriv->ai_fifo_buffer) /
-			     sizeof(devpriv->ai_fifo_buffer[0]); i++) {
+			     i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) {
 				fifo_empty = ni_stc_readw(dev,
 							  NISTC_AI_STATUS1_REG) &
 						NISTC_AI_STATUS1_FIFO_E;
@@ -1500,7 +1497,8 @@
 		s->async->events |= COMEDI_CB_OVERFLOW;
 	}
 
-	if (b_status & NISTC_AO_STATUS1_BC_TC)
+	if (s->async->cmd.stop_src != TRIG_NONE &&
+	    b_status & NISTC_AO_STATUS1_BC_TC)
 		s->async->events |= COMEDI_CB_EOA;
 
 #ifndef PCIDMA
@@ -2073,6 +2071,37 @@
 	return devpriv->clock_ns * (timer + 1);
 }
 
+static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
+				     struct comedi_subdevice *sdev,
+				     const struct comedi_cmd *cmd,
+				     unsigned int max_count)
+{
+#ifdef PCIDMA
+	unsigned int nbytes = max_count;
+
+	if (cmd->stop_arg > 0 && cmd->stop_arg < max_count)
+		nbytes = cmd->stop_arg;
+	nbytes *= comedi_bytes_per_scan(sdev);
+
+	if (nbytes > sdev->async->prealloc_bufsz) {
+		if (cmd->stop_arg > 0)
+			dev_err(sdev->device->class_dev,
+				"ni_cmd_set_mite_transfer: tried exact data transfer limits greater than buffer size\n");
+
+		/*
+		 * we can only transfer up to the size of the buffer.  In this
+		 * case, the user is expected to continue to write into the
+		 * comedi buffer (already implemented as a ring buffer).
+		 */
+		nbytes = sdev->async->prealloc_bufsz;
+	}
+
+	mite_init_ring_descriptors(ring, sdev, nbytes);
+#else
+	dev_err(sdev->device->class_dev,
+		"ni_cmd_set_mite_transfer: exact data transfer limits not implemented yet without DMA\n");
+#endif
+}
+
 static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
 					 unsigned num_channels)
 {
@@ -2428,7 +2457,8 @@
 		ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
 		break;
 	case TRIG_EXT:
-		mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 + cmd->convert_arg);
+		mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 +
+						    CR_CHAN(cmd->convert_arg));
 		if ((cmd->convert_arg & CR_INVERT) == 0)
 			mode1 |= NISTC_AI_MODE1_CONVERT_POLARITY;
 		ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG);
@@ -2902,8 +2932,6 @@
 	ni_stc_writew(dev, NISTC_AO_CMD1_UI_ARM |
 			   NISTC_AO_CMD1_UC_ARM |
 			   NISTC_AO_CMD1_BC_ARM |
-			   NISTC_AO_CMD1_DAC1_UPDATE_MODE |
-			   NISTC_AO_CMD1_DAC0_UPDATE_MODE |
 			   devpriv->ao_cmd1,
 		      NISTC_AO_CMD1_REG);
 
@@ -2913,42 +2941,68 @@
 	return 0;
 }
 
-static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+/*
+ * begin ni_ao_cmd.
+ * Organized similar to NI-STC and MHDDK examples.
+ * ni_ao_cmd is broken out into configuration sub-routines for clarity.
+ */
+
+static void ni_ao_cmd_personalize(struct comedi_device *dev,
+				  const struct comedi_cmd *cmd)
 {
 	const struct ni_board_struct *board = dev->board_ptr;
-	struct ni_private *devpriv = dev->private;
-	const struct comedi_cmd *cmd = &s->async->cmd;
-	int bits;
-	int i;
-	unsigned trigvar;
-	unsigned val;
-
-	if (dev->irq == 0) {
-		dev_err(dev->class_dev, "cannot run command without an irq\n");
-		return -EIO;
-	}
+	unsigned bits;
 
 	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
 
-	ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG);
+	bits =
+	  /* fast CPU interface--only eseries */
+	  /* ((slow CPU interface) ? 0 : AO_Fast_CPU) | */
+	  NISTC_AO_PERSONAL_BC_SRC_SEL  |
+	  0 /* (use_original_pulse ? 0 : NISTC_AO_PERSONAL_UPDATE_TIMEBASE) */ |
+	  /*
+	   * FIXME:  start setting following bit when appropriate.  Need to
+	   * determine whether board is E4 or E1.
+	   * FROM MHDDK:
+	   * if board is E4 or E1
+	   *   Set bit "NISTC_AO_PERSONAL_UPDATE_PW" to 0
+	   * else
+	   *   set it to 1
+	   */
+	  NISTC_AO_PERSONAL_UPDATE_PW   |
+	  /* FIXME:  when should we set following bit to zero? */
+	  NISTC_AO_PERSONAL_TMRDACWR_PW |
+	  (board->ao_fifo_depth ?
+	    NISTC_AO_PERSONAL_FIFO_ENA : NISTC_AO_PERSONAL_DMA_PIO_CTRL)
+	  ;
+#if 0
+	/*
+	 * FIXME:
+	 * add something like ".has_individual_dacs = 0" to ni_board_struct
+	 * since, as F Hess pointed out, not all m-series boards have singles.
+	 * Not sure whether all e-series boards have duals...
+	 */
 
-	if (devpriv->is_6xxx) {
-		ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG,
-			       NI611X_AO_MISC_REG);
+	/*
+	 * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit for
+	 * 6281, verified with bus analyzer.
+	 */
+	if (devpriv->is_m_series)
+		bits |= NISTC_AO_PERSONAL_NUM_DAC;
+#endif
+	ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG);
 
-		bits = 0;
-		for (i = 0; i < cmd->chanlist_len; i++) {
-			int chan;
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
 
-			chan = CR_CHAN(cmd->chanlist[i]);
-			bits |= 1 << chan;
-			ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG);
-		}
-		ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG);
-	}
+static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
+				  const struct comedi_cmd *cmd)
+{
+	struct ni_private *devpriv = dev->private;
 
-	ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1);
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
 
+	/* sync */
 	if (cmd->stop_src == TRIG_NONE) {
 		devpriv->ao_mode1 |= NISTC_AO_MODE1_CONTINUOUS;
 		devpriv->ao_mode1 &= ~NISTC_AO_MODE1_TRIGGER_ONCE;
@@ -2958,177 +3012,351 @@
 	}
 	ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
 
-	val = devpriv->ao_trigger_select;
-	switch (cmd->start_src) {
-	case TRIG_INT:
-	case TRIG_NOW:
-		val &= ~(NISTC_AO_TRIG_START1_POLARITY |
-			 NISTC_AO_TRIG_START1_SEL_MASK);
-		val |= NISTC_AO_TRIG_START1_EDGE |
-		       NISTC_AO_TRIG_START1_SYNC;
-		break;
-	case TRIG_EXT:
-		val = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
-		if (cmd->start_arg & CR_INVERT) {
-			/* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
-			val |= NISTC_AO_TRIG_START1_POLARITY;
+	{
+		unsigned int trigsel = devpriv->ao_trigger_select;
+
+		switch (cmd->start_src) {
+		case TRIG_INT:
+		case TRIG_NOW:
+			trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY |
+				     NISTC_AO_TRIG_START1_SEL_MASK);
+			trigsel |= NISTC_AO_TRIG_START1_EDGE |
+				   NISTC_AO_TRIG_START1_SYNC;
+			break;
+		case TRIG_EXT:
+			trigsel = NISTC_AO_TRIG_START1_SEL(
+					CR_CHAN(cmd->start_arg) + 1);
+			if (cmd->start_arg & CR_INVERT)
+				/*
+				 * 0=active high, 1=active low.
+				 * see daq-stc 3-24 (p186)
+				 */
+				trigsel |= NISTC_AO_TRIG_START1_POLARITY;
+			if (cmd->start_arg & CR_EDGE)
+				/* 0=edge detection disabled, 1=enabled */
+				trigsel |= NISTC_AO_TRIG_START1_EDGE;
+			break;
+		default:
+			BUG();
+			break;
 		}
-		if (cmd->start_arg & CR_EDGE) {
-			/* 0=edge detection disabled, 1=enabled */
-			val |= NISTC_AO_TRIG_START1_EDGE;
-		}
+
+		devpriv->ao_trigger_select = trigsel;
 		ni_stc_writew(dev, devpriv->ao_trigger_select,
 			      NISTC_AO_TRIG_SEL_REG);
-		break;
-	default:
-		BUG();
-		break;
 	}
-	devpriv->ao_trigger_select = val;
-	ni_stc_writew(dev, devpriv->ao_trigger_select, NISTC_AO_TRIG_SEL_REG);
+	/* AO_Delayed_START1 = 0, we do not support delayed start...yet */
 
+	/* sync */
+	/* select DA_START1 as PFI6/AO_START1 when configured as an output */
 	devpriv->ao_mode3 &= ~NISTC_AO_MODE3_TRIG_LEN;
 	ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
 
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_counters(struct comedi_device *dev,
+				   const struct comedi_cmd *cmd)
+{
+	struct ni_private *devpriv = dev->private;
+	/* Not supporting 'waveform staging' or 'local buffer with pauses' */
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+	/*
+	 * This relies on ao_mode1/(Trigger_Once | Continuous) being set in
+	 * set_trigger above.  It is unclear whether we really need to re-write
+	 * this register with these values.  The mhddk examples for e-series
+	 * show writing this in both places, but the examples for m-series show
+	 * a single write in the set_counters function (here).
+	 */
 	ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
+
+	/* sync (upload number of buffer iterations -1) */
+	/* indicate that we want to use BC_Load_A_Register as the source */
 	devpriv->ao_mode2 &= ~NISTC_AO_MODE2_BC_INIT_LOAD_SRC;
 	ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
-	if (cmd->stop_src == TRIG_NONE)
-		ni_stc_writel(dev, 0xffffff, NISTC_AO_BC_LOADA_REG);
-	else
-		ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG);
+
+	/*
+	 * if the BC_TC interrupt is still issued in spite of UC, BC, UI
+	 * ignoring BC_TC, then we will need to find a way to ignore that
+	 * interrupt in continuous mode.
+	 */
+	ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG); /* iter once */
+
+	/* sync (issue command to load number of buffer iterations -1) */
 	ni_stc_writew(dev, NISTC_AO_CMD1_BC_LOAD, NISTC_AO_CMD1_REG);
+
+	/* sync (upload number of updates in buffer) */
+	/* indicate that we want to use UC_Load_A_Register as the source */
 	devpriv->ao_mode2 &= ~NISTC_AO_MODE2_UC_INIT_LOAD_SRC;
 	ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
-	switch (cmd->stop_src) {
-	case TRIG_COUNT:
+
+	/*
+	 * If a user specifies '0', this automatically assumes the entire 24-bit
+	 * address space is available for the (multiple iterations of single
+	 * buffer) MISB.  Otherwise, stop_arg specifies the MISB length that
+	 * will be used, regardless of whether we are in continuous mode or not.
+	 * In continuous mode, the output will just iterate indefinitely over
+	 * the MISB.
+	 */
+	{
+		unsigned int stop_arg = cmd->stop_arg > 0 ?
+			(cmd->stop_arg & 0xffffff) : 0xffffff;
+
 		if (devpriv->is_m_series) {
-			/*  this is how the NI example code does it for m-series boards, verified correct with 6259 */
-			ni_stc_writel(dev, cmd->stop_arg - 1,
-				      NISTC_AO_UC_LOADA_REG);
+			/*
+			 * this is how the NI example code does it for m-series
+			 * boards, verified correct with 6259
+			 */
+			ni_stc_writel(dev, stop_arg - 1, NISTC_AO_UC_LOADA_REG);
+
+			/* sync (issue cmd to load number of updates in MISB) */
 			ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD,
 				      NISTC_AO_CMD1_REG);
 		} else {
-			ni_stc_writel(dev, cmd->stop_arg,
-				      NISTC_AO_UC_LOADA_REG);
+			ni_stc_writel(dev, stop_arg, NISTC_AO_UC_LOADA_REG);
+
+			/* sync (issue cmd to load number of updates in MISB) */
 			ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD,
 				      NISTC_AO_CMD1_REG);
-			ni_stc_writel(dev, cmd->stop_arg - 1,
-				      NISTC_AO_UC_LOADA_REG);
+
+			/*
+			 * sync (upload number of updates-1 in MISB)
+			 * --eseries only?
+			 */
+			ni_stc_writel(dev, stop_arg - 1, NISTC_AO_UC_LOADA_REG);
 		}
-		break;
-	case TRIG_NONE:
-		ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG);
-		ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG);
-		ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG);
-		break;
-	default:
-		ni_stc_writel(dev, 0, NISTC_AO_UC_LOADA_REG);
-		ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG);
-		ni_stc_writel(dev, cmd->stop_arg, NISTC_AO_UC_LOADA_REG);
 	}
 
-	devpriv->ao_mode1 &= ~(NISTC_AO_MODE1_UPDATE_SRC_MASK |
-			       NISTC_AO_MODE1_UI_SRC_MASK |
-			       NISTC_AO_MODE1_UPDATE_SRC_POLARITY |
-			       NISTC_AO_MODE1_UI_SRC_POLARITY);
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_update(struct comedi_device *dev,
+				 const struct comedi_cmd *cmd)
+{
+	struct ni_private *devpriv = dev->private;
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
+	/*
+	 * zero out these bit fields to be set below. Does an ao-reset do this
+	 * automatically?
+	 */
+	devpriv->ao_mode1 &= ~(
+	  NISTC_AO_MODE1_UI_SRC_MASK         |
+	  NISTC_AO_MODE1_UI_SRC_POLARITY     |
+	  NISTC_AO_MODE1_UPDATE_SRC_MASK     |
+	  NISTC_AO_MODE1_UPDATE_SRC_POLARITY
+	);
+
 	switch (cmd->scan_begin_src) {
 	case TRIG_TIMER:
-		devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
-		trigvar =
-		    ni_ns_to_timer(dev, cmd->scan_begin_arg,
-				   CMDF_ROUND_NEAREST);
-		ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
-		ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
-		ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
+		devpriv->ao_cmd2  &= ~NISTC_AO_CMD2_BC_GATE_ENA;
+
+		/*
+		 * NOTE: there are several other ways of configuring internal
+		 * updates, but we'll only support one for now:  using
+		 * AO_IN_TIMEBASE, w/o waveform staging, w/o a delay between
+		 * START1 and first update, and also w/o local buffer mode w/
+		 * pauses.
+		 */
+
+		/*
+		 * This is already done above:
+		 * devpriv->ao_mode1 &= ~(
+		 *   // set UPDATE_Source to UI_TC:
+		 *   NISTC_AO_MODE1_UPDATE_SRC_MASK |
+		 *   // set UPDATE_Source_Polarity to rising (required?)
+		 *   NISTC_AO_MODE1_UPDATE_SRC_POLARITY |
+		 *   // set UI_Source to AO_IN_TIMEBASE1:
+		 *   NISTC_AO_MODE1_UI_SRC_MASK     |
+		 *   // set UI_Source_Polarity to rising (required?)
+		 *   NISTC_AO_MODE1_UI_SRC_POLARITY
+		 * );
+		 */
+
+		/*
+		 * TODO:  use ao_ui_clock_source to allow all possible signals
+		 * to be routed to UI_Source_Select.  See tSTC.h for
+		 * eseries/ni67xx and tMSeries.h for mseries.
+		 */
+
+		{
+			unsigned trigvar = ni_ns_to_timer(dev,
+							  cmd->scan_begin_arg,
+							  CMDF_ROUND_NEAREST);
+
+			/*
+			 * Wait N TB3 ticks after the start trigger before
+			 * clocking (N must be >= 2).
+			 */
+			/* following line: 2-1 per STC */
+			ni_stc_writel(dev, 1,           NISTC_AO_UI_LOADA_REG);
+			ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD,
+				      NISTC_AO_CMD1_REG);
+			/* following line: N-1 per STC */
+			ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+		}
 		break;
 	case TRIG_EXT:
-		devpriv->ao_mode1 |=
-		    NISTC_AO_MODE1_UPDATE_SRC(cmd->scan_begin_arg);
+		/* FIXME:  assert scan_begin_arg != 0, ret failure otherwise */
+		devpriv->ao_cmd2  |= NISTC_AO_CMD2_BC_GATE_ENA;
+		devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
+					CR_CHAN(cmd->scan_begin_arg));
 		if (cmd->scan_begin_arg & CR_INVERT)
 			devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
-		devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
 		break;
 	default:
 		BUG();
 		break;
 	}
+
 	ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
 	ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
 	devpriv->ao_mode2 &= ~(NISTC_AO_MODE2_UI_RELOAD_MODE(3) |
 			       NISTC_AO_MODE2_UI_INIT_LOAD_SRC);
 	ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
 
+	/* Configure DAQ-STC for Timed update mode */
+	devpriv->ao_cmd1 |= NISTC_AO_CMD1_DAC1_UPDATE_MODE |
+			    NISTC_AO_CMD1_DAC0_UPDATE_MODE;
+	/* We are not using UPDATE2-->don't have to set DACx_Source_Select */
+	ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG);
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_channels(struct comedi_device *dev,
+				   struct comedi_subdevice *s)
+{
+	struct ni_private *devpriv = dev->private;
+	const struct comedi_cmd *cmd = &s->async->cmd;
+	unsigned bits = 0;
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
+	if (devpriv->is_6xxx) {
+		unsigned int i;
+
+		bits = 0;
+		for (i = 0; i < cmd->chanlist_len; ++i) {
+			int chan = CR_CHAN(cmd->chanlist[i]);
+
+			bits |= 1 << chan;
+			ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG);
+		}
+		ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG);
+	}
+
+	ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1);
+
 	if (cmd->scan_end_arg > 1) {
 		devpriv->ao_mode1 |= NISTC_AO_MODE1_MULTI_CHAN;
-		ni_stc_writew(dev,
-			      NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1) |
-			      NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ,
-			      NISTC_AO_OUT_CTRL_REG);
-	} else {
-		unsigned bits;
+		bits = NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1) |
+		       NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ;
+	} else {
 		devpriv->ao_mode1 &= ~NISTC_AO_MODE1_MULTI_CHAN;
 		bits = NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ;
-		if (devpriv->is_m_series || devpriv->is_6xxx) {
+		if (devpriv->is_m_series || devpriv->is_6xxx)
 			bits |= NISTC_AO_OUT_CTRL_CHANS(0);
-		} else {
-			bits |=
-			    NISTC_AO_OUT_CTRL_CHANS(CR_CHAN(cmd->chanlist[0]));
-		}
-		ni_stc_writew(dev, bits, NISTC_AO_OUT_CTRL_REG);
+		else
+			bits |= NISTC_AO_OUT_CTRL_CHANS(
+					CR_CHAN(cmd->chanlist[0]));
 	}
-	ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
 
-	ni_stc_writew(dev, NISTC_AO_CMD1_DAC1_UPDATE_MODE |
-			   NISTC_AO_CMD1_DAC0_UPDATE_MODE,
-		      NISTC_AO_CMD1_REG);
+	ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
+	ni_stc_writew(dev, bits,              NISTC_AO_OUT_CTRL_REG);
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_stop_conditions(struct comedi_device *dev,
+					  const struct comedi_cmd *cmd)
+{
+	struct ni_private *devpriv = dev->private;
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
 
 	devpriv->ao_mode3 |= NISTC_AO_MODE3_STOP_ON_OVERRUN_ERR;
 	ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
 
+	/*
+	 * Since we are not supporting waveform staging, we ignore these errors:
+	 * NISTC_AO_MODE3_STOP_ON_BC_TC_ERR,
+	 * NISTC_AO_MODE3_STOP_ON_BC_TC_TRIG_ERR
+	 */
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_fifo_mode(struct comedi_device *dev)
+{
+	struct ni_private *devpriv = dev->private;
+
+	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
 	devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_MODE_MASK;
 #ifdef PCIDMA
 	devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF_F;
 #else
 	devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF;
 #endif
+	/* NOTE:  this is where use_onboard_memory=True would be implemented */
 	devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_REXMIT_ENA;
 	ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
 
-	bits = NISTC_AO_PERSONAL_BC_SRC_SEL |
-	       NISTC_AO_PERSONAL_UPDATE_PW |
-	       NISTC_AO_PERSONAL_TMRDACWR_PW;
-	if (board->ao_fifo_depth)
-		bits |= NISTC_AO_PERSONAL_FIFO_ENA;
-	else
-		bits |= NISTC_AO_PERSONAL_DMA_PIO_CTRL;
-#if 0
-	/*
-	 * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit
-	 * for 6281, verified with bus analyzer.
-	 */
-	if (devpriv->is_m_series)
-		bits |= NISTC_AO_PERSONAL_NUM_DAC;
-#endif
-	ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG);
-	/*  enable sending of ao dma requests */
+	/* enable sending of ao fifo requests (dma request) */
 	ni_stc_writew(dev, NISTC_AO_START_AOFREQ_ENA, NISTC_AO_START_SEL_REG);
 
 	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
 
-	if (cmd->stop_src == TRIG_COUNT) {
-		ni_stc_writew(dev, NISTC_INTB_ACK_AO_BC_TC,
-			      NISTC_INTB_ACK_REG);
+	/* we are not supporting boards with virtual fifos */
+}
+
+static void ni_ao_cmd_set_interrupts(struct comedi_device *dev,
+				     struct comedi_subdevice *s)
+{
+	if (s->async->cmd.stop_src == TRIG_COUNT)
 		ni_set_bits(dev, NISTC_INTB_ENA_REG,
 			    NISTC_INTB_ENA_AO_BC_TC, 1);
-	}
 
 	s->async->inttrig = ni_ao_inttrig;
+}
 
+static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+	struct ni_private *devpriv = dev->private;
+	const struct comedi_cmd *cmd = &s->async->cmd;
+
+	if (dev->irq == 0) {
+		dev_err(dev->class_dev, "cannot run command without an irq\n");
+		return -EIO;
+	}
+
+	/* ni_ao_reset should have already been done */
+	ni_ao_cmd_personalize(dev, cmd);
+	/* clearing fifo and preload happens elsewhere */
+
+	ni_ao_cmd_set_trigger(dev, cmd);
+	ni_ao_cmd_set_counters(dev, cmd);
+	ni_ao_cmd_set_update(dev, cmd);
+	ni_ao_cmd_set_channels(dev, s);
+	ni_ao_cmd_set_stop_conditions(dev, cmd);
+	ni_ao_cmd_set_fifo_mode(dev);
+	ni_cmd_set_mite_transfer(devpriv->ao_mite_ring, s, cmd, 0x00ffffff);
+	ni_ao_cmd_set_interrupts(dev, s);
+
+	/*
+	 * arm(ing) and start(ing) happen in ni_ao_inttrig, which _must_ be
+	 * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA
+	 * must be setup and initially written to before arm/start happen.
+	 */
 	return 0;
 }
 
+/* end ni_ao_cmd */
+
 static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
 			 struct comedi_cmd *cmd)
 {
@@ -3187,11 +3415,7 @@
 	err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
 	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
 					   cmd->chanlist_len);
-
-	if (cmd->stop_src == TRIG_COUNT)
-		err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
-	else	/* TRIG_NONE */
-		err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+	err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
 
 	if (err)
 		return 3;
@@ -3214,48 +3438,70 @@
 
 static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
 {
+	/* See 3.6.1.2 "Resetting" of the DAQ-STC Technical Reference Manual */
+
+	/*
+	 * In the following, the "--sync" comments are meant to denote
+	 * asynchronous boundaries for setting the registers, as described in
+	 * the DAQ-STC, mostly in the order given there.
+	 */
+
 	struct ni_private *devpriv = dev->private;
 
 	ni_release_ao_mite_channel(dev);
 
+	/* --sync (reset AO) */
+	if (devpriv->is_m_series)
+		/* following example in mhddk for m-series */
+		ni_stc_writew(dev, NISTC_RESET_AO, NISTC_RESET_REG);
+
+	/*--sync (start config) */
 	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
+	/*--sync (Disarm) */
 	ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG);
-	ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0);
-	ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG);
-	ni_stc_writew(dev, NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG);
-	ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL |
-			   NISTC_AO_PERSONAL_UPDATE_PW |
-			   NISTC_AO_PERSONAL_TMRDACWR_PW,
-		      NISTC_AO_PERSONAL_REG);
-	ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG);
-	ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG);
-	devpriv->ao_cmd1 = 0;
-	ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG);
-	devpriv->ao_cmd2 = 0;
-	ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
+
+	/*
+	 * --sync
+	 * (clear bunch of registers--mseries mhddk examples do not include
+	 * this)
+	 */
+	devpriv->ao_cmd1  = 0;
+	devpriv->ao_cmd2  = 0;
 	devpriv->ao_mode1 = 0;
-	ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
 	devpriv->ao_mode2 = 0;
-	ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
 	if (devpriv->is_m_series)
 		devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
 	else
 		devpriv->ao_mode3 = 0;
-	ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
 	devpriv->ao_trigger_select = 0;
-	ni_stc_writew(dev, devpriv->ao_trigger_select,
-		      NISTC_AO_TRIG_SEL_REG);
-	if (devpriv->is_6xxx) {
-		unsigned immediate_bits = 0;
-		unsigned i;
 
-		for (i = 0; i < s->n_chan; ++i)
-			immediate_bits |= 1 << i;
-		ni_ao_win_outw(dev, immediate_bits, NI671X_AO_IMMEDIATE_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_CMD2_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_MODE1_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_MODE2_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG);
+	ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG);
+	ni_stc_writew(dev, 0, NISTC_AO_TRIG_SEL_REG);
+
+	/*--sync (disable interrupts) */
+	ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0);
+
+	/*--sync (ack) */
+	ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG);
+	ni_stc_writew(dev, NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG);
+
+	/*--not in DAQ-STC.  which doc? */
+	if (devpriv->is_6xxx) {
+		ni_ao_win_outw(dev, (1u << s->n_chan) - 1u,
+			       NI671X_AO_IMMEDIATE_REG);
 		ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG,
 			       NI611X_AO_MISC_REG);
 	}
 	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+	/*--end */
 
 	return 0;
 }
@@ -3381,7 +3627,9 @@
 	err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
 	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
 					   cmd->chanlist_len);
-	err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+	err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+					    s->async->prealloc_bufsz /
+					    comedi_bytes_per_scan(s));
 
 	if (err)
 		return 3;
@@ -3458,6 +3706,7 @@
 
 static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 {
+	struct ni_private *devpriv = dev->private;
 	const struct comedi_cmd *cmd = &s->async->cmd;
 	unsigned cdo_mode_bits;
 	int retval;
@@ -3482,6 +3731,10 @@
 	if (retval < 0)
 		return retval;
 
+	ni_cmd_set_mite_transfer(devpriv->cdo_mite_ring, s, cmd,
+				 s->async->prealloc_bufsz /
+				 comedi_bytes_per_scan(s));
+
 	s->async->inttrig = ni_cdo_inttrig;
 
 	return 0;
@@ -5024,7 +5277,6 @@
 	unsigned long flags;
 #ifdef PCIDMA
 	struct ni_private *devpriv = dev->private;
-	struct mite_struct *mite = devpriv->mite;
 #endif
 
 	if (!dev->attached)
@@ -5036,8 +5288,7 @@
 	a_status = ni_stc_readw(dev, NISTC_AI_STATUS1_REG);
 	b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
 #ifdef PCIDMA
-	if (mite) {
-		struct ni_private *devpriv = dev->private;
+	if (devpriv->mite) {
 		unsigned long flags_too;
 
 		spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
@@ -5053,7 +5304,7 @@
 			ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
 			if (ao_mite_status & CHSR_LINKC)
 				writel(CHOR_CLRLC,
-				       mite->mite_io_addr +
+				       devpriv->mite->mite_io_addr +
 				       MITE_CHOR(devpriv->
 						 ao_mite_chan->channel));
 		}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index ac79099..67b9f22 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -525,7 +525,7 @@
 	switch (flags & CMDF_ROUND_MASK) {
 	case CMDF_ROUND_NEAREST:
 	default:
-		divider = (*nanosec + base / 2) / base;
+		divider = DIV_ROUND_CLOSEST(*nanosec, base);
 		break;
 	case CMDF_ROUND_DOWN:
 		divider = (*nanosec) / base;
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 30a5a75..231e37d 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -26,7 +26,8 @@
   PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
   PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
   PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
-  PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
+  PCI-6225, PXI-6225, PCI-6229, PCI-6250,
+  PCI-6251, PXI-6251, PCIe-6251, PXIe-6251,
   PCI-6254, PCI-6259, PCIe-6259,
   PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
   PCI-6711, PXI-6711, PCI-6713, PXI-6713,
@@ -193,6 +194,7 @@
 	BOARD_PCI6229,
 	BOARD_PCI6250,
 	BOARD_PCI6251,
+	BOARD_PXI6251,
 	BOARD_PCIE6251,
 	BOARD_PXIE6251,
 	BOARD_PCI6254,
@@ -811,6 +813,21 @@
 		.ao_speed	= 350,
 		.caldac		= { caldac_none },
 	},
+	[BOARD_PXI6251] = {
+		.name		= "pxi-6251",
+		.n_adchan	= 16,
+		.ai_maxdata	= 0xffff,
+		.ai_fifo_depth	= 4095,
+		.gainlkup	= ai_gain_628x,
+		.ai_speed	= 800,
+		.n_aochan	= 2,
+		.ao_maxdata	= 0xffff,
+		.ao_fifo_depth	= 8191,
+		.ao_range_table	= &range_ni_M_625x_ao,
+		.reg_type	= ni_reg_625x,
+		.ao_speed	= 350,
+		.caldac		= { caldac_none },
+	},
 	[BOARD_PCIE6251] = {
 		.name		= "pcie-6251",
 		.n_adchan	= 16,
@@ -1290,6 +1307,7 @@
 	{ PCI_VDEVICE(NI, 0x71bc), BOARD_PCI6221_37PIN },
 	{ PCI_VDEVICE(NI, 0x717d), BOARD_PCIE6251 },
 	{ PCI_VDEVICE(NI, 0x72e8), BOARD_PXIE6251 },
+	{ PCI_VDEVICE(NI, 0x70ad), BOARD_PXI6251 },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, ni_pcimio_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 437f723..823e479 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -92,7 +92,7 @@
 	unsigned long flags;
 	int ret = 0;
 
-	if (trig_num != cmd->start_src)
+	if (trig_num != cmd->start_arg)
 		return -EINVAL;
 
 	spin_lock_irqsave(&counter->lock, flags);
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
deleted file mode 100644
index 3bbe9e1..0000000
--- a/drivers/staging/dgap/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DGAP
-       tristate "Digi EPCA PCI products"
-       default n
-       depends on TTY && HAS_IOMEM
-       ---help---
-       Driver for the Digi International EPCA PCI based product line
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
deleted file mode 100644
index 0063d04..0000000
--- a/drivers/staging/dgap/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_DGAP) += dgap.o
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
deleted file mode 100644
index bad3551..0000000
--- a/drivers/staging/dgap/dgap.c
+++ /dev/null
@@ -1,7079 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- *	Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.  See the GNU General Public License for more details.
- *
- */
-
-/*
- *      In the original out of kernel Digi dgap driver, firmware
- *      loading was done via user land to driver handshaking.
- *
- *      For cards that support a concentrator (port expander),
- *      I believe the concentrator its self told the card which
- *      concentrator is actually attached and then that info
- *      was used to tell user land which concentrator firmware
- *      image was to be downloaded. I think even the BIOS or
- *      FEP images required could change with the connection
- *      of a particular concentrator.
- *
- *      Since I have no access to any of these cards or
- *      concentrators, I cannot put the correct concentrator
- *      firmware file names into the firmware_info structure
- *      as is now done for the BIOS and FEP images.
- *
- *      I think, but am not certain, that the cards supporting
- *      concentrators will function without them. So support
- *      of these cards has been left in this driver.
- *
- *      In order to fully support those cards, they would
- *      either have to be acquired for dissection or maybe
- *      Digi International could provide some assistance.
- */
-#undef DIGI_CONCENTRATORS_SUPPORTED
-
-#define pr_fmt(fmt) "dgap: " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h>	/* For udelay */
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/sched.h>
-
-#include <linux/interrupt.h>	/* For tasklet and interrupt structs/defines */
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h>		/* For read[bwl]/write[bwl] */
-
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/firmware.h>
-
-#include "dgap.h"
-
-/*
- * File operations permitted on Control/Management major.
- */
-static const struct file_operations dgap_board_fops = {
-	.owner	= THIS_MODULE,
-};
-
-static uint dgap_numboards;
-static struct board_t *dgap_board[MAXBOARDS];
-static ulong dgap_poll_counter;
-static int dgap_driver_state = DRIVER_INITIALIZED;
-static int dgap_poll_tick = 20;	/* Poll interval - 20 ms */
-
-static struct class *dgap_class;
-
-static uint dgap_count = 500;
-
-/*
- * Poller stuff
- */
-static DEFINE_SPINLOCK(dgap_poll_lock);	/* Poll scheduling lock */
-static ulong dgap_poll_time;		/* Time of next poll */
-static uint dgap_poll_stop;		/* Used to tell poller to stop */
-static struct timer_list dgap_poll_timer;
-
-/*
-     SUPPORTED PRODUCTS
-
-     Card Model               Number of Ports      Interface
-     ----------------------------------------------------------------
-     Acceleport Xem           4 - 64              (EIA232 & EIA422)
-     Acceleport Xr            4 & 8               (EIA232)
-     Acceleport Xr 920        4 & 8               (EIA232)
-     Acceleport C/X           8 - 128             (EIA232)
-     Acceleport EPC/X         8 - 224             (EIA232)
-     Acceleport Xr/422        4 & 8               (EIA422)
-     Acceleport 2r/920        2                   (EIA232)
-     Acceleport 4r/920        4                   (EIA232)
-     Acceleport 8r/920        8                   (EIA232)
-
-     IBM 8-Port Asynchronous PCI Adapter          (EIA232)
-     IBM 128-Port Asynchronous PCI Adapter        (EIA232 & EIA422)
-*/
-
-static struct pci_device_id dgap_pci_tbl[] = {
-	{ DIGI_VID, PCI_DEV_XEM_DID,      PCI_ANY_ID, PCI_ANY_ID, 0, 0,  0 },
-	{ DIGI_VID, PCI_DEV_CX_DID,       PCI_ANY_ID, PCI_ANY_ID, 0, 0,  1 },
-	{ DIGI_VID, PCI_DEV_CX_IBM_DID,   PCI_ANY_ID, PCI_ANY_ID, 0, 0,  2 },
-	{ DIGI_VID, PCI_DEV_EPCJ_DID,     PCI_ANY_ID, PCI_ANY_ID, 0, 0,  3 },
-	{ DIGI_VID, PCI_DEV_920_2_DID,    PCI_ANY_ID, PCI_ANY_ID, 0, 0,  4 },
-	{ DIGI_VID, PCI_DEV_920_4_DID,    PCI_ANY_ID, PCI_ANY_ID, 0, 0,  5 },
-	{ DIGI_VID, PCI_DEV_920_8_DID,    PCI_ANY_ID, PCI_ANY_ID, 0, 0,  6 },
-	{ DIGI_VID, PCI_DEV_XR_DID,       PCI_ANY_ID, PCI_ANY_ID, 0, 0,  7 },
-	{ DIGI_VID, PCI_DEV_XRJ_DID,      PCI_ANY_ID, PCI_ANY_ID, 0, 0,  8 },
-	{ DIGI_VID, PCI_DEV_XR_422_DID,   PCI_ANY_ID, PCI_ANY_ID, 0, 0,  9 },
-	{ DIGI_VID, PCI_DEV_XR_IBM_DID,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
-	{ DIGI_VID, PCI_DEV_XR_SAIP_DID,  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
-	{ DIGI_VID, PCI_DEV_XR_BULL_DID,  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
-	{ DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
-	{ DIGI_VID, PCI_DEV_XEM_HP_DID,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
-	{0,}					/* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
-
-/*
- * A generic list of Product names, PCI Vendor ID, and PCI Device ID.
- */
-struct board_id {
-	uint config_type;
-	u8 *name;
-	uint maxports;
-	uint dpatype;
-};
-
-static struct board_id dgap_ids[] = {
-	{PPCM,        PCI_DEV_XEM_NAME,     64, (T_PCXM | T_PCLITE | T_PCIBUS)},
-	{PCX,         PCI_DEV_CX_NAME,     128, (T_CX | T_PCIBUS)            },
-	{PCX,         PCI_DEV_CX_IBM_NAME, 128, (T_CX | T_PCIBUS)            },
-	{PEPC,        PCI_DEV_EPCJ_NAME,   224, (T_EPC | T_PCIBUS)           },
-	{APORT2_920P, PCI_DEV_920_2_NAME,    2, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{APORT4_920P, PCI_DEV_920_4_NAME,    4, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{APORT8_920P, PCI_DEV_920_8_NAME,    8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PAPORT8,     PCI_DEV_XR_NAME,       8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PAPORT8,     PCI_DEV_XRJ_NAME,      8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PAPORT8,     PCI_DEV_XR_422_NAME,   8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PAPORT8,     PCI_DEV_XR_IBM_NAME,   8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PAPORT8,     PCI_DEV_XR_SAIP_NAME,  8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PAPORT8,     PCI_DEV_XR_BULL_NAME,  8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
-	{PPCM,        PCI_DEV_XEM_HP_NAME,  64, (T_PCXM | T_PCLITE | T_PCIBUS)},
-	{0,}						/* 0 terminated list. */
-};
-
-struct firmware_info {
-	u8 *conf_name;  /* dgap.conf */
-	u8 *bios_name;	/* BIOS filename */
-	u8 *fep_name;	/* FEP  filename */
-	u8 *con_name;	/* Concentrator filename  FIXME*/
-	int num;        /* sequence number */
-};
-
-/*
- * Firmware - BIOS, FEP, and CONC filenames
- */
-static struct firmware_info fw_info[] = {
-	{ "dgap/dgap.conf", "dgap/sxbios.bin",  "dgap/sxfep.bin",  NULL, 0 },
-	{ "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 1 },
-	{ "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 2 },
-	{ "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", NULL, 3 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 4 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 5 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 6 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 7 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 8 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 9 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 10 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 11 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 12 },
-	{ "dgap/dgap.conf", "dgap/xrbios.bin",  "dgap/xrfep.bin",  NULL, 13 },
-	{ "dgap/dgap.conf", "dgap/sxbios.bin",  "dgap/sxfep.bin",  NULL, 14 },
-	{NULL,}
-};
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgap_digi_init = {
-	.digi_flags =	DIGI_COOK,	/* Flags			*/
-	.digi_maxcps =	100,		/* Max CPS			*/
-	.digi_maxchar =	50,		/* Max chars in print queue	*/
-	.digi_bufsize =	100,		/* Printer buffer size		*/
-	.digi_onlen =	4,		/* size of printer on string	*/
-	.digi_offlen =	4,		/* size of printer off string	*/
-	.digi_onstr =	"\033[5i",	/* ANSI printer on string ]	*/
-	.digi_offstr =	"\033[4i",	/* ANSI printer off string ]	*/
-	.digi_term =	"ansi"		/* default terminal type	*/
-};
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-
-static struct ktermios dgap_default_termios = {
-	.c_iflag =	(DEFAULT_IFLAGS),	/* iflags */
-	.c_oflag =	(DEFAULT_OFLAGS),	/* oflags */
-	.c_cflag =	(DEFAULT_CFLAGS),	/* cflags */
-	.c_lflag =	(DEFAULT_LFLAGS),	/* lflags */
-	.c_cc =		INIT_C_CC,
-	.c_line =	0,
-};
-
-/*
- * Our needed internal static variables from dgap_parse.c
- */
-static struct cnode dgap_head;
-#define MAXCWORD 200
-static char dgap_cword[MAXCWORD];
-
-struct toklist {
-	int token;
-	char *string;
-};
-
-static struct toklist dgap_brdtype[] = {
-	{ PCX,		"Digi_AccelePort_C/X_PCI" },
-	{ PEPC,		"Digi_AccelePort_EPC/X_PCI" },
-	{ PPCM,		"Digi_AccelePort_Xem_PCI" },
-	{ APORT2_920P,	"Digi_AccelePort_2r_920_PCI" },
-	{ APORT4_920P,	"Digi_AccelePort_4r_920_PCI" },
-	{ APORT8_920P,	"Digi_AccelePort_8r_920_PCI" },
-	{ PAPORT4,	"Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
-	{ PAPORT8,	"Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
-	{ 0, NULL }
-};
-
-static struct toklist dgap_tlist[] = {
-	{ BEGIN,	"config_begin" },
-	{ END,		"config_end" },
-	{ BOARD,	"board"	},
-	{ PCIINFO,	"pciinfo" },
-	{ LINE,		"line" },
-	{ CONC,		"conc" },
-	{ CONC,		"concentrator" },
-	{ CX,		"cx" },
-	{ CX,		"ccon" },
-	{ EPC,		"epccon" },
-	{ EPC,		"epc" },
-	{ MOD,		"module" },
-	{ ID,		"id" },
-	{ STARTO,	"start" },
-	{ SPEED,	"speed"	},
-	{ CABLE,	"cable"	},
-	{ CONNECT,	"connect" },
-	{ METHOD,	"method" },
-	{ STATUS,	"status" },
-	{ CUSTOM,	"Custom" },
-	{ BASIC,	"Basic"	},
-	{ MEM,		"mem" },
-	{ MEM,		"memory" },
-	{ PORTS,	"ports"	},
-	{ MODEM,	"modem"	},
-	{ NPORTS,	"nports" },
-	{ TTYN,		"ttyname" },
-	{ CU,		"cuname" },
-	{ PRINT,	"prname" },
-	{ CMAJOR,	"major"	 },
-	{ ALTPIN,	"altpin" },
-	{ USEINTR,	"useintr" },
-	{ TTSIZ,	"ttysize" },
-	{ CHSIZ,	"chsize" },
-	{ BSSIZ,	"boardsize" },
-	{ UNTSIZ,	"schedsize" },
-	{ F2SIZ,	"f2200size" },
-	{ VPSIZ,	"vpixsize" },
-	{ 0,		NULL }
-};
-
-/*
- * Get a word from the input stream; words are separated by whitespace.
- */
-static char *dgap_getword(char **in)
-{
-	char *ret_ptr = *in;
-
-	char *ptr = strpbrk(*in, " \t\n");
-
-	/* If no word found, return null */
-	if (!ptr)
-		return NULL;
-
-	/* Mark new location for our buffer */
-	*ptr = '\0';
-	*in = ptr + 1;
-
-	/* Eat any extra spaces/tabs/newlines that might be present */
-	while (*in && **in && ((**in == ' ') ||
-			       (**in == '\t') ||
-			       (**in == '\n'))) {
-		**in = '\0';
-		*in = *in + 1;
-	}
-
-	return ret_ptr;
-}
-
-
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in)
-{
-	char *w;
-	struct toklist *t;
-
-	if (strstr(dgap_cword, "board")) {
-		w = dgap_getword(in);
-		if (!w)
-			return 0;
-		snprintf(dgap_cword, MAXCWORD, "%s", w);
-		for (t = dgap_brdtype; t->token != 0; t++) {
-			if (!strcmp(w, t->string))
-				return t->token;
-		}
-	} else {
-		while ((w = dgap_getword(in))) {
-			snprintf(dgap_cword, MAXCWORD, "%s", w);
-			for (t = dgap_tlist; t->token != 0; t++) {
-				if (!strcmp(w, t->string))
-					return t->token;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
-	switch (p->type) {
-	case LNODE:
-		if (p->u.line.v_speed == 0) {
-			pr_err("line speed not specified");
-			return 1;
-		}
-		return 0;
-
-	case CNODE:
-		if (p->u.conc.v_speed == 0) {
-			pr_err("concentrator line speed not specified");
-			return 1;
-		}
-		if (p->u.conc.v_nport == 0) {
-			pr_err("number of ports on concentrator not specified");
-			return 1;
-		}
-		if (p->u.conc.v_id == 0) {
-			pr_err("concentrator id letter not specified");
-			return 1;
-		}
-		return 0;
-
-	case MNODE:
-		if (p->u.module.v_nport == 0) {
-			pr_err("number of ports on EBI module not specified");
-			return 1;
-		}
-		if (p->u.module.v_id == 0) {
-			pr_err("EBI module id letter not specified");
-			return 1;
-		}
-		return 0;
-	}
-	return 0;
-}
-
-/*
- * Given a board pointer, returns whether we should use interrupts or not.
- */
-static uint dgap_config_get_useintr(struct board_t *bd)
-{
-	struct cnode *p;
-
-	if (!bd)
-		return 0;
-
-	for (p = bd->bd_config; p; p = p->next) {
-		if (p->type == INTRNODE) {
-			/*
-			 * check for pcxr types.
-			 */
-			return p->u.useintr;
-		}
-	}
-
-	/* If not found, then don't turn on interrupts. */
-	return 0;
-}
-
-/*
- * Given a board pointer, returns whether we turn on altpin or not.
- */
-static uint dgap_config_get_altpin(struct board_t *bd)
-{
-	struct cnode *p;
-
-	if (!bd)
-		return 0;
-
-	for (p = bd->bd_config; p; p = p->next) {
-		if (p->type == ANODE) {
-			/*
-			 * check for pcxr types.
-			 */
-			return p->u.altpin;
-		}
-	}
-
-	/* If not found, then don't turn on altpin. */
-	return 0;
-}
-
-/*
- * Given a specific type of board, if found, detaches its link and
- * returns the first occurrence in the list.
- */
-static struct cnode *dgap_find_config(int type, int bus, int slot)
-{
-	struct cnode *p, *prev, *prev2, *found;
-
-	p = &dgap_head;
-
-	while (p->next) {
-		prev = p;
-		p = p->next;
-
-		if (p->type != BNODE)
-			continue;
-
-		if (p->u.board.type != type)
-			continue;
-
-		if (p->u.board.v_pcibus &&
-		    p->u.board.pcibus != bus)
-			continue;
-
-		if (p->u.board.v_pcislot &&
-		    p->u.board.pcislot != slot)
-			continue;
-
-		found = p;
-		/*
-		 * Keep walking through the list until we
-		 * find the next board.
-		 */
-		while (p->next) {
-			prev2 = p;
-			p = p->next;
-
-			if (p->type != BNODE)
-				continue;
-
-			/*
-			 * Mark the end of our 1 board
-			 * chain of configs.
-			 */
-			prev2->next = NULL;
-
-			/*
-			 * Link the "next" board to the
-			 * previous board, effectively
-			 * "unlinking" our board from
-			 * the main config.
-			 */
-			prev->next = p;
-
-			return found;
-		}
-		/*
-		 * It must be the last board in the list.
-		 */
-		prev->next = NULL;
-		return found;
-	}
-	return NULL;
-}
-
-/*
- * Given a board pointer, walks the config link, counting up
- * all ports the user specified should be on the board.
- * (This does NOT mean they are all actually present right now, though.)
- */
-static uint dgap_config_get_num_prts(struct board_t *bd)
-{
-	int count = 0;
-	struct cnode *p;
-
-	if (!bd)
-		return 0;
-
-	for (p = bd->bd_config; p; p = p->next) {
-		switch (p->type) {
-		case BNODE:
-			/*
-			 * check for pcxr types.
-			 */
-			if (p->u.board.type > EPCFE)
-				count += p->u.board.nport;
-			break;
-		case CNODE:
-			count += p->u.conc.nport;
-			break;
-		case MNODE:
-			count += p->u.module.nport;
-			break;
-		}
-	}
-	return count;
-}
-
-static char *dgap_create_config_string(struct board_t *bd, char *string)
-{
-	char *ptr = string;
-	struct cnode *p;
-	struct cnode *q;
-	int speed;
-
-	if (!bd) {
-		*ptr = 0xff;
-		return string;
-	}
-
-	for (p = bd->bd_config; p; p = p->next) {
-		switch (p->type) {
-		case LNODE:
-			*ptr = '\0';
-			ptr++;
-			*ptr = p->u.line.speed;
-			ptr++;
-			break;
-		case CNODE:
-			/*
-			 * Because the EPC/con concentrators can have EM modules
-			 * hanging off of them, we have to walk ahead in the
-			 * list and keep adding the number of ports on each EM
-			 * to the config. UGH!
-			 */
-			speed = p->u.conc.speed;
-			q = p->next;
-			if (q && (q->type == MNODE)) {
-				*ptr = (p->u.conc.nport + 0x80);
-				ptr++;
-				p = q;
-				while (q->next && (q->next->type) == MNODE) {
-					*ptr = (q->u.module.nport + 0x80);
-					ptr++;
-					p = q;
-					q = q->next;
-				}
-				*ptr = q->u.module.nport;
-				ptr++;
-			} else {
-				*ptr = p->u.conc.nport;
-				ptr++;
-			}
-
-			*ptr = speed;
-			ptr++;
-			break;
-		}
-	}
-
-	*ptr = 0xff;
-	return string;
-}
-
-/*
- * Parse a configuration file read into memory as a string.
- */
-static int dgap_parsefile(char **in)
-{
-	struct cnode *p, *brd, *line, *conc;
-	int rc;
-	char *s;
-	int linecnt = 0;
-
-	p = &dgap_head;
-	brd = line = conc = NULL;
-
-	/* perhaps we are adding to an existing list? */
-	while (p->next)
-		p = p->next;
-
-	/* file must start with a BEGIN */
-	while ((rc = dgap_gettok(in)) != BEGIN) {
-		if (rc == 0) {
-			pr_err("unexpected EOF");
-			return -1;
-		}
-	}
-
-	for (;;) {
-		int board_type = 0;
-		int conc_type = 0;
-		int module_type = 0;
-
-		rc = dgap_gettok(in);
-		if (rc == 0) {
-			pr_err("unexpected EOF");
-			return -1;
-		}
-
-		switch (rc) {
-		case BEGIN:	/* should only be 1 begin */
-			pr_err("unexpected config_begin\n");
-			return -1;
-
-		case END:
-			return 0;
-
-		case BOARD:	/* board info */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-
-			p->type = BNODE;
-			p->u.board.status = kstrdup("No", GFP_KERNEL);
-			line = conc = NULL;
-			brd = p;
-			linecnt = -1;
-
-			board_type = dgap_gettok(in);
-			if (board_type == 0) {
-				pr_err("board !!type not specified");
-				return -1;
-			}
-
-			p->u.board.type = board_type;
-
-			break;
-
-		case MEM:	/* memory address */
-			if (p->type != BNODE) {
-				pr_err("memory address only valid for boards");
-				return -1;
-			}
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			kfree(p->u.board.addrstr);
-			p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
-			if (kstrtoul(s, 0, &p->u.board.addr)) {
-				pr_err("bad number for memory address");
-				return -1;
-			}
-			p->u.board.v_addr = 1;
-			break;
-
-		case PCIINFO:	/* pci information */
-			if (p->type != BNODE) {
-				pr_err("memory address only valid for boards");
-				return -1;
-			}
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			kfree(p->u.board.pcibusstr);
-			p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
-			if (kstrtoul(s, 0, &p->u.board.pcibus)) {
-				pr_err("bad number for pci bus");
-				return -1;
-			}
-			p->u.board.v_pcibus = 1;
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			kfree(p->u.board.pcislotstr);
-			p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
-			if (kstrtoul(s, 0, &p->u.board.pcislot)) {
-				pr_err("bad number for pci slot");
-				return -1;
-			}
-			p->u.board.v_pcislot = 1;
-			break;
-
-		case METHOD:
-			if (p->type != BNODE) {
-				pr_err("install method only valid for boards");
-				return -1;
-			}
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			kfree(p->u.board.method);
-			p->u.board.method = kstrdup(s, GFP_KERNEL);
-			p->u.board.v_method = 1;
-			break;
-
-		case STATUS:
-			if (p->type != BNODE) {
-				pr_err("config status only valid for boards");
-				return -1;
-			}
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			kfree(p->u.board.status);
-			p->u.board.status = kstrdup(s, GFP_KERNEL);
-			break;
-
-		case NPORTS:	/* number of ports */
-			if (p->type == BNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.board.nport)) {
-					pr_err("bad number for number of ports");
-					return -1;
-				}
-				p->u.board.v_nport = 1;
-			} else if (p->type == CNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.conc.nport)) {
-					pr_err("bad number for number of ports");
-					return -1;
-				}
-				p->u.conc.v_nport = 1;
-			} else if (p->type == MNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.module.nport)) {
-					pr_err("bad number for number of ports");
-					return -1;
-				}
-				p->u.module.v_nport = 1;
-			} else {
-				pr_err("nports only valid for concentrators or modules");
-				return -1;
-			}
-			break;
-
-		case ID:	/* letter ID used in tty name */
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			kfree(p->u.board.status);
-			p->u.board.status = kstrdup(s, GFP_KERNEL);
-
-			if (p->type == CNODE) {
-				kfree(p->u.conc.id);
-				p->u.conc.id = kstrdup(s, GFP_KERNEL);
-				p->u.conc.v_id = 1;
-			} else if (p->type == MNODE) {
-				kfree(p->u.module.id);
-				p->u.module.id = kstrdup(s, GFP_KERNEL);
-				p->u.module.v_id = 1;
-			} else {
-				pr_err("id only valid for concentrators or modules");
-				return -1;
-			}
-			break;
-
-		case STARTO:	/* start offset of ID */
-			if (p->type == BNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.board.start)) {
-					pr_err("bad number for start of tty count");
-					return -1;
-				}
-				p->u.board.v_start = 1;
-			} else if (p->type == CNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.conc.start)) {
-					pr_err("bad number for start of tty count");
-					return -1;
-				}
-				p->u.conc.v_start = 1;
-			} else if (p->type == MNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.module.start)) {
-					pr_err("bad number for start of tty count");
-					return -1;
-				}
-				p->u.module.v_start = 1;
-			} else {
-				pr_err("start only valid for concentrators or modules");
-				return -1;
-			}
-			break;
-
-		case TTYN:	/* tty name prefix */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = TNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpeced end of file");
-				return -1;
-			}
-			p->u.ttyname = kstrdup(s, GFP_KERNEL);
-			if (!p->u.ttyname)
-				return -1;
-
-			break;
-
-		case CU:	/* cu name prefix */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = CUNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpeced end of file");
-				return -1;
-			}
-			p->u.cuname = kstrdup(s, GFP_KERNEL);
-			if (!p->u.cuname)
-				return -1;
-
-			break;
-
-		case LINE:	/* line information */
-			if (dgap_checknode(p))
-				return -1;
-			if (!brd) {
-				pr_err("must specify board before line info");
-				return -1;
-			}
-			switch (brd->u.board.type) {
-			case PPCM:
-				pr_err("line not valid for PC/em");
-				return -1;
-			}
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = LNODE;
-			conc = NULL;
-			line = p;
-			linecnt++;
-			break;
-
-		case CONC:	/* concentrator information */
-			if (dgap_checknode(p))
-				return -1;
-			if (!line) {
-				pr_err("must specify line info before concentrator");
-				return -1;
-			}
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = CNODE;
-			conc = p;
-
-			if (linecnt)
-				brd->u.board.conc2++;
-			else
-				brd->u.board.conc1++;
-
-			conc_type = dgap_gettok(in);
-			if (conc_type == 0 ||
-			    (conc_type != CX && conc_type != EPC)) {
-				pr_err("failed to set a type of concentratros");
-				return -1;
-			}
-
-			p->u.conc.type = conc_type;
-
-			break;
-
-		case MOD:	/* EBI module */
-			if (dgap_checknode(p))
-				return -1;
-			if (!brd) {
-				pr_err("must specify board info before EBI modules");
-				return -1;
-			}
-			switch (brd->u.board.type) {
-			case PPCM:
-				linecnt = 0;
-				break;
-			default:
-				if (!conc) {
-					pr_err("must specify concentrator info before EBI module");
-					return -1;
-				}
-			}
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = MNODE;
-
-			if (linecnt)
-				brd->u.board.module2++;
-			else
-				brd->u.board.module1++;
-
-			module_type = dgap_gettok(in);
-			if (module_type == 0 ||
-			    (module_type != PORTS && module_type != MODEM)) {
-				pr_err("failed to set a type of module");
-				return -1;
-			}
-
-			p->u.module.type = module_type;
-
-			break;
-
-		case CABLE:
-			if (p->type == LNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				kfree(p->u.line.cable);
-				p->u.line.cable = kstrdup(s, GFP_KERNEL);
-				p->u.line.v_cable = 1;
-			}
-			break;
-
-		case SPEED:	/* sync line speed indication */
-			if (p->type == LNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.line.speed)) {
-					pr_err("bad number for line speed");
-					return -1;
-				}
-				p->u.line.v_speed = 1;
-			} else if (p->type == CNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				if (kstrtol(s, 0, &p->u.conc.speed)) {
-					pr_err("bad number for line speed");
-					return -1;
-				}
-				p->u.conc.v_speed = 1;
-			} else {
-				pr_err("speed valid only for lines or concentrators.");
-				return -1;
-			}
-			break;
-
-		case CONNECT:
-			if (p->type == CNODE) {
-				s = dgap_getword(in);
-				if (!s) {
-					pr_err("unexpected end of file");
-					return -1;
-				}
-				kfree(p->u.conc.connect);
-				p->u.conc.connect = kstrdup(s, GFP_KERNEL);
-				p->u.conc.v_connect = 1;
-			}
-			break;
-		case PRINT:	/* transparent print name prefix */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = PNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpeced end of file");
-				return -1;
-			}
-			p->u.printname = kstrdup(s, GFP_KERNEL);
-			if (!p->u.printname)
-				return -1;
-
-			break;
-
-		case CMAJOR:	/* major number */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = JNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.majornumber)) {
-				pr_err("bad number for major number");
-				return -1;
-			}
-			break;
-
-		case ALTPIN:	/* altpin setting */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = ANODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.altpin)) {
-				pr_err("bad number for altpin");
-				return -1;
-			}
-			break;
-
-		case USEINTR:		/* enable interrupt setting */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = INTRNODE;
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.useintr)) {
-				pr_err("bad number for useintr");
-				return -1;
-			}
-			break;
-
-		case TTSIZ:	/* size of tty structure */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = TSNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.ttysize)) {
-				pr_err("bad number for ttysize");
-				return -1;
-			}
-			break;
-
-		case CHSIZ:	/* channel structure size */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = CSNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.chsize)) {
-				pr_err("bad number for chsize");
-				return -1;
-			}
-			break;
-
-		case BSSIZ:	/* board structure size */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = BSNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.bssize)) {
-				pr_err("bad number for bssize");
-				return -1;
-			}
-			break;
-
-		case UNTSIZ:	/* sched structure size */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = USNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.unsize)) {
-				pr_err("bad number for schedsize");
-				return -1;
-			}
-			break;
-
-		case F2SIZ:	/* f2200 structure size */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = FSNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.f2size)) {
-				pr_err("bad number for f2200size");
-				return -1;
-			}
-			break;
-
-		case VPSIZ:	/* vpix structure size */
-			if (dgap_checknode(p))
-				return -1;
-
-			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
-			if (!p->next)
-				return -ENOMEM;
-
-			p = p->next;
-			p->type = VSNODE;
-
-			s = dgap_getword(in);
-			if (!s) {
-				pr_err("unexpected end of file");
-				return -1;
-			}
-			if (kstrtol(s, 0, &p->u.vpixsize)) {
-				pr_err("bad number for vpixsize");
-				return -1;
-			}
-			break;
-		}
-	}
-}
-
-static void dgap_cleanup_nodes(void)
-{
-	struct cnode *p;
-
-	p = &dgap_head;
-
-	while (p) {
-		struct cnode *tmp = p->next;
-
-		if (p->type == NULLNODE) {
-			p = tmp;
-			continue;
-		}
-
-		switch (p->type) {
-		case BNODE:
-			kfree(p->u.board.addrstr);
-			kfree(p->u.board.pcibusstr);
-			kfree(p->u.board.pcislotstr);
-			kfree(p->u.board.method);
-			break;
-		case CNODE:
-			kfree(p->u.conc.id);
-			kfree(p->u.conc.connect);
-			break;
-		case MNODE:
-			kfree(p->u.module.id);
-			break;
-		case TNODE:
-			kfree(p->u.ttyname);
-			break;
-		case CUNODE:
-			kfree(p->u.cuname);
-			break;
-		case LNODE:
-			kfree(p->u.line.cable);
-			break;
-		case PNODE:
-			kfree(p->u.printname);
-			break;
-		}
-
-		kfree(p->u.board.status);
-		kfree(p);
-		p = tmp;
-	}
-}
-
-/*
- * Retrieves the current custom baud rate from FEP memory
- * and returns it to the user.
- * Returns 0 on error.
- */
-static uint dgap_get_custom_baud(struct channel_t *ch)
-{
-	u8 __iomem *vaddr;
-	ulong offset;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-
-	if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-
-	if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
-		return 0;
-
-	vaddr = ch->ch_bd->re_map_membase;
-
-	if (!vaddr)
-		return 0;
-
-	/*
-	 * Go get from FEP memory what the FEP
-	 * believes the custom baud rate is.
-	 */
-	offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
-	       + LINE_SPEED;
-
-	return readw(vaddr + offset);
-}
-
-/*
- * Remap PCI memory.
- */
-static int dgap_remap(struct board_t *brd)
-{
-	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
-		return -EIO;
-
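-	/* The 4MB PCI window: low 2MB is board memory, upper 2MB is I/O registers */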
-	if (!request_mem_region(brd->membase, 0x200000, "dgap"))
-		return -ENOMEM;
-
-	if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap"))
-		goto err_req_mem;
-
-	brd->re_map_membase = ioremap(brd->membase, 0x200000);
-	if (!brd->re_map_membase)
-		goto err_remap_mem;
-
-	brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
-	if (!brd->re_map_port)
-		goto err_remap_port;
-
-	return 0;
-
-err_remap_port:
-	iounmap(brd->re_map_membase);
-err_remap_mem:
-	release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
-err_req_mem:
-	release_mem_region(brd->membase, 0x200000);
-
-	return -ENOMEM;
-}
-
-static void dgap_unmap(struct board_t *brd)
-{
-	iounmap(brd->re_map_port);
-	iounmap(brd->re_map_membase);
-	release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
-	release_mem_region(brd->membase, 0x200000);
-}
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
- */
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
-			     unsigned char *fbuf, int *len)
-{
-	int l = *len;
-	int count = 0;
-	unsigned char *in, *cout, *fout;
-	unsigned char c;
-
-	in = cbuf;
-	cout = cbuf;
-	fout = fbuf;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	while (l--) {
-		c = *in++;
-		switch (ch->pscan_state) {
-		default:
-			/* reset to sanity and fall through */
-			ch->pscan_state = 0;
-
-		case 0:
-			/* No FF seen yet */
-			if (c == (unsigned char)'\377')
-				/* delete this character from stream */
-				ch->pscan_state = 1;
-			else {
-				*cout++ = c;
-				*fout++ = TTY_NORMAL;
-				count += 1;
-			}
-			break;
-
-		case 1:
-			/* first FF seen */
-			if (c == (unsigned char)'\377') {
-				/* doubled ff, transform to single ff */
-				*cout++ = c;
-				*fout++ = TTY_NORMAL;
-				count += 1;
-				ch->pscan_state = 0;
-			} else {
-				/* save value for examination in the next state */
-				ch->pscan_savechar = c;
-				ch->pscan_state = 2;
-			}
-			break;
-
-		case 2:
-			/* third character of ff sequence */
-
-			*cout++ = c;
-
-			if (ch->pscan_savechar == 0x0) {
-				if (c == 0x0) {
-					ch->ch_err_break++;
-					*fout++ = TTY_BREAK;
-				} else {
-					ch->ch_err_parity++;
-					*fout++ = TTY_PARITY;
-				}
-			}
-
-			count += 1;
-			ch->pscan_state = 0;
-		}
-	}
-	*len = count;
-}
-
-/*=======================================================================
- *
- *      dgap_input - Process received data.
- *
- *              ch      - Pointer to channel structure.
- *
- *=======================================================================*/
-
-static void dgap_input(struct channel_t *ch)
-{
-	struct board_t *bd;
-	struct bs_t __iomem *bs;
-	struct tty_struct *tp;
-	struct tty_ldisc *ld;
-	uint rmask;
-	uint head;
-	uint tail;
-	int data_len;
-	ulong lock_flags;
-	ulong lock_flags2;
-	int flip_len;
-	int len;
-	int n;
-	u8 *buf;
-	u8 tmpchar;
-	int s;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	tp = ch->ch_tun.un_tty;
-
-	bs  = ch->ch_bs;
-	if (!bs)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	/*
-	 *      Figure the number of characters in the buffer.
-	 *      Exit immediately if none.
-	 */
-
-	rmask = ch->ch_rsize - 1;
-
-	head = readw(&bs->rx_head);
-	head &= rmask;
-	tail = readw(&bs->rx_tail);
-	tail &= rmask;
-
-	data_len = (head - tail) & rmask;
-
-	if (data_len == 0) {
-		writeb(1, &bs->idata);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return;
-	}
-
-	/*
-	 * If the device is not open, or CREAD is off, flush
-	 * input data and return immediately.
-	 */
-	if ((bd->state != BOARD_READY) || !tp  ||
-	    (tp->magic != TTY_MAGIC) ||
-	    !(ch->ch_tun.un_flags & UN_ISOPEN) ||
-	    !(tp->termios.c_cflag & CREAD) ||
-	    (ch->ch_tun.un_flags & UN_CLOSING)) {
-		writew(head, &bs->rx_tail);
-		writeb(1, &bs->idata);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return;
-	}
-
-	/*
-	 * If we are throttled, simply don't read any data.
-	 */
-	if (ch->ch_flags & CH_RXBLOCK) {
-		writeb(1, &bs->idata);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return;
-	}
-
-	/*
-	 *      Count and clear overruns.
-	 */
-	tmpchar = readb(&bs->orun);
-	if (tmpchar) {
-		ch->ch_err_overrun++;
-		writeb(0, &bs->orun);
-	}
-
-	/* Decide how much data we can send into the tty layer */
-	flip_len = TTY_FLIPBUF_SIZE;
-
-	/* Chop down the length, if needed */
-	len = min(data_len, flip_len);
-	len = min(len, (N_TTY_BUF_SIZE - 1));
-
-	ld = tty_ldisc_ref(tp);
-
-#ifdef TTY_DONT_FLIP
-	/*
-	 * If the DONT_FLIP flag is on, don't flush our buffer, and act
-	 * like the ld doesn't have any space to put the data right now.
-	 */
-	if (test_bit(TTY_DONT_FLIP, &tp->flags))
-		len = 0;
-#endif
-
-	/*
-	 * If we were unable to get a reference to the ld,
-	 * don't flush our buffer, and act like the ld doesn't
-	 * have any space to put the data right now.
-	 */
-	if (!ld) {
-		len = 0;
-	} else {
-		/*
-		 * If ld doesn't have a pointer to a receive_buf function,
-		 * flush the data, then act like the ld doesn't have any
-		 * space to put the data right now.
-		 */
-		if (!ld->ops->receive_buf) {
-			writew(head, &bs->rx_tail);
-			len = 0;
-		}
-	}
-
-	if (len <= 0) {
-		writeb(1, &bs->idata);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		if (ld)
-			tty_ldisc_deref(ld);
-		return;
-	}
-
-	buf = ch->ch_bd->flipbuf;
-	n = len;
-
-	/*
-	 * n now contains the maximum amount of data we can copy,
-	 * bounded either by our buffer size or the amount
-	 * of data the card actually has pending...
-	 */
-	while (n) {
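-		/* s = contiguous bytes available before the ring wraps */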
-		s = ((head >= tail) ? head : ch->ch_rsize) - tail;
-		s = min(s, n);
-
-		if (s <= 0)
-			break;
-
-		memcpy_fromio(buf, ch->ch_raddr + tail, s);
-
-		tail += s;
-		buf += s;
-
-		n -= s;
-		/* Wrap the tail back to the start of the ring if needed */
-		tail &= rmask;
-	}
-
-	writew(tail, &bs->rx_tail);
-	writeb(1, &bs->idata);
-	ch->ch_rxcount += len;
-
-	/*
-	 * If we are completely raw, we don't need to go through a lot
-	 * of the tty layers that exist.
-	 * In this case, we take the shortest and fastest route we
-	 * can to relay the data to the user.
-	 *
-	 * On the other hand, if we are not raw, we need to go through
-	 * the tty layer, which has a more well-defined API.
-	 */
-	if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
-		dgap_parity_scan(ch, ch->ch_bd->flipbuf,
-				 ch->ch_bd->flipflagbuf, &len);
-
-		len = tty_buffer_request_room(tp->port, len);
-		tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
-					     ch->ch_bd->flipflagbuf, len);
-	} else {
-		len = tty_buffer_request_room(tp->port, len);
-		tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
-	}
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	/* Tell the tty layer it's okay to "eat" the data now */
-	tty_flip_buffer_push(tp->port);
-
-	if (ld)
-		tty_ldisc_deref(ld);
-}
-
-static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
-			      struct un_t *un, u32 mask,
-			      unsigned long *irq_flags1,
-			      unsigned long *irq_flags2)
-{
-	if (!(un->un_flags & mask))
-		return;
-
-	un->un_flags &= ~mask;
-
-	if (!(un->un_flags & UN_ISOPEN))
-		return;
-
-	if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-	    un->un_tty->ldisc->ops->write_wakeup) {
-		spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
-
-		(un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
-
-		spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
-		spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
-	}
-	wake_up_interruptible(&un->un_tty->write_wait);
-	wake_up_interruptible(&un->un_flags_wait);
-}
-
-/************************************************************************
- * Determines when CARRIER changes state and takes appropriate
- * action.
- ************************************************************************/
-static void dgap_carrier(struct channel_t *ch)
-{
-	struct board_t *bd;
-
-	int virt_carrier = 0;
-	int phys_carrier = 0;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	/* Make sure altpin is always set correctly */
-	if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
-		ch->ch_dsr      = DM_CD;
-		ch->ch_cd       = DM_DSR;
-	} else {
-		ch->ch_dsr      = DM_DSR;
-		ch->ch_cd       = DM_CD;
-	}
-
-	if (ch->ch_mistat & D_CD(ch))
-		phys_carrier = 1;
-
-	if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
-		virt_carrier = 1;
-
-	if (ch->ch_c_cflag & CLOCAL)
-		virt_carrier = 1;
-
-	/*
-	 * Test for a VIRTUAL carrier transition to HIGH.
-	 */
-	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
-		/*
-		 * When carrier rises, wake any threads waiting
-		 * for carrier in the open routine.
-		 */
-
-		if (waitqueue_active(&(ch->ch_flags_wait)))
-			wake_up_interruptible(&ch->ch_flags_wait);
-	}
-
-	/*
-	 * Test for a PHYSICAL carrier transition to HIGH.
-	 */
-	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
-		/*
-		 * When carrier rises, wake any threads waiting
-		 * for carrier in the open routine.
-		 */
-
-		if (waitqueue_active(&(ch->ch_flags_wait)))
-			wake_up_interruptible(&ch->ch_flags_wait);
-	}
-
-	/*
-	 *  Test for a PHYSICAL transition to low, so long as we aren't
-	 *  currently ignoring physical transitions (which is what "virtual
-	 *  carrier" indicates).
-	 *
-	 *  The transition of the virtual carrier to low really doesn't
-	 *  matter... it really only means "ignore carrier state", not
-	 *  "make pretend that carrier is there".
-	 */
-	if ((virt_carrier == 0) &&
-	    ((ch->ch_flags & CH_CD) != 0) &&
-	    (phys_carrier == 0)) {
-		/*
-		 *   When carrier drops:
-		 *
-		 *   Drop carrier on all open units.
-		 *
-		 *   Flush queues, waking up any task waiting in the
-		 *   line discipline.
-		 *
-		 *   Send a hangup to the control terminal.
-		 *
-		 *   Enable all select calls.
-		 */
-		if (waitqueue_active(&(ch->ch_flags_wait)))
-			wake_up_interruptible(&ch->ch_flags_wait);
-
-		if (ch->ch_tun.un_open_count > 0)
-			tty_hangup(ch->ch_tun.un_tty);
-
-		if (ch->ch_pun.un_open_count > 0)
-			tty_hangup(ch->ch_pun.un_tty);
-	}
-
-	/*
-	 *  Make sure that our cached values reflect the current reality.
-	 */
-	if (virt_carrier == 1)
-		ch->ch_flags |= CH_FCAR;
-	else
-		ch->ch_flags &= ~CH_FCAR;
-
-	if (phys_carrier == 1)
-		ch->ch_flags |= CH_CD;
-	else
-		ch->ch_flags &= ~CH_CD;
-}
-
-/*=======================================================================
- *
- *      dgap_event - FEP to host event processing routine.
- *
- *              bd     - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
-{
-	struct channel_t *ch;
-	ulong lock_flags;
-	ulong lock_flags2;
-	struct bs_t __iomem *bs;
-	u8 __iomem *event;
-	u8 __iomem *vaddr;
-	struct ev_t __iomem *eaddr;
-	uint head;
-	uint tail;
-	int port;
-	int reason;
-	int modem;
-
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return -EIO;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
-	vaddr = bd->re_map_membase;
-
-	if (!vaddr) {
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	eaddr = (struct ev_t __iomem *)(vaddr + EVBUF);
-
-	/* Get our head and tail */
-	head = readw(&eaddr->ev_head);
-	tail = readw(&eaddr->ev_tail);
-
-	/*
-	 * Forget it if pointers out of range.
-	 */
-
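-	/* Event entries are 4 bytes; offsets must be 4-byte aligned */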
-	if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
-	    (head | tail) & 03) {
-		/* Let go of board lock */
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	/*
-	 * Loop to process all the events in the buffer.
-	 */
-	while (tail != head) {
-		/*
-		 * Get interrupt information.
-		 */
-
-		event = bd->re_map_membase + tail + EVSTART;
-
-		port   = ioread8(event);
-		reason = ioread8(event + 1);
-		modem  = ioread8(event + 2);
-		ioread8(event + 3);
-
-		/*
-		 * Make sure the interrupt is valid.
-		 */
-		if (port >= bd->nasync)
-			goto next;
-
-		if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
-			goto next;
-
-		ch = bd->channels[port];
-
-		if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-			goto next;
-
-		/*
-		 * If we have made it here, the event was valid.
-		 * Lock down the channel.
-		 */
-		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-		bs = ch->ch_bs;
-
-		if (!bs) {
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			goto next;
-		}
-
-		/*
-		 * Process received data.
-		 */
-		if (reason & IFDATA) {
-			/*
-			 * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
-			 * input could send some data to ld, which in turn
-			 * could do a callback to one of our other functions.
-			 */
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-			dgap_input(ch);
-
-			spin_lock_irqsave(&bd->bd_lock, lock_flags);
-			spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-			if (ch->ch_flags & CH_RACTIVE)
-				ch->ch_flags |= CH_RENABLE;
-			else
-				writeb(1, &bs->idata);
-
-			if (ch->ch_flags & CH_RWAIT) {
-				ch->ch_flags &= ~CH_RWAIT;
-
-				wake_up_interruptible
-					(&ch->ch_tun.un_flags_wait);
-			}
-		}
-
-		/*
-		 * Process Modem change signals.
-		 */
-		if (reason & IFMODEM) {
-			ch->ch_mistat = modem;
-			dgap_carrier(ch);
-		}
-
-		/*
-		 * Process break.
-		 */
-		if (reason & IFBREAK) {
-			if (ch->ch_tun.un_tty) {
-				/* A break has been indicated */
-				ch->ch_err_break++;
-				tty_buffer_request_room
-					(ch->ch_tun.un_tty->port, 1);
-				tty_insert_flip_char(ch->ch_tun.un_tty->port,
-						     0, TTY_BREAK);
-				tty_flip_buffer_push(ch->ch_tun.un_tty->port);
-			}
-		}
-
-		/*
-		 * Process Transmit low.
-		 */
-		if (reason & IFTLW) {
-			dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
-					  &lock_flags, &lock_flags2);
-			dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
-					  &lock_flags, &lock_flags2);
-			if (ch->ch_flags & CH_WLOW) {
-				ch->ch_flags &= ~CH_WLOW;
-				wake_up_interruptible(&ch->ch_flags_wait);
-			}
-		}
-
-		/*
-		 * Process Transmit empty.
-		 */
-		if (reason & IFTEM) {
-			dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
-					  &lock_flags, &lock_flags2);
-			dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
-					  &lock_flags, &lock_flags2);
-			if (ch->ch_flags & CH_WEMPTY) {
-				ch->ch_flags &= ~CH_WEMPTY;
-				wake_up_interruptible(&ch->ch_flags_wait);
-			}
-		}
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-
-next:
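-		/* Advance past this 4-byte event, wrapping the buffer */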
-		tail = (tail + 4) & (EVMAX - EVSTART - 4);
-	}
-
-	writew(tail, &eaddr->ev_tail);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	return 0;
-}
-
-/*
- * Our board poller function.
- */
-static void dgap_poll_tasklet(unsigned long data)
-{
-	struct board_t *bd = (struct board_t *)data;
-	ulong lock_flags;
-	char __iomem *vaddr;
-	u16 head, tail;
-
-	if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
-		return;
-
-	if (bd->inhibit_poller)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
-	vaddr = bd->re_map_membase;
-
-	/*
-	 * If board is ready, parse deeper to see if there is anything to do.
-	 */
-	if (bd->state == BOARD_READY) {
-		struct ev_t __iomem *eaddr;
-
-		if (!bd->re_map_membase) {
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			return;
-		}
-		if (!bd->re_map_port) {
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			return;
-		}
-
-		if (!bd->nasync)
-			goto out;
-
-		eaddr = (struct ev_t __iomem *)(vaddr + EVBUF);
-
-		/* Get our head and tail */
-		head = readw(&eaddr->ev_head);
-		tail = readw(&eaddr->ev_tail);
-
-		/*
-		 * If there is an event pending, go service it.
-		 */
-		if (head != tail) {
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			dgap_event(bd);
-			spin_lock_irqsave(&bd->bd_lock, lock_flags);
-		}
-
-out:
-		/*
-		 * If board is doing interrupts, ACK the interrupt.
-		 */
-		if (bd->intr_running)
-			readb(bd->re_map_port + 2);
-
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return;
-	}
-
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * dgap_found_board()
- *
- * A board has been found, init it.
- */
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
-					int boardnum)
-{
-	struct board_t *brd;
-	unsigned int pci_irq;
-	int i;
-	int ret;
-
-	/* get the board structure and prep it */
-	brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
-	if (!brd)
-		return ERR_PTR(-ENOMEM);
-
-	/* store the info for the board we've found */
-	brd->magic = DGAP_BOARD_MAGIC;
-	brd->boardnum = boardnum;
-	brd->vendor = dgap_pci_tbl[id].vendor;
-	brd->device = dgap_pci_tbl[id].device;
-	brd->pdev = pdev;
-	brd->pci_bus = pdev->bus->number;
-	brd->pci_slot = PCI_SLOT(pdev->devfn);
-	brd->name = dgap_ids[id].name;
-	brd->maxports = dgap_ids[id].maxports;
-	brd->type = dgap_ids[id].config_type;
-	brd->dpatype = dgap_ids[id].dpatype;
-	brd->dpastatus = BD_NOFEP;
-	init_waitqueue_head(&brd->state_wait);
-
-	spin_lock_init(&brd->bd_lock);
-
-	brd->inhibit_poller	= FALSE;
-	brd->wait_for_bios	= 0;
-	brd->wait_for_fep	= 0;
-
-	for (i = 0; i < MAXPORTS; i++)
-		brd->channels[i] = NULL;
-
-	/* store which card & revision we have */
-	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
-	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
-	pci_irq = pdev->irq;
-	brd->irq = pci_irq;
-
-	/* get the PCI Base Address Registers */
-
-	/* Xr Jupiter and EPC use BAR 2 */
-	if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
-		brd->membase     = pci_resource_start(pdev, 2);
-		brd->membase_end = pci_resource_end(pdev, 2);
-	}
-	/* Everyone else uses BAR 0 */
-	else {
-		brd->membase     = pci_resource_start(pdev, 0);
-		brd->membase_end = pci_resource_end(pdev, 0);
-	}
-
-	if (!brd->membase) {
-		ret = -ENODEV;
-		goto free_brd;
-	}
-
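-	/* Strip PCI BAR flag bits (2 for I/O BARs, 4 for memory BARs) */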
-	if (brd->membase & 1)
-		brd->membase &= ~3;
-	else
-		brd->membase &= ~15;
-
-	/*
-	 * On the PCI boards, there is no I/O space allocated.
-	 * The I/O registers will be in the first 3 bytes of the
-	 * upper 2MB of the 4MB memory space.  The board memory
-	 * will be mapped into the low 2MB of the 4MB memory space.
-	 */
-	brd->port = brd->membase + PCI_IO_OFFSET;
-	brd->port_end = brd->port + PCI_IO_SIZE_DGAP;
-
-	/*
-	 * Special initialization for non-PLX boards
-	 */
-	if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
-		unsigned short cmd;
-
-		pci_write_config_byte(pdev, 0x40, 0);
-		pci_write_config_byte(pdev, 0x46, 0);
-
-		/* Limit burst length to 2 doubleword transactions */
-		pci_write_config_byte(pdev, 0x42, 1);
-
-		/*
-		 * Enable IO and mem if not already done.
-		 * This was needed for support on Itanium.
-		 */
-		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-		cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
-		pci_write_config_word(pdev, PCI_COMMAND, cmd);
-	}
-
-	/* init our poll helper tasklet */
-	tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
-		     (unsigned long)brd);
-
-	ret = dgap_remap(brd);
-	if (ret)
-		goto free_brd;
-
-	pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
-		boardnum, brd->name, brd->rev, brd->irq);
-
-	return brd;
-
-free_brd:
-	kfree(brd);
-
-	return ERR_PTR(ret);
-}
-
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
-	struct board_t *brd = voidbrd;
-
-	if (!brd)
-		return IRQ_NONE;
-
-	/*
-	 * Check to make sure it's for us.
-	 */
-	if (brd->magic != DGAP_BOARD_MAGIC)
-		return IRQ_NONE;
-
-	brd->intr_count++;
-
-	/*
-	 * Schedule tasklet to run at a better time.
-	 */
-	tasklet_schedule(&brd->helper_tasklet);
-	return IRQ_HANDLED;
-}
-
-/*****************************************************************************
-*
-* Function:
-*
-*    dgap_poll_handler
-*
-* Author:
-*
-*    Scott H Kilau
-*
-* Parameters:
-*
-*    dummy -- ignored
-*
-* Return Values:
-*
-*    none
-*
-* Description:
-*
-*    As each timer expires, it determines (a) whether the "transmit"
-*    waiter needs to be woken up, and (b) whether the poller needs to
-*    be rescheduled.
-*
-******************************************************************************/
-
-static void dgap_poll_handler(ulong dummy)
-{
-	unsigned int i;
-	struct board_t *brd;
-	unsigned long lock_flags;
-	ulong new_time;
-
-	dgap_poll_counter++;
-
-	/*
-	 * Do not start the board state machine until
-	 * driver tells us its up and running, and has
-	 * everything it needs.
-	 */
-	if (dgap_driver_state != DRIVER_READY)
-		goto schedule_poller;
-
-	/*
-	 * If we have just 1 board, or the system is not SMP,
-	 * then use the typical old-style poller.
-	 * Otherwise, use our new tasklet-based poller, which should
-	 * speed things up for multiple boards.
-	 */
-	if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
-		for (i = 0; i < dgap_numboards; i++) {
-			brd = dgap_board[i];
-
-			if (brd->state == BOARD_FAILED)
-				continue;
-			if (!brd->intr_running)
-				/* Call the real board poller directly */
-				dgap_poll_tasklet((unsigned long)brd);
-		}
-	} else {
-		/*
-		 * Go through each board, kicking off a
-		 * tasklet for each if needed.
-		 */
-		for (i = 0; i < dgap_numboards; i++) {
-			brd = dgap_board[i];
-
-			/*
-			 * Attempt to grab the board lock.
-			 *
-			 * If we can't get it, no big deal, the next poll
-			 * will get it. Basically, I just really don't want
-			 * to spin in here, because I want to kick off my
-			 * tasklets as fast as I can, and then get out of
-			 * the poller.
-			 */
-			if (!spin_trylock(&brd->bd_lock))
-				continue;
-
-			/*
-			 * If board is in a failed state, don't bother
-			 *  scheduling a tasklet
-			 */
-			if (brd->state == BOARD_FAILED) {
-				spin_unlock(&brd->bd_lock);
-				continue;
-			}
-
-			/* Schedule a poll helper task */
-			if (!brd->intr_running)
-				tasklet_schedule(&brd->helper_tasklet);
-
-			/*
-			 * Can't use spin_unlock_irqrestore() here, as we
-			 * don't have lock_flags because we did a trylock above.
-			 */
-			spin_unlock(&brd->bd_lock);
-		}
-	}
-
-schedule_poller:
-
-	/*
-	 * Schedule ourselves back at the nominal wakeup interval.
-	 */
-	spin_lock_irqsave(&dgap_poll_lock, lock_flags);
-	dgap_poll_time +=  dgap_jiffies_from_ms(dgap_poll_tick);
-
-	new_time = dgap_poll_time - jiffies;
-
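-	/* Resync if the next poll time has drifted too far from now */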
-	if ((ulong)new_time >= 2 * dgap_poll_tick) {
-		dgap_poll_time =
-			jiffies +  dgap_jiffies_from_ms(dgap_poll_tick);
-	}
-
-	dgap_poll_timer.function = dgap_poll_handler;
-	dgap_poll_timer.data = 0;
-	dgap_poll_timer.expires = dgap_poll_time;
-	spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
-	if (!dgap_poll_stop)
-		add_timer(&dgap_poll_timer);
-}
-
-/*=======================================================================
- *
- *      dgap_cmdb - Sends a 2-byte command to the FEP.
- *
- *              ch      - Pointer to channel structure.
- *              cmd     - Command to be sent.
- *              byte1   - Integer containing first byte to be sent.
- *              byte2   - Integer containing second byte to be sent.
- *              ncmds   - Wait until ncmds or fewer cmds are left
- *                        in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
-		      u8 byte2, uint ncmds)
-{
-	char __iomem *vaddr;
-	struct __iomem cm_t *cm_addr;
-	uint count;
-	uint n;
-	u16 head;
-	u16 tail;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	/*
-	 * Check if board is still alive.
-	 */
-	if (ch->ch_bd->state == BOARD_FAILED)
-		return;
-
-	/*
-	 * Make sure the pointers are in range before
-	 * writing to the FEP memory.
-	 */
-	vaddr = ch->ch_bd->re_map_membase;
-
-	if (!vaddr)
-		return;
-
-	cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF);
-	head = readw(&cm_addr->cm_head);
-
-	/*
-	 * Forget it if pointers out of range.
-	 */
-	if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
-		ch->ch_bd->state = BOARD_FAILED;
-		return;
-	}
-
-	/*
-	 * Put the data in the circular command buffer.
-	 */
-	writeb(cmd, (vaddr + head + CMDSTART + 0));
-	writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1));
-	writeb(byte1, (vaddr + head + CMDSTART + 2));
-	writeb(byte2, (vaddr + head + CMDSTART + 3));
-
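-	/* Each command is 4 bytes; advance head and wrap the buffer */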
-	head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
-	writew(head, &cm_addr->cm_head);
-
-	/*
-	 * Wait if necessary to limit the number of outstanding
-	 * commands to the FEP.  If the time spent waiting
-	 * is outlandish, declare the FEP dead.
-	 */
-	for (count = dgap_count;;) {
-		head = readw(&cm_addr->cm_head);
-		tail = readw(&cm_addr->cm_tail);
-
-		n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
-		if (n <= ncmds * sizeof(struct cm_t))
-			break;
-
-		if (--count == 0) {
-			ch->ch_bd->state = BOARD_FAILED;
-			return;
-		}
-		udelay(10);
-	}
-}
-
-/*=======================================================================
- *
- *      dgap_cmdw - Sends a 1-word command to the FEP.
- *
- *              ch      - Pointer to channel structure.
- *              cmd     - Command to be sent.
- *              word    - Integer containing word to be sent.
- *              ncmds   - Wait until ncmds or fewer cmds are left
- *                        in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
-{
-	char __iomem *vaddr;
-	struct __iomem cm_t *cm_addr;
-	uint count;
-	uint n;
-	u16 head;
-	u16 tail;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	/*
-	 * Check if board is still alive.
-	 */
-	if (ch->ch_bd->state == BOARD_FAILED)
-		return;
-
-	/*
-	 * Make sure the pointers are in range before
-	 * writing to the FEP memory.
-	 */
-	vaddr = ch->ch_bd->re_map_membase;
-	if (!vaddr)
-		return;
-
-	cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF);
-	head = readw(&cm_addr->cm_head);
-
-	/*
-	 * Forget it if pointers out of range.
-	 */
-	if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
-		ch->ch_bd->state = BOARD_FAILED;
-		return;
-	}
-
-	/*
-	 * Put the data in the circular command buffer.
-	 */
-	writeb(cmd, (vaddr + head + CMDSTART + 0));
-	writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1));
-	writew((u16)word, (vaddr + head + CMDSTART + 2));
-
-	head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
-	writew(head, &cm_addr->cm_head);
-
-	/*
-	 * Wait if necessary to limit the number of outstanding
-	 * commands to the FEP.  If the time spent waiting
-	 * is outlandish, declare the FEP dead.
-	 */
-	for (count = dgap_count;;) {
-		head = readw(&cm_addr->cm_head);
-		tail = readw(&cm_addr->cm_tail);
-
-		n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
-		if (n <= ncmds * sizeof(struct cm_t))
-			break;
-
-		if (--count == 0) {
-			ch->ch_bd->state = BOARD_FAILED;
-			return;
-		}
-		udelay(10);
-	}
-}
-
-/*=======================================================================
- *
- *      dgap_cmdw_ext - Sends an extended word command to the FEP.
- *
- *              ch      - Pointer to channel structure.
- *              cmd     - Command to be sent.
- *              word    - Integer containing word to be sent.
- *              ncmds   - Wait until ncmds or fewer cmds are left
- *                        in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
-	char __iomem *vaddr;
-	struct __iomem cm_t *cm_addr;
-	uint count;
-	uint n;
-	u16 head;
-	u16 tail;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	/*
-	 * Check if board is still alive.
-	 */
-	if (ch->ch_bd->state == BOARD_FAILED)
-		return;
-
-	/*
-	 * Make sure the pointers are in range before
-	 * writing to the FEP memory.
-	 */
-	vaddr = ch->ch_bd->re_map_membase;
-	if (!vaddr)
-		return;
-
-	cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF);
-	head = readw(&cm_addr->cm_head);
-
-	/*
-	 * Forget it if pointers out of range.
-	 */
-	if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
-		ch->ch_bd->state = BOARD_FAILED;
-		return;
-	}
-
-	/*
-	 * Put the data in the circular command buffer.
-	 */
-
-	/* Write an FF to tell the FEP that we want an extended command */
-	writeb((u8)0xff, (vaddr + head + CMDSTART + 0));
-
-	writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1));
-	writew((u16)cmd, (vaddr + head + CMDSTART + 2));
-
-	/*
-	 * If the second part of the command won't fit,
-	 * put it at the beginning of the circular buffer.
-	 */
-	if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
-		writew((u16)word, (vaddr + CMDSTART));
-	else
-		writew((u16)word, (vaddr + head + CMDSTART + 4));
-
-	head = (head + 8) & (CMDMAX - CMDSTART - 4);
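-	/* Extended commands take two 4-byte slots; advance head by 8 */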
-
-	writew(head, &cm_addr->cm_head);
-
-	/*
-	 * Wait if necessary to limit the number of outstanding
-	 * commands to the FEP.  If the time spent waiting
-	 * is outlandish, declare the FEP dead.
-	 */
-	for (count = dgap_count;;) {
-		head = readw(&cm_addr->cm_head);
-		tail = readw(&cm_addr->cm_tail);
-
-		n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
-		if (n <= ncmds * sizeof(struct cm_t))
-			break;
-
-		if (--count == 0) {
-			ch->ch_bd->state = BOARD_FAILED;
-			return;
-		}
-		udelay(10);
-	}
-}
-
-/*=======================================================================
- *
- *      dgap_wmove - Write data to FEP buffer.
- *
- *              ch      - Pointer to channel structure.
- *              buf     - Pointer to characters to be moved.
- *              cnt     - Number of characters to move.
- *
- *=======================================================================*/
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
-	int n;
-	char __iomem *taddr;
-	struct bs_t __iomem *bs;
-	u16 head;
-
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	/*
-	 * Check parameters.
-	 */
-	bs   = ch->ch_bs;
-	head = readw(&bs->tx_head);
-
-	/*
-	 * If pointers are out of range, just return.
-	 */
-	if ((cnt > ch->ch_tsize) ||
-	    (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
-		return;
-
-	/*
-	 * If the write wraps over the top of the circular buffer,
-	 * move the portion up to the wrap point, and reset the
-	 * pointers to the bottom.
-	 */
-	n = ch->ch_tstart + ch->ch_tsize - head;
-
-	if (cnt >= n) {
-		cnt -= n;
-		taddr = ch->ch_taddr + head;
-		memcpy_toio(taddr, buf, n);
-		head = ch->ch_tstart;
-		buf += n;
-	}
-
-	/*
-	 * Move rest of data.
-	 */
-	taddr = ch->ch_taddr + head;
-	n = cnt;
-	memcpy_toio(taddr, buf, n);
-	head += cnt;
-
-	writew(head, &bs->tx_head);
-}
-
-/*
- * Calls the firmware to reset this channel.
- */
-static void dgap_firmware_reset_port(struct channel_t *ch)
-{
-	dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
-	/*
-	 * Now that the channel is reset, we need to make sure
-	 * all the current settings get reapplied to the port
-	 * in the firmware.
-	 *
-	 * So we will set the driver's cache of firmware
-	 * settings all to 0, and then call param.
-	 */
-	ch->ch_fepiflag = 0;
-	ch->ch_fepcflag = 0;
-	ch->ch_fepoflag = 0;
-	ch->ch_fepstartc = 0;
-	ch->ch_fepstopc = 0;
-	ch->ch_fepastartc = 0;
-	ch->ch_fepastopc = 0;
-	ch->ch_mostat = 0;
-	ch->ch_hflow = 0;
-}
-
-/*=======================================================================
- *
- *      dgap_param - Set Digi parameters.
- *
- *              ch      - Pointer to channel structure.
- *              bd      - Pointer to board structure.
- *              un_type - Unit type of the port (serial or transparent print).
- *
- *=======================================================================*/
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
-{
-	u16 head;
-	u16 cflag;
-	u16 iflag;
-	u8 mval;
-	u8 hflow;
-
-	/*
-	 * If baud rate is zero, flush queues, and set mval to drop DTR.
-	 */
-	if ((ch->ch_c_cflag & (CBAUD)) == 0) {
-		/* flush rx */
-		head = readw(&ch->ch_bs->rx_head);
-		writew(head, &ch->ch_bs->rx_tail);
-
-		/* flush tx */
-		head = readw(&ch->ch_bs->tx_head);
-		writew(head, &ch->ch_bs->tx_tail);
-
-		ch->ch_flags |= (CH_BAUD0);
-
-		/* Drop RTS and DTR */
-		ch->ch_mval &= ~(D_RTS(ch) | D_DTR(ch));
-		mval = D_DTR(ch) | D_RTS(ch);
-		ch->ch_baud_info = 0;
-
-	} else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
-		/*
-		 * Tell the fep to do the command
-		 */
-
-		dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
-		/*
-		 * Now go get from FEP memory what the FEP
-		 * believes the custom baud rate is.
-		 */
-		ch->ch_custom_speed = dgap_get_custom_baud(ch);
-		ch->ch_baud_info = ch->ch_custom_speed;
-
-		/* Handle transition from B0 */
-		if (ch->ch_flags & CH_BAUD0) {
-			ch->ch_flags &= ~(CH_BAUD0);
-			ch->ch_mval |= (D_RTS(ch) | D_DTR(ch));
-		}
-		mval = D_DTR(ch) | D_RTS(ch);
-
-	} else {
-		/*
-		 * Set baud rate, character size, and parity.
-		 */
-
-		int iindex = 0;
-		int jindex = 0;
-		int baud = 0;
-
-		ulong bauds[4][16] = {
-			{ /* slowbaud */
-				0,	50,	75,	110,
-				134,	150,	200,	300,
-				600,	1200,	1800,	2400,
-				4800,	9600,	19200,	38400 },
-			{ /* slowbaud & CBAUDEX */
-				0,	57600,	115200,	230400,
-				460800,	150,	200,	921600,
-				600,	1200,	1800,	2400,
-				4800,	9600,	19200,	38400 },
-			{ /* fastbaud */
-				0,	57600,	76800,	115200,
-				14400,	57600,	230400,	76800,
-				115200,	230400,	28800,	460800,
-				921600,	9600,	19200,	38400 },
-			{ /* fastbaud & CBAUDEX */
-				0,	57600,	115200,	230400,
-				460800,	150,	200,	921600,
-				600,	1200,	1800,	2400,
-				4800,	9600,	19200,	38400 }
-		};
-
-		/*
-		 * Only use the TXPrint baud rate if the
-		 * terminal unit is NOT open
-		 */
-		if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
-		    un_type == DGAP_PRINT)
-			baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
-		else
-			baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
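-		/* Row: +1 for CBAUDEX, +2 for DIGI_FAST; column: baud bits */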
-		if (ch->ch_c_cflag & CBAUDEX)
-			iindex = 1;
-
-		if (ch->ch_digi.digi_flags & DIGI_FAST)
-			iindex += 2;
-
-		jindex = baud;
-
-		if ((iindex >= 0) && (iindex < 4) &&
-		    (jindex >= 0) && (jindex < 16))
-			baud = bauds[iindex][jindex];
-		else
-			baud = 0;
-
-		if (baud == 0)
-			baud = 9600;
-
-		ch->ch_baud_info = baud;
-
-		/*
-		 * CBAUD has bit position 0x1000 set these days to
-		 * indicate Linux baud rate remap.
-		 * We use a different bit assignment for high speed.
-		 * Clear this bit out while grabbing the parts of
-		 * "cflag" we want.
-		 */
-		cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
-						   CSTOPB | CSIZE);
-
-		/*
-		 * HUPCL bit is used by FEP to indicate fast baud
-		 * table is to be used.
-		 */
-		if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
-		    (ch->ch_c_cflag & CBAUDEX))
-			cflag |= HUPCL;
-
-		if ((ch->ch_c_cflag & CBAUDEX) &&
-		    !(ch->ch_digi.digi_flags & DIGI_FAST)) {
-			/*
-			 * The below code is trying to guarantee that only
-			 * baud rates 115200, 230400, 460800, 921600 are
-			 * remapped. We use exclusive or because the various
-			 * baud rates share common bit positions and therefore
-			 * can't be tested for easily.
-			 */
-			tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
-			int baudpart = 0;
-
-			/*
-			 * Map high speed requests to index
-			 * into FEP's baud table
-			 */
-			switch (tcflag) {
-			case B57600:
-				baudpart = 1;
-				break;
-#ifdef B76800
-			case B76800:
-				baudpart = 2;
-				break;
-#endif
-			case B115200:
-				baudpart = 3;
-				break;
-			case B230400:
-				baudpart = 9;
-				break;
-			case B460800:
-				baudpart = 11;
-				break;
-#ifdef B921600
-			case B921600:
-				baudpart = 12;
-				break;
-#endif
-			default:
-				baudpart = 0;
-			}
-
-			if (baudpart)
-				cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
-		}
-
-		cflag &= 0xffff;
-
-		if (cflag != ch->ch_fepcflag) {
-			ch->ch_fepcflag = (u16)(cflag & 0xffff);
-
-			/*
-			 * Okay to have channel and board
-			 * locks held calling this
-			 */
-			dgap_cmdw(ch, SCFLAG, (u16)cflag, 0);
-		}
-
-		/* Handle transition from B0 */
-		if (ch->ch_flags & CH_BAUD0) {
-			ch->ch_flags &= ~(CH_BAUD0);
-			ch->ch_mval |= (D_RTS(ch) | D_DTR(ch));
-		}
-		mval = D_DTR(ch) | D_RTS(ch);
-	}
-
-	/*
-	 * Get input flags.
-	 */
-	iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
-				  INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
-	if ((ch->ch_startc == _POSIX_VDISABLE) ||
-	    (ch->ch_stopc == _POSIX_VDISABLE)) {
-		iflag &= ~(IXON | IXOFF);
-		ch->ch_c_iflag &= ~(IXON | IXOFF);
-	}
-
-	/*
-	 * Only the IBM Xr card can switch between
-	 * 232 and 422 modes on the fly
-	 */
-	if (bd->device == PCI_DEV_XR_IBM_DID) {
-		if (ch->ch_digi.digi_flags & DIGI_422)
-			dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
-		else
-			dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
-	}
-
-	if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
-		iflag |= IALTPIN;
-
-	if (iflag != ch->ch_fepiflag) {
-		ch->ch_fepiflag = iflag;
-
-		/* Okay to have channel and board locks held calling this */
-		dgap_cmdw(ch, SIFLAG, (u16)ch->ch_fepiflag, 0);
-	}
-
-	/*
-	 * Select hardware handshaking.
-	 */
-	hflow = 0;
-
-	if (ch->ch_c_cflag & CRTSCTS)
-		hflow |= (D_RTS(ch) | D_CTS(ch));
-	if (ch->ch_digi.digi_flags & RTSPACE)
-		hflow |= D_RTS(ch);
-	if (ch->ch_digi.digi_flags & DTRPACE)
-		hflow |= D_DTR(ch);
-	if (ch->ch_digi.digi_flags & CTSPACE)
-		hflow |= D_CTS(ch);
-	if (ch->ch_digi.digi_flags & DSRPACE)
-		hflow |= D_DSR(ch);
-	if (ch->ch_digi.digi_flags & DCDPACE)
-		hflow |= D_CD(ch);
-
-	if (hflow != ch->ch_hflow) {
-		ch->ch_hflow = hflow;
-
-		/* Okay to have channel and board locks held calling this */
-		dgap_cmdb(ch, SHFLOW, (u8)hflow, 0xff, 0);
-	}
-
-	/*
-	 * Set RTS and/or DTR Toggle if needed,
-	 * but only if product is FEP5+ based.
-	 */
-	if (bd->bd_flags & BD_FEP5PLUS) {
-		u16 hflow2 = 0;
-
-		if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
-			hflow2 |= (D_RTS(ch));
-		if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
-			hflow2 |= (D_DTR(ch));
-
-		dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
-	}
-
-	/*
-	 * Set modem control lines.
-	 */
-
-	mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
-	if (ch->ch_mostat ^ mval) {
-		ch->ch_mostat = mval;
-
-		/* Okay to have channel and board locks held calling this */
-		dgap_cmdb(ch, SMODEM, (u8)mval, D_RTS(ch) | D_DTR(ch), 0);
-	}
-
-	/*
-	 * Read modem signals, and then call carrier function.
-	 */
-	ch->ch_mistat = readb(&ch->ch_bs->m_stat);
-	dgap_carrier(ch);
-
-	/*
-	 * Set the start and stop characters.
-	 */
-	if (ch->ch_startc != ch->ch_fepstartc ||
-	    ch->ch_stopc != ch->ch_fepstopc) {
-		ch->ch_fepstartc = ch->ch_startc;
-		ch->ch_fepstopc =  ch->ch_stopc;
-
-		/* Okay to have channel and board locks held calling this */
-		dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
-	}
-
-	/*
-	 * Set the Auxiliary start and stop characters.
-	 */
-	if (ch->ch_astartc != ch->ch_fepastartc ||
-	    ch->ch_astopc != ch->ch_fepastopc) {
-		ch->ch_fepastartc = ch->ch_astartc;
-		ch->ch_fepastopc = ch->ch_astopc;
-
-		/* Okay to have channel and board locks held calling this */
-		dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
-	}
-
-	return 0;
-}
-
-/*
- * dgap_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
-				struct channel_t *ch)
-{
-	int retval = 0;
-	struct un_t *un;
-	ulong lock_flags;
-	uint old_flags;
-	int sleep_on_un_flags;
-
-	if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
-	    ch->magic != DGAP_CHANNEL_MAGIC)
-		return -EIO;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -EIO;
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-	ch->ch_wopen++;
-
-	/* Loop forever */
-	while (1) {
-		sleep_on_un_flags = 0;
-
-		/*
-		 * If board has failed somehow during our sleep,
-		 * bail with error.
-		 */
-		if (ch->ch_bd->state == BOARD_FAILED) {
-			retval = -EIO;
-			break;
-		}
-
-		/* If tty was hung up, break out of loop and set error. */
-		if (tty_hung_up_p(file)) {
-			retval = -EAGAIN;
-			break;
-		}
-
-		/*
-		 * If either unit is in the middle of the fragile part of close,
-		 * we just cannot touch the channel safely.
-		 * Go back to sleep, knowing that when the channel can be
-		 * touched safely, the close routine will signal the
-		 * ch_flags_wait to wake us back up.
-		 */
-		if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
-		      UN_CLOSING)) {
-			/*
-			 * Our conditions to leave cleanly and happily:
-			 * 1) NONBLOCKING on the tty is set.
-			 * 2) CLOCAL is set.
-			 * 3) DCD (fake or real) is active.
-			 */
-
-			if (file->f_flags & O_NONBLOCK)
-				break;
-
-			if (tty->flags & (1 << TTY_IO_ERROR))
-				break;
-
-			if (ch->ch_flags & CH_CD)
-				break;
-
-			if (ch->ch_flags & CH_FCAR)
-				break;
-		} else {
-			sleep_on_un_flags = 1;
-		}
-
-		/*
-		 * If there is a signal pending, the user probably
-		 * interrupted (ctrl-c) us.
-		 * Leave loop with error set.
-		 */
-		if (signal_pending(current)) {
-			retval = -ERESTARTSYS;
-			break;
-		}
-
-		/*
-		 * Store the flags before we let go of channel lock
-		 */
-		if (sleep_on_un_flags)
-			old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
-		else
-			old_flags = ch->ch_flags;
-
-		/*
-		 * Let go of channel lock before calling schedule.
-		 * Our poller will get any FEP events and wake us up when DCD
-		 * eventually goes active.
-		 */
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-		/*
-		 * Wait for something in the flags to change
-		 * from the current value.
-		 */
-		if (sleep_on_un_flags) {
-			retval = wait_event_interruptible(un->un_flags_wait,
-				(old_flags != (ch->ch_tun.un_flags |
-					       ch->ch_pun.un_flags)));
-		} else {
-			retval = wait_event_interruptible(ch->ch_flags_wait,
-				(old_flags != ch->ch_flags));
-		}
-
-		/*
-		 * We got woken up for some reason.
-		 * Before looping around, grab our channel lock.
-		 */
-		spin_lock_irqsave(&ch->ch_lock, lock_flags);
-	}
-
-	ch->ch_wopen--;
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	return retval;
-}
-
-/*
- * dgap_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-	u16 head;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	ch->ch_flags &= ~CH_STOP;
-	head = readw(&ch->ch_bs->tx_head);
-	dgap_cmdw(ch, FLUSHTX, (u16)head, 0);
-	dgap_cmdw(ch, RESUMETX, 0, 0);
-	if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
-		ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY);
-		wake_up_interruptible(&ch->ch_tun.un_flags_wait);
-	}
-	if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) {
-		ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY);
-		wake_up_interruptible(&ch->ch_pun.un_flags_wait);
-	}
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-	if (waitqueue_active(&tty->write_wait))
-		wake_up_interruptible(&tty->write_wait);
-	tty_wakeup(tty);
-}
-
-/*
- * dgap_tty_hangup()
- *
- * Hangup the port.  Like a close, but don't wait for output to drain.
- */
-static void dgap_tty_hangup(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	/* flush the transmit queues */
-	dgap_tty_flush_buffer(tty);
-}
-
-/*
- * dgap_tty_chars_in_buffer()
- *
- * Return number of characters that have not been transmitted yet.
- *
- * This routine is used by the line discipline to determine if there
- * is data waiting to be transmitted/drained/flushed or not.
- */
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	struct bs_t __iomem *bs;
-	u8 tbusy;
-	uint chars;
-	u16 thead, ttail, tmask, chead, ctail;
-	ulong lock_flags = 0;
-	ulong lock_flags2 = 0;
-
-	if (!tty)
-		return 0;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-
-	bs = ch->ch_bs;
-	if (!bs)
-		return 0;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	tmask = (ch->ch_tsize - 1);
-
-	/* Get Transmit queue pointers */
-	thead = readw(&bs->tx_head) & tmask;
-	ttail = readw(&bs->tx_tail) & tmask;
-
-	/* Get tbusy flag */
-	tbusy = readb(&bs->tbusy);
-
-	/* Get Command queue pointers */
-	chead = readw(&ch->ch_cm->cm_head);
-	ctail = readw(&ch->ch_cm->cm_tail);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	/*
-	 * The only way we know for sure if there is no pending
-	 * data left to be transferred, is if:
-	 * 1) Transmit head and tail are equal (empty).
-	 * 2) Command queue head and tail are equal (empty).
-	 * 3) The "TBUSY" flag is 0. (Transmitter not busy).
-	 */
-
-	if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
-		chars = 0;
-	} else {
-		if (thead >= ttail)
-			chars = thead - ttail;
-		else
-			chars = thead - ttail + ch->ch_tsize;
-		/*
-		 * Fudge factor here.
-		 * If chars is zero, we know that the command queue had
-		 * something in it or tbusy was set.  Because we cannot
-		 * be sure if there is still some data to be transmitted,
-		 * let's lie, and tell ld we have 1 byte left.
-		 */
-		if (chars == 0) {
-			/*
-			 * If TBUSY is still set, and our tx buffers are empty,
-			 * force the firmware to send me another wakeup after
-			 * TBUSY has been cleared.
-			 */
-			if (tbusy != 0) {
-				spin_lock_irqsave(&ch->ch_lock, lock_flags);
-				un->un_flags |= UN_EMPTY;
-				writeb(1, &bs->iempty);
-				spin_unlock_irqrestore(&ch->ch_lock,
-						       lock_flags);
-			}
-			chars = 1;
-		}
-	}
-
-	return chars;
-}
-
-static int dgap_wait_for_drain(struct tty_struct *tty)
-{
-	struct channel_t *ch;
-	struct un_t *un;
-	struct bs_t __iomem *bs;
-	int ret = 0;
-	uint count = 1;
-	ulong lock_flags = 0;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return -EIO;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -EIO;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return -EIO;
-
-	bs = ch->ch_bs;
-	if (!bs)
-		return -EIO;
-
-	/* Loop until data is drained */
-	while (count != 0) {
-		count = dgap_tty_chars_in_buffer(tty);
-
-		if (count == 0)
-			break;
-
-		/* Set flag waiting for drain */
-		spin_lock_irqsave(&ch->ch_lock, lock_flags);
-		un->un_flags |= UN_EMPTY;
-		writeb(1, &bs->iempty);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-		/* Go to sleep till we get woken up */
-		ret = wait_event_interruptible(un->un_flags_wait,
-					((un->un_flags & UN_EMPTY) == 0));
-		/* If ret is non-zero, user ctrl-c'ed us */
-		if (ret)
-			break;
-	}
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-	un->un_flags &= ~(UN_EMPTY);
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	return ret;
-}
-
-/*
- * dgap_maxcps_room
- *
- * Reduces bytes_available to the max number of characters
- * that can be sent currently given the maxcps value, and
- * returns the new bytes_available.  This only affects printer
- * output.
- */
-static int dgap_maxcps_room(struct channel_t *ch, struct un_t *un,
-			    int bytes_available)
-{
-	/*
-	 * If it's not the Transparent print device, return
-	 * the full data amount.
-	 */
-	if (un->un_type != DGAP_PRINT)
-		return bytes_available;
-
-	if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
-		int cps_limit = 0;
-		unsigned long current_time = jiffies;
-		unsigned long buffer_time = current_time +
-			(HZ * ch->ch_digi.digi_bufsize) /
-			ch->ch_digi.digi_maxcps;
-
-		if (ch->ch_cpstime < current_time) {
-			/* buffer is empty */
-			ch->ch_cpstime = current_time;   /* reset ch_cpstime */
-			cps_limit = ch->ch_digi.digi_bufsize;
-		} else if (ch->ch_cpstime < buffer_time) {
-			/* still room in the buffer */
-			cps_limit = ((buffer_time - ch->ch_cpstime) *
-				     ch->ch_digi.digi_maxcps) / HZ;
-		} else {
-			/* no room in the buffer */
-			cps_limit = 0;
-		}
-
-		bytes_available = min(cps_limit, bytes_available);
-	}
-
-	return bytes_available;
-}
-
-static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
-{
-	struct channel_t *ch;
-	struct bs_t __iomem *bs;
-
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-	bs = ch->ch_bs;
-	if (!bs)
-		return;
-
-	if ((event & UN_LOW) != 0) {
-		if ((un->un_flags & UN_LOW) == 0) {
-			un->un_flags |= UN_LOW;
-			writeb(1, &bs->ilow);
-		}
-	}
-	if ((event & UN_EMPTY) != 0) {
-		if ((un->un_flags & UN_EMPTY) == 0) {
-			un->un_flags |= UN_EMPTY;
-			writeb(1, &bs->iempty);
-		}
-	}
-}
-
-/*
- * dgap_tty_write_room()
- *
- * Return space available in Tx buffer
- */
-static int dgap_tty_write_room(struct tty_struct *tty)
-{
-	struct channel_t *ch;
-	struct un_t *un;
-	struct bs_t __iomem *bs;
-	u16 head, tail, tmask;
-	int ret;
-	ulong lock_flags = 0;
-
-	if (!tty)
-		return 0;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-
-	bs = ch->ch_bs;
-	if (!bs)
-		return 0;
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-	tmask = ch->ch_tsize - 1;
-	head = readw(&bs->tx_head) & tmask;
-	tail = readw(&bs->tx_tail) & tmask;
-
-	ret = tail - head - 1;
-	if (ret < 0)
-		ret += ch->ch_tsize;
-
-	/* Limit printer to maxcps */
-	ret = dgap_maxcps_room(ch, un, ret);
-
-	/*
-	 * If we are the printer device, leave space for
-	 * possibly both the on and off strings.
-	 */
-	if (un->un_type == DGAP_PRINT) {
-		if (!(ch->ch_flags & CH_PRON))
-			ret -= ch->ch_digi.digi_onlen;
-		ret -= ch->ch_digi.digi_offlen;
-	} else {
-		if (ch->ch_flags & CH_PRON)
-			ret -= ch->ch_digi.digi_offlen;
-	}
-
-	if (ret < 0)
-		ret = 0;
-
-	/*
-	 * Schedule FEP to wake us up if needed.
-	 *
-	 * TODO:  This might be overkill...
-	 * Do we really need to schedule callbacks from the FEP
-	 * in every case?  Can we get smarter based on ret?
-	 */
-	dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	return ret;
-}
-
-/*
- * dgap_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
-			  int count)
-{
-	struct channel_t *ch;
-	struct un_t *un;
-	struct bs_t __iomem *bs;
-	char __iomem *vaddr;
-	u16 head, tail, tmask, remain;
-	int bufcount, n;
-	ulong lock_flags;
-
-	if (!tty)
-		return 0;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-
-	bs = ch->ch_bs;
-	if (!bs)
-		return 0;
-
-	if (!count)
-		return 0;
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-	/* Get our space available for the channel from the board */
-	tmask = ch->ch_tsize - 1;
-	head = readw(&(bs->tx_head)) & tmask;
-	tail = readw(&(bs->tx_tail)) & tmask;
-
-	bufcount = tail - head - 1;
-	if (bufcount < 0)
-		bufcount += ch->ch_tsize;
-
-	/*
-	 * Limit printer output to maxcps overall, with bursts allowed
-	 * up to bufsize characters.
-	 */
-	bufcount = dgap_maxcps_room(ch, un, bufcount);
-
-	/*
-	 * Take minimum of what the user wants to send, and the
-	 * space available in the FEP buffer.
-	 */
-	count = min(count, bufcount);
-
-	/*
-	 * Bail if no space left.
-	 */
-	if (count <= 0) {
-		dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-		return 0;
-	}
-
-	/*
-	 * Output the printer ON string, if we are in terminal mode, but
-	 * need to be in printer mode.
-	 */
-	if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
-		dgap_wmove(ch, ch->ch_digi.digi_onstr,
-			   (int)ch->ch_digi.digi_onlen);
-		head = readw(&bs->tx_head) & tmask;
-		ch->ch_flags |= CH_PRON;
-	}
-
-	/*
-	 * On the other hand, output the printer OFF string, if we are
-	 * currently in printer mode, but need to output to the terminal.
-	 */
-	if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
-		dgap_wmove(ch, ch->ch_digi.digi_offstr,
-			   (int)ch->ch_digi.digi_offlen);
-		head = readw(&bs->tx_head) & tmask;
-		ch->ch_flags &= ~CH_PRON;
-	}
-
-	n = count;
-
-	/*
-	 * If the write wraps over the top of the circular buffer,
-	 * move the portion up to the wrap point, and reset the
-	 * pointers to the bottom.
-	 */
-	remain = ch->ch_tstart + ch->ch_tsize - head;
-
-	if (n >= remain) {
-		n -= remain;
-		vaddr = ch->ch_taddr + head;
-
-		memcpy_toio(vaddr, (u8 *)buf, remain);
-
-		head = ch->ch_tstart;
-		buf += remain;
-	}
-
-	if (n > 0) {
-		/*
-		 * Move rest of data.
-		 */
-		vaddr = ch->ch_taddr + head;
-		remain = n;
-
-		memcpy_toio(vaddr, (u8 *)buf, remain);
-		head += remain;
-	}
-
-	if (count) {
-		ch->ch_txcount += count;
-		head &= tmask;
-		writew(head, &bs->tx_head);
-	}
-
-	dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-
-	/*
-	 * If this is the print device, and the
-	 * printer is still on, we need to turn it
-	 * off before going idle.  If the buffer is
-	 * non-empty, wait until it goes empty.
-	 * Otherwise turn it off right now.
-	 */
-	if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
-		tail = readw(&bs->tx_tail) & tmask;
-
-		if (tail != head) {
-			un->un_flags |= UN_EMPTY;
-			writeb(1, &bs->iempty);
-		} else {
-			dgap_wmove(ch, ch->ch_digi.digi_offstr,
-				   (int)ch->ch_digi.digi_offlen);
-			head = readw(&bs->tx_head) & tmask;
-			ch->ch_flags &= ~CH_PRON;
-		}
-	}
-
-	/* Update printer buffer empty time. */
-	if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
-	    && (ch->ch_digi.digi_bufsize > 0)) {
-		ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
-	}
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	return count;
-}
-
-/*
- * dgap_tty_put_char()
- *
- * Put a character into the transmit buffer
- *
- *      - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
-	/*
-	 * Simply call tty_write.
-	 */
-	dgap_tty_write(tty, &c, 1);
-	return 1;
-}
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_tty_tiocmget(struct tty_struct *tty)
-{
-	struct channel_t *ch;
-	struct un_t *un;
-	int result;
-	u8 mstat;
-	ulong lock_flags;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return -EIO;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -EIO;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return -EIO;
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-	mstat = readb(&ch->ch_bs->m_stat);
-	/* Append any outbound signals that might be pending... */
-	mstat |= ch->ch_mostat;
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	result = 0;
-
-	if (mstat & D_DTR(ch))
-		result |= TIOCM_DTR;
-	if (mstat & D_RTS(ch))
-		result |= TIOCM_RTS;
-	if (mstat & D_CTS(ch))
-		result |= TIOCM_CTS;
-	if (mstat & D_DSR(ch))
-		result |= TIOCM_DSR;
-	if (mstat & D_RI(ch))
-		result |= TIOCM_RI;
-	if (mstat & D_CD(ch))
-		result |= TIOCM_CD;
-
-	return result;
-}
-
-/*
- * dgap_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_tty_tiocmset(struct tty_struct *tty,
-			     unsigned int set, unsigned int clear)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return -EIO;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -EIO;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return -EIO;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return -EIO;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	if (set & TIOCM_RTS) {
-		ch->ch_mforce |= D_RTS(ch);
-		ch->ch_mval   |= D_RTS(ch);
-	}
-
-	if (set & TIOCM_DTR) {
-		ch->ch_mforce |= D_DTR(ch);
-		ch->ch_mval   |= D_DTR(ch);
-	}
-
-	if (clear & TIOCM_RTS) {
-		ch->ch_mforce |= D_RTS(ch);
-		ch->ch_mval   &= ~(D_RTS(ch));
-	}
-
-	if (clear & TIOCM_DTR) {
-		ch->ch_mforce |= D_DTR(ch);
-		ch->ch_mval   &= ~(D_DTR(ch));
-	}
-
-	dgap_param(ch, bd, un->un_type);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	return 0;
-}
-
-/*
- * dgap_tty_send_break()
- *
- * Send a Break, called by ld.
- */
-static int dgap_tty_send_break(struct tty_struct *tty, int msec)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return -EIO;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -EIO;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return -EIO;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return -EIO;
-
-	switch (msec) {
-	case -1:
-		msec = 0xFFFF;
-		break;
-	case 0:
-		msec = 1;
-		break;
-	default:
-		msec /= 10;
-		break;
-	}
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-#if 0
-	dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-#endif
-	dgap_cmdw(ch, SBREAK, (u16)msec, 0);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	return 0;
-}
-
-/*
- * dgap_tty_wait_until_sent()
- *
- * wait until data has been transmitted, called by ld.
- */
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
-{
-	dgap_wait_for_drain(tty);
-}
-
-/*
- * dgap_tty_send_xchar()
- *
- * send a high priority character, called by ld.
- */
-static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	/*
-	 * This is technically what we should do.
-	 * However, the NIST tests specifically want
-	 * to see each XON or XOFF character that it
-	 * sends, so let's just send each character
-	 * by hand...
-	 */
-#if 0
-	if (c == STOP_CHAR(tty))
-		dgap_cmdw(ch, RPAUSE, 0, 0);
-	else if (c == START_CHAR(tty))
-		dgap_cmdw(ch, RRESUME, 0, 0);
-	else
-		dgap_wmove(ch, &c, 1);
-#else
-	dgap_wmove(ch, &c, 1);
-#endif
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
-{
-	int result;
-	u8 mstat;
-	ulong lock_flags;
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-	mstat = readb(&ch->ch_bs->m_stat);
-	/* Append any outbound signals that might be pending... */
-	mstat |= ch->ch_mostat;
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	result = 0;
-
-	if (mstat & D_DTR(ch))
-		result |= TIOCM_DTR;
-	if (mstat & D_RTS(ch))
-		result |= TIOCM_RTS;
-	if (mstat & D_CTS(ch))
-		result |= TIOCM_CTS;
-	if (mstat & D_DSR(ch))
-		result |= TIOCM_DSR;
-	if (mstat & D_RI(ch))
-		result |= TIOCM_RI;
-	if (mstat & D_CD(ch))
-		result |= TIOCM_CD;
-
-	return put_user(result, value);
-}
-
-/*
- * dgap_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd,
-			       struct un_t *un, unsigned int command,
-			       unsigned int __user *value)
-{
-	int ret;
-	unsigned int arg;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	ret = get_user(arg, value);
-	if (ret)
-		return ret;
-
-	switch (command) {
-	case TIOCMBIS:
-		if (arg & TIOCM_RTS) {
-			ch->ch_mforce |= D_RTS(ch);
-			ch->ch_mval   |= D_RTS(ch);
-		}
-
-		if (arg & TIOCM_DTR) {
-			ch->ch_mforce |= D_DTR(ch);
-			ch->ch_mval   |= D_DTR(ch);
-		}
-
-		break;
-
-	case TIOCMBIC:
-		if (arg & TIOCM_RTS) {
-			ch->ch_mforce |= D_RTS(ch);
-			ch->ch_mval   &= ~(D_RTS(ch));
-		}
-
-		if (arg & TIOCM_DTR) {
-			ch->ch_mforce |= D_DTR(ch);
-			ch->ch_mval   &= ~(D_DTR(ch));
-		}
-
-		break;
-
-	case TIOCMSET:
-		ch->ch_mforce = D_DTR(ch) | D_RTS(ch);
-
-		if (arg & TIOCM_RTS)
-			ch->ch_mval |= D_RTS(ch);
-		else
-			ch->ch_mval &= ~(D_RTS(ch));
-
-		if (arg & TIOCM_DTR)
-			ch->ch_mval |= (D_DTR(ch));
-		else
-			ch->ch_mval &= ~(D_DTR(ch));
-
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	dgap_param(ch, bd, un->un_type);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	return 0;
-}
-
-/*
- * dgap_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- *
- *
- *
- */
-static int dgap_tty_digigeta(struct channel_t *ch,
-			     struct digi_t __user *retinfo)
-{
-	struct digi_t tmp;
-	ulong lock_flags;
-
-	if (!retinfo)
-		return -EFAULT;
-
-	memset(&tmp, 0, sizeof(tmp));
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-	memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
-		return -EFAULT;
-
-	return 0;
-}
-
-/*
- * dgap_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- *
- *
- *
- */
-static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
-			     struct un_t *un, struct digi_t __user *new_info)
-{
-	struct digi_t new_digi;
-	ulong lock_flags = 0;
-	unsigned long lock_flags2;
-
-	if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t)))
-		return -EFAULT;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
-
-	if (ch->ch_digi.digi_maxcps < 1)
-		ch->ch_digi.digi_maxcps = 1;
-
-	if (ch->ch_digi.digi_maxcps > 10000)
-		ch->ch_digi.digi_maxcps = 10000;
-
-	if (ch->ch_digi.digi_bufsize < 10)
-		ch->ch_digi.digi_bufsize = 10;
-
-	if (ch->ch_digi.digi_maxchar < 1)
-		ch->ch_digi.digi_maxchar = 1;
-
-	if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
-		ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
-
-	if (ch->ch_digi.digi_onlen > DIGI_PLEN)
-		ch->ch_digi.digi_onlen = DIGI_PLEN;
-
-	if (ch->ch_digi.digi_offlen > DIGI_PLEN)
-		ch->ch_digi.digi_offlen = DIGI_PLEN;
-
-	dgap_param(ch, bd, un->un_type);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	return 0;
-}
-
-/*
- * dgap_tty_digigetedelay()
- *
- * Ioctl to get the current edelay setting.
- *
- *
- *
- */
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
-{
-	struct channel_t *ch;
-	struct un_t *un;
-	int tmp;
-	ulong lock_flags;
-
-	if (!retinfo)
-		return -EFAULT;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return -EFAULT;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -EFAULT;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return -EFAULT;
-
-	memset(&tmp, 0, sizeof(tmp));
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-	tmp = readw(&ch->ch_bs->edelay);
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
-		return -EFAULT;
-
-	return 0;
-}
-
-/*
- * dgap_tty_digisetedelay()
- *
- * Ioctl to set the EDELAY setting
- *
- */
-static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
-				  struct un_t *un, int __user *new_info)
-{
-	int new_digi;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (copy_from_user(&new_digi, new_info, sizeof(int)))
-		return -EFAULT;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	writew((u16)new_digi, &ch->ch_bs->edelay);
-
-	dgap_param(ch, bd, un->un_type);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	return 0;
-}
-
-/*
- * dgap_tty_digigetcustombaud()
- *
- * Ioctl to get the current custom baud rate setting.
- */
-static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
-				      int __user *retinfo)
-{
-	int tmp;
-	ulong lock_flags;
-
-	if (!retinfo)
-		return -EFAULT;
-
-	memset(&tmp, 0, sizeof(tmp));
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-	tmp = dgap_get_custom_baud(ch);
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
-		return -EFAULT;
-
-	return 0;
-}
-
-/*
- * dgap_tty_digisetcustombaud()
- *
- * Ioctl to set the custom baud rate setting
- */
-static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
-				      struct un_t *un, int __user *new_info)
-{
-	uint new_rate;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (copy_from_user(&new_rate, new_info, sizeof(unsigned int)))
-		return -EFAULT;
-
-	if (bd->bd_flags & BD_FEP5PLUS) {
-		spin_lock_irqsave(&bd->bd_lock, lock_flags);
-		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-		ch->ch_custom_speed = new_rate;
-
-		dgap_param(ch, bd, un->un_type);
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-	}
-
-	return 0;
-}
-
-/*
- * dgap_tty_set_termios()
- */
-static void dgap_tty_set_termios(struct tty_struct *tty,
-				 struct ktermios *old_termios)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	unsigned long lock_flags;
-	unsigned long lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	ch->ch_c_cflag   = tty->termios.c_cflag;
-	ch->ch_c_iflag   = tty->termios.c_iflag;
-	ch->ch_c_oflag   = tty->termios.c_oflag;
-	ch->ch_c_lflag   = tty->termios.c_lflag;
-	ch->ch_startc    = tty->termios.c_cc[VSTART];
-	ch->ch_stopc     = tty->termios.c_cc[VSTOP];
-
-	dgap_carrier(ch);
-	dgap_param(ch, bd, un->un_type);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_throttle(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	ch->ch_flags |= (CH_RXBLOCK);
-#if 1
-	dgap_cmdw(ch, RPAUSE, 0, 0);
-#endif
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_unthrottle(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	ch->ch_flags &= ~(CH_RXBLOCK);
-
-#if 1
-	dgap_cmdw(ch, RRESUME, 0, 0);
-#endif
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static struct board_t *find_board_by_major(unsigned int major)
-{
-	unsigned int i;
-
-	for (i = 0; i < MAXBOARDS; i++) {
-		struct board_t *brd = dgap_board[i];
-
-		if (!brd)
-			return NULL;
-		if (major == brd->serial_driver->major ||
-		    major == brd->print_driver->major)
-			return brd;
-	}
-
-	return NULL;
-}
-
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_open()
- *
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
-{
-	struct board_t *brd;
-	struct channel_t *ch;
-	struct un_t *un;
-	struct bs_t __iomem *bs;
-	uint major;
-	uint minor;
-	int rc;
-	ulong lock_flags;
-	ulong lock_flags2;
-	u16 head;
-
-	major = MAJOR(tty_devnum(tty));
-	minor = MINOR(tty_devnum(tty));
-
-	brd = find_board_by_major(major);
-	if (!brd)
-		return -EIO;
-
-	/*
-	 * If the board is not yet in the READY state, sleep until
-	 * it becomes ready or the caller cancels the open.
-	 */
-	rc = wait_event_interruptible(brd->state_wait,
-				      (brd->state & BOARD_READY));
-
-	if (rc)
-		return rc;
-
-	spin_lock_irqsave(&brd->bd_lock, lock_flags);
-
-	/* The wait above should guarantee this cannot happen */
-	if (brd->state != BOARD_READY) {
-		spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	/* If opened device is greater than our number of ports, bail. */
-	if (MINOR(tty_devnum(tty)) > brd->nasync) {
-		spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	ch = brd->channels[minor];
-	if (!ch) {
-		spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	/* Grab channel lock */
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	/* Figure out our type */
-	if (major == brd->serial_driver->major) {
-		un = &brd->channels[minor]->ch_tun;
-		un->un_type = DGAP_SERIAL;
-	} else if (major == brd->print_driver->major) {
-		un = &brd->channels[minor]->ch_pun;
-		un->un_type = DGAP_PRINT;
-	} else {
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	/* Store our unit into driver_data, so we always have it available. */
-	tty->driver_data = un;
-
-	/*
-	 * Error if channel info pointer is NULL.
-	 */
-	bs = ch->ch_bs;
-	if (!bs) {
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	/*
-	 * Initialize tty's
-	 */
-	if (!(un->un_flags & UN_ISOPEN)) {
-		/* Store important variables. */
-		un->un_tty     = tty;
-
-		/* Maybe do something here to the TTY struct as well? */
-	}
-
-	/*
-	 * Initialize if neither terminal nor printer is open.
-	 */
-	if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
-		ch->ch_mforce = 0;
-		ch->ch_mval = 0;
-
-		/*
-		 * Flush input queue.
-		 */
-		head = readw(&bs->rx_head);
-		writew(head, &bs->rx_tail);
-
-		ch->ch_flags = 0;
-		ch->pscan_state = 0;
-		ch->pscan_savechar = 0;
-
-		ch->ch_c_cflag   = tty->termios.c_cflag;
-		ch->ch_c_iflag   = tty->termios.c_iflag;
-		ch->ch_c_oflag   = tty->termios.c_oflag;
-		ch->ch_c_lflag   = tty->termios.c_lflag;
-		ch->ch_startc = tty->termios.c_cc[VSTART];
-		ch->ch_stopc  = tty->termios.c_cc[VSTOP];
-
-		/* TODO: flush our TTY struct here? */
-	}
-
-	dgap_carrier(ch);
-	/*
-	 * Run param in case we changed anything
-	 */
-	dgap_param(ch, brd, un->un_type);
-
-	/*
-	 * follow protocol for opening port
-	 */
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-
-	rc = dgap_block_til_ready(tty, file, ch);
-
-	if (!un->un_tty)
-		return -ENODEV;
-
-	/* No going back now, increment our unit and channel counters */
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-	ch->ch_open_count++;
-	un->un_open_count++;
-	un->un_flags |= (UN_ISOPEN);
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-	return rc;
-}
-
-/*
- * dgap_tty_close()
- *
- */
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-	/*
-	 * Determine if this is the last close or not - and if we agree about
-	 * which type of close it is with the Line Discipline
-	 */
-	if ((tty->count == 1) && (un->un_open_count != 1)) {
-		/*
-		 * Uh, oh.  tty->count is 1, which means that the tty
-		 * structure will be freed.  un_open_count should always
-		 * be one in these conditions.  If it's greater than
-		 * one, we've got real problems, since it means the
-		 * serial port won't be shut down.
-		 */
-		un->un_open_count = 1;
-	}
-
-	if (--un->un_open_count < 0)
-		un->un_open_count = 0;
-
-	ch->ch_open_count--;
-
-	if (ch->ch_open_count && un->un_open_count) {
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-		return;
-	}
-
-	/* OK, it's the last close on the unit */
-
-	un->un_flags |= UN_CLOSING;
-
-	tty->closing = 1;
-
-	/*
-	 * Only officially close channel if count is 0 and
-	 * DIGI_PRINTER bit is not set.
-	 */
-	if ((ch->ch_open_count == 0) &&
-	    !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-		ch->ch_flags &= ~(CH_RXBLOCK);
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
-		/* wait for output to drain */
-		/* This will also return if we take an interrupt */
-
-		dgap_wait_for_drain(tty);
-
-		dgap_tty_flush_buffer(tty);
-		tty_ldisc_flush(tty);
-
-		spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
-		tty->closing = 0;
-
-		/*
-		 * If we have HUPCL set, lower DTR and RTS
-		 */
-		if (ch->ch_c_cflag & HUPCL) {
-			ch->ch_mostat &= ~(D_RTS(ch) | D_DTR(ch));
-			dgap_cmdb(ch, SMODEM, 0, D_DTR(ch) | D_RTS(ch), 0);
-
-			/*
-			 * Go to sleep to ensure RTS/DTR
-			 * have been dropped for modems to see it.
-			 */
-			spin_unlock_irqrestore(&ch->ch_lock,
-					       lock_flags);
-
-			/* .25 second delay for dropping RTS/DTR */
-			schedule_timeout_interruptible(msecs_to_jiffies(250));
-
-			spin_lock_irqsave(&ch->ch_lock, lock_flags);
-		}
-
-		ch->pscan_state = 0;
-		ch->pscan_savechar = 0;
-		ch->ch_baud_info = 0;
-	}
-
-	/*
-	 * turn off print device when closing print device.
-	 */
-	if ((un->un_type == DGAP_PRINT)  && (ch->ch_flags & CH_PRON)) {
-		dgap_wmove(ch, ch->ch_digi.digi_offstr,
-			   (int)ch->ch_digi.digi_offlen);
-		ch->ch_flags &= ~CH_PRON;
-	}
-
-	un->un_tty = NULL;
-	un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
-	tty->driver_data = NULL;
-
-	wake_up_interruptible(&ch->ch_flags_wait);
-	wake_up_interruptible(&un->un_flags_wait);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-}
-
-static void dgap_tty_start(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	dgap_cmdw(ch, RESUMETX, 0, 0);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_stop(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	dgap_cmdw(ch, PAUSETX, 0, 0);
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cook buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data;
- * instead, it means "stop buffering and send it if you
- * haven't already."  Just guess how I figured that out...   SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	ulong lock_flags;
-	ulong lock_flags2;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	/* TODO: Do something here */
-
-	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
-
-/*
- * dgap_tty_ioctl()
- *
- * The usual assortment of ioctl's
- */
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
-			  unsigned long arg)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	int rc;
-	u16 head;
-	ulong lock_flags = 0;
-	ulong lock_flags2 = 0;
-	void __user *uarg = (void __user *)arg;
-
-	if (!tty || tty->magic != TTY_MAGIC)
-		return -ENODEV;
-
-	un = tty->driver_data;
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return -ENODEV;
-
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return -ENODEV;
-
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return -ENODEV;
-
-	spin_lock_irqsave(&bd->bd_lock, lock_flags);
-	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-	if (un->un_open_count <= 0) {
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return -EIO;
-	}
-
-	switch (cmd) {
-	/* Here are all the standard ioctl's that we MUST implement */
-	case TCSBRK:
-		/*
-		 * TCSBRK is SVID version: non-zero arg --> no break
-		 * this behaviour is exploited by tcdrain().
-		 *
-		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
-		 * between 0.25 and 0.5 seconds so we'll ask for something
-		 * in the middle: 0.375 seconds.
-		 */
-		rc = tty_check_change(tty);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		if (rc)
-			return rc;
-
-		rc = dgap_wait_for_drain(tty);
-
-		if (rc)
-			return -EINTR;
-
-		spin_lock_irqsave(&bd->bd_lock, lock_flags);
-		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-		if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
-			dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0);
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		return 0;
-
-	case TCSBRKP:
-		/* Support for POSIX tcsendbreak().
-		 *
-		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
-		 * between 0.25 and 0.5 seconds so we'll ask for something
-		 * in the middle: 0.375 seconds.
-		 */
-		rc = tty_check_change(tty);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		if (rc)
-			return rc;
-
-		rc = dgap_wait_for_drain(tty);
-		if (rc)
-			return -EINTR;
-
-		spin_lock_irqsave(&bd->bd_lock, lock_flags);
-		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-		dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0);
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		return 0;
-
-	case TIOCSBRK:
-		/*
-		 * FEP5 doesn't support turning on a break unconditionally.
-		 * The FEP5 device will stop sending a break automatically
-		 * after the specified time value that was sent when turning on
-		 * the break.
-		 */
-		rc = tty_check_change(tty);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		if (rc)
-			return rc;
-
-		rc = dgap_wait_for_drain(tty);
-		if (rc)
-			return -EINTR;
-
-		spin_lock_irqsave(&bd->bd_lock, lock_flags);
-		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
-		dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0);
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		return 0;
-
-	case TIOCCBRK:
-		/*
-		 * FEP5 doesn't support turning off a break unconditionally.
-		 * The FEP5 device will stop sending a break automatically
-		 * after the specified time value that was sent when turning on
-		 * the break.
-		 */
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return 0;
-
-	case TIOCGSOFTCAR:
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		return put_user(C_CLOCAL(tty) ? 1 : 0,
-				(unsigned long __user *)arg);
-
-	case TIOCSSOFTCAR:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		rc = get_user(arg, (unsigned long __user *)arg);
-		if (rc)
-			return rc;
-
-		spin_lock_irqsave(&bd->bd_lock, lock_flags);
-		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-		tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
-						(arg ? CLOCAL : 0));
-		dgap_param(ch, bd, un->un_type);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		return 0;
-
-	case TIOCMGET:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_get_modem_info(ch, uarg);
-
-	case TIOCMBIS:
-	case TIOCMBIC:
-	case TIOCMSET:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_set_modem_info(ch, bd, un, cmd, uarg);
-
-		/*
-		 * Here are any additional ioctl's that we want to implement
-		 */
-
-	case TCFLSH:
-		/*
-		 * The linux tty driver doesn't have a flush
-		 * input routine for the driver, assuming all backed
-		 * up data is in the line disc. buffers.  However,
-		 * we all know that's not the case.  Here, we
-		 * act on the ioctl, but then lie and say we didn't
-		 * so the line discipline will process the flush
-		 * also.
-		 */
-		rc = tty_check_change(tty);
-		if (rc) {
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			return rc;
-		}
-
-		if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
-			if (!(un->un_type == DGAP_PRINT)) {
-				head = readw(&ch->ch_bs->rx_head);
-				writew(head, &ch->ch_bs->rx_tail);
-				writeb(0, &ch->ch_bs->orun);
-			}
-		}
-
-		if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) {
-			/* pretend we didn't recognize this IOCTL */
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-			return -ENOIOCTLCMD;
-		}
-
-		ch->ch_flags &= ~CH_STOP;
-		head = readw(&ch->ch_bs->tx_head);
-		dgap_cmdw(ch, FLUSHTX, (u16)head, 0);
-		dgap_cmdw(ch, RESUMETX, 0, 0);
-		if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
-			ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY);
-			wake_up_interruptible(&ch->ch_tun.un_flags_wait);
-		}
-		if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) {
-			ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY);
-			wake_up_interruptible(&ch->ch_pun.un_flags_wait);
-		}
-		if (waitqueue_active(&tty->write_wait))
-			wake_up_interruptible(&tty->write_wait);
-
-		/* Can't hold any locks when calling tty_wakeup! */
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		tty_wakeup(tty);
-
-		/* pretend we didn't recognize this IOCTL */
-		return -ENOIOCTLCMD;
-
-	case TCSETSF:
-	case TCSETSW:
-		/*
-		 * The linux tty driver doesn't have a flush
-		 * input routine for the driver, assuming all backed
-		 * up data is in the line disc. buffers.  However,
-		 * we all know that's not the case.  Here, we
-		 * act on the ioctl, but then lie and say we didn't
-		 * so the line discipline will process the flush
-		 * also.
-		 */
-		if (cmd == TCSETSF) {
-			/* flush rx */
-			ch->ch_flags &= ~CH_STOP;
-			head = readw(&ch->ch_bs->rx_head);
-			writew(head, &ch->ch_bs->rx_tail);
-		}
-
-		/* now wait for all the output to drain */
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		rc = dgap_wait_for_drain(tty);
-		if (rc)
-			return -EINTR;
-
-		/* pretend we didn't recognize this */
-		return -ENOIOCTLCMD;
-
-	case TCSETAW:
-
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		rc = dgap_wait_for_drain(tty);
-		if (rc)
-			return -EINTR;
-
-		/* pretend we didn't recognize this */
-		return -ENOIOCTLCMD;
-
-	case TCXONC:
-		/*
-		 * The Linux Line Discipline (LD) would do this for us if we
-		 * let it, but we have the special firmware options to do this
-		 * the "right way" regardless of hardware or software flow
-		 * control so we'll do it ourselves instead of letting the LD
-		 * do it.
-		 */
-		rc = tty_check_change(tty);
-		if (rc) {
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			return rc;
-		}
-
-		switch (arg) {
-		case TCOON:
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			dgap_tty_start(tty);
-			return 0;
-		case TCOOFF:
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			dgap_tty_stop(tty);
-			return 0;
-		case TCION:
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			/* Make the ld do it */
-			return -ENOIOCTLCMD;
-		case TCIOFF:
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			/* Make the ld do it */
-			return -ENOIOCTLCMD;
-		default:
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			return -EINVAL;
-		}
-
-	case DIGI_GETA:
-		/* get information for ditty */
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_tty_digigeta(ch, uarg);
-
-	case DIGI_SETAW:
-	case DIGI_SETAF:
-
-		/* set information for ditty */
-		if (cmd == (DIGI_SETAW)) {
-			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-			rc = dgap_wait_for_drain(tty);
-			if (rc)
-				return -EINTR;
-			spin_lock_irqsave(&bd->bd_lock, lock_flags);
-			spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-		} else
-			tty_ldisc_flush(tty);
-		/* fall through */
-
-	case DIGI_SETA:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_tty_digiseta(ch, bd, un, uarg);
-
-	case DIGI_GEDELAY:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_tty_digigetedelay(tty, uarg);
-
-	case DIGI_SEDELAY:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_tty_digisetedelay(ch, bd, un, uarg);
-
-	case DIGI_GETCUSTOMBAUD:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_tty_digigetcustombaud(ch, un, uarg);
-
-	case DIGI_SETCUSTOMBAUD:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return dgap_tty_digisetcustombaud(ch, bd, un, uarg);
-
-	case DIGI_RESET_PORT:
-		dgap_firmware_reset_port(ch);
-		dgap_param(ch, bd, un->un_type);
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-		return 0;
-
-	default:
-		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-		return -ENOIOCTLCMD;
-	}
-}
-
-static const struct tty_operations dgap_tty_ops = {
-	.open = dgap_tty_open,
-	.close = dgap_tty_close,
-	.write = dgap_tty_write,
-	.write_room = dgap_tty_write_room,
-	.flush_buffer = dgap_tty_flush_buffer,
-	.chars_in_buffer = dgap_tty_chars_in_buffer,
-	.flush_chars = dgap_tty_flush_chars,
-	.ioctl = dgap_tty_ioctl,
-	.set_termios = dgap_tty_set_termios,
-	.stop = dgap_tty_stop,
-	.start = dgap_tty_start,
-	.throttle = dgap_tty_throttle,
-	.unthrottle = dgap_tty_unthrottle,
-	.hangup = dgap_tty_hangup,
-	.put_char = dgap_tty_put_char,
-	.tiocmget = dgap_tty_tiocmget,
-	.tiocmset = dgap_tty_tiocmset,
-	.break_ctl = dgap_tty_send_break,
-	.wait_until_sent = dgap_tty_wait_until_sent,
-	.send_xchar = dgap_tty_send_xchar
-};
-
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-static int dgap_tty_register(struct board_t *brd)
-{
-	int rc;
-
-	brd->serial_driver = tty_alloc_driver(MAXPORTS,
-					      TTY_DRIVER_REAL_RAW |
-					      TTY_DRIVER_DYNAMIC_DEV |
-					      TTY_DRIVER_HARDWARE_BREAK);
-	if (IS_ERR(brd->serial_driver))
-		return PTR_ERR(brd->serial_driver);
-
-	snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
-		 brd->boardnum);
-	brd->serial_driver->name = brd->serial_name;
-	brd->serial_driver->name_base = 0;
-	brd->serial_driver->major = 0;
-	brd->serial_driver->minor_start = 0;
-	brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
-	brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
-	brd->serial_driver->init_termios = dgap_default_termios;
-	brd->serial_driver->driver_name = DRVSTR;
-
-	/*
-	 * Entry points for driver.  Called by the kernel from
-	 * tty_io.c and n_tty.c.
-	 */
-	tty_set_operations(brd->serial_driver, &dgap_tty_ops);
-
-	/*
-	 * If we're doing transparent print, we have to do all of the above
-	 * again, separately so we don't get the LD confused about what major
-	 * we are when we get into the dgap_tty_open() routine.
-	 */
-	brd->print_driver = tty_alloc_driver(MAXPORTS,
-					     TTY_DRIVER_REAL_RAW |
-					     TTY_DRIVER_DYNAMIC_DEV |
-					     TTY_DRIVER_HARDWARE_BREAK);
-	if (IS_ERR(brd->print_driver)) {
-		rc = PTR_ERR(brd->print_driver);
-		goto free_serial_drv;
-	}
-
-	snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
-		 brd->boardnum);
-	brd->print_driver->name = brd->print_name;
-	brd->print_driver->name_base = 0;
-	brd->print_driver->major = 0;
-	brd->print_driver->minor_start = 0;
-	brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
-	brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
-	brd->print_driver->init_termios = dgap_default_termios;
-	brd->print_driver->driver_name = DRVSTR;
-
-	/*
-	 * Entry points for driver.  Called by the kernel from
-	 * tty_io.c and n_tty.c.
-	 */
-	tty_set_operations(brd->print_driver, &dgap_tty_ops);
-
-	/* Register tty devices */
-	rc = tty_register_driver(brd->serial_driver);
-	if (rc < 0)
-		goto free_print_drv;
-
-	/* Register Transparent Print devices */
-	rc = tty_register_driver(brd->print_driver);
-	if (rc < 0)
-		goto unregister_serial_drv;
-
-	return 0;
-
-unregister_serial_drv:
-	tty_unregister_driver(brd->serial_driver);
-free_print_drv:
-	put_tty_driver(brd->print_driver);
-free_serial_drv:
-	put_tty_driver(brd->serial_driver);
-
-	return rc;
-}
-
-static void dgap_tty_unregister(struct board_t *brd)
-{
-	tty_unregister_driver(brd->print_driver);
-	tty_unregister_driver(brd->serial_driver);
-	put_tty_driver(brd->print_driver);
-	put_tty_driver(brd->serial_driver);
-}
-
-static int dgap_alloc_flipbuf(struct board_t *brd)
-{
-	/*
-	 * allocate flip buffer for board.
-	 */
-	brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
-	if (!brd->flipbuf)
-		return -ENOMEM;
-
-	brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
-	if (!brd->flipflagbuf) {
-		kfree(brd->flipbuf);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void dgap_free_flipbuf(struct board_t *brd)
-{
-	kfree(brd->flipbuf);
-	kfree(brd->flipflagbuf);
-}
-
-static struct board_t *dgap_verify_board(struct device *p)
-{
-	struct board_t *bd;
-
-	if (!p)
-		return NULL;
-
-	bd = dev_get_drvdata(p);
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
-		return NULL;
-
-	return bd;
-}
-
-static ssize_t dgap_ports_state_show(struct device *p,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count,
-			"%d %s\n", bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_open_count ? "Open" : "Closed");
-	}
-	return count;
-}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
-
-static ssize_t dgap_ports_baud_show(struct device *p,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++) {
-		count +=  snprintf(buf + count, PAGE_SIZE - count, "%d %d\n",
-				   bd->channels[i]->ch_portnum,
-				   bd->channels[i]->ch_baud_info);
-	}
-	return count;
-}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
-
-static ssize_t dgap_ports_msignals_show(struct device *p,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++) {
-		if (bd->channels[i]->ch_open_count)
-			count += snprintf(buf + count, PAGE_SIZE - count,
-				"%d %s %s %s %s %s %s\n",
-				bd->channels[i]->ch_portnum,
-				(bd->channels[i]->ch_mostat &
-				 UART_MCR_RTS) ? "RTS" : "",
-				(bd->channels[i]->ch_mistat &
-				 UART_MSR_CTS) ? "CTS" : "",
-				(bd->channels[i]->ch_mostat &
-				 UART_MCR_DTR) ? "DTR" : "",
-				(bd->channels[i]->ch_mistat &
-				 UART_MSR_DSR) ? "DSR" : "",
-				(bd->channels[i]->ch_mistat &
-				 UART_MSR_DCD) ? "DCD" : "",
-				(bd->channels[i]->ch_mistat &
-				 UART_MSR_RI)  ? "RI"  : "");
-		else
-			count += snprintf(buf + count, PAGE_SIZE - count,
-				"%d\n", bd->channels[i]->ch_portnum);
-	}
-	return count;
-}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
-
-static ssize_t dgap_ports_iflag_show(struct device *p,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_c_iflag);
-	return count;
-}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
-
-static ssize_t dgap_ports_cflag_show(struct device *p,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_c_cflag);
-	return count;
-}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
-
-static ssize_t dgap_ports_oflag_show(struct device *p,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_c_oflag);
-	return count;
-}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
-
-static ssize_t dgap_ports_lflag_show(struct device *p,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_c_lflag);
-	return count;
-}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
-
-static ssize_t dgap_ports_digi_flag_show(struct device *p,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_digi.digi_flags);
-	return count;
-}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
-
-static ssize_t dgap_ports_rxcount_show(struct device *p,
-				       struct device_attribute *attr,
-				       char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_rxcount);
-	return count;
-}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
-
-static ssize_t dgap_ports_txcount_show(struct device *p,
-				       struct device_attribute *attr,
-				       char *buf)
-{
-	struct board_t *bd;
-	int count = 0;
-	unsigned int i;
-
-	bd = dgap_verify_board(p);
-	if (!bd)
-		return 0;
-
-	for (i = 0; i < bd->nasync; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
-				  bd->channels[i]->ch_portnum,
-				  bd->channels[i]->ch_txcount);
-	return count;
-}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-
-static ssize_t dgap_tty_state_show(struct device *d,
-				   struct device_attribute *attr,
-				   char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ?
-			"Open" : "Closed");
-}
-static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
-
-static ssize_t dgap_tty_baud_show(struct device *d,
-				  struct device_attribute *attr,
-				  char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
-}
-static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
-
-static ssize_t dgap_tty_msignals_show(struct device *d,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	if (ch->ch_open_count) {
-		return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
-			(ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
-			(ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
-			(ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
-			(ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
-			(ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
-			(ch->ch_mistat & UART_MSR_RI)  ? "RI"  : "");
-	}
-	return 0;
-}
-static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
-
-static ssize_t dgap_tty_iflag_show(struct device *d,
-				   struct device_attribute *attr,
-				   char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
-
-static ssize_t dgap_tty_cflag_show(struct device *d,
-				   struct device_attribute *attr,
-				   char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
-
-static ssize_t dgap_tty_oflag_show(struct device *d,
-				   struct device_attribute *attr,
-				   char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
-
-static ssize_t dgap_tty_lflag_show(struct device *d,
-				   struct device_attribute *attr,
-				   char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
-
-static ssize_t dgap_tty_digi_flag_show(struct device *d,
-				       struct device_attribute *attr,
-				       char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
-
-static ssize_t dgap_tty_rxcount_show(struct device *d,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
-
-static ssize_t dgap_tty_txcount_show(struct device *d,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
-
-static ssize_t dgap_tty_name_show(struct device *d,
-				  struct device_attribute *attr,
-				  char *buf)
-{
-	struct board_t *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-	int cn;
-	int bn;
-	struct cnode *cptr;
-	int found = FALSE;
-	int ncount = 0;
-	int starto = 0;
-	int i;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGAP_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	bn = bd->boardnum;
-	cn = ch->ch_portnum;
-
-	for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
-		if ((cptr->type == BNODE) &&
-		    ((cptr->u.board.type == APORT2_920P) ||
-		     (cptr->u.board.type == APORT4_920P) ||
-		     (cptr->u.board.type == APORT8_920P) ||
-		     (cptr->u.board.type == PAPORT4) ||
-		     (cptr->u.board.type == PAPORT8))) {
-			found = TRUE;
-			if (cptr->u.board.v_start)
-				starto = cptr->u.board.start;
-			else
-				starto = 1;
-		}
-
-		if (cptr->type == TNODE && found == TRUE) {
-			char *ptr1;
-
-			if (strstr(cptr->u.ttyname, "tty")) {
-				ptr1 = cptr->u.ttyname;
-				ptr1 += 3;
-			} else
-				ptr1 = cptr->u.ttyname;
-
-			for (i = 0; i < dgap_config_get_num_prts(bd); i++) {
-				if (cn != i)
-					continue;
-
-				return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
-						(un->un_type == DGAP_PRINT) ?
-						 "pr" : "tty",
-						ptr1, i + starto);
-			}
-		}
-
-		if (cptr->type == CNODE) {
-			for (i = 0; i < cptr->u.conc.nport; i++) {
-				if (cn != (i + ncount))
-					continue;
-
-				return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
-						(un->un_type == DGAP_PRINT) ?
-						 "pr" : "tty",
-						cptr->u.conc.id,
-						i + (cptr->u.conc.v_start ?
-						     cptr->u.conc.start : 1));
-			}
-
-			ncount += cptr->u.conc.nport;
-		}
-
-		if (cptr->type == MNODE) {
-			for (i = 0; i < cptr->u.module.nport; i++) {
-				if (cn != (i + ncount))
-					continue;
-
-				return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
-						(un->un_type == DGAP_PRINT) ?
-						 "pr" : "tty",
-						cptr->u.module.id,
-						i + (cptr->u.module.v_start ?
-						     cptr->u.module.start : 1));
-			}
-
-			ncount += cptr->u.module.nport;
-		}
-	}
-
-	return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
-		(un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
-}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
-
-static struct attribute *dgap_sysfs_tty_entries[] = {
-	&dev_attr_state.attr,
-	&dev_attr_baud.attr,
-	&dev_attr_msignals.attr,
-	&dev_attr_iflag.attr,
-	&dev_attr_cflag.attr,
-	&dev_attr_oflag.attr,
-	&dev_attr_lflag.attr,
-	&dev_attr_digi_flag.attr,
-	&dev_attr_rxcount.attr,
-	&dev_attr_txcount.attr,
-	&dev_attr_custom_name.attr,
-	NULL
-};
-
-
-/*
- * This function creates the sysfs files that export each signal status.
- * Each value is put in a separate file.
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd)
-{
-	dev_set_drvdata(&bd->pdev->dev, bd);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_state);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_baud);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
-	device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount);
-}
-
-/* Remove all the sysfs files created for the board's ports. */
-static void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_state);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount);
-}
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
-{
-	u8 __iomem *addr;
-	uint offset;
-	unsigned int i;
-
-	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
-		return;
-
-	addr = brd->re_map_membase;
-
-	/*
-	 * clear POST area
-	 */
-	for (i = 0; i < 16; i++)
-		writeb(0, addr + POSTAREA + i);
-
-	/*
-	 * Download bios
-	 */
-	offset = 0x1000;
-	memcpy_toio(addr + offset, ubios, len);
-
-	writel(0x0bf00401, addr);
-	writel(0, (addr + 4));
-
-	/* Clear the reset, and change states. */
-	writeb(FEPCLR, brd->re_map_port);
-}
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static int dgap_test_bios(struct board_t *brd)
-{
-	u8 __iomem *addr;
-	u16 word;
-	u16 err1;
-	u16 err2;
-
-	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
-		return -EINVAL;
-
-	addr = brd->re_map_membase;
-	word = readw(addr + POSTAREA);
-
-	/*
-	 * It can take 5-6 seconds for a board to
-	 * pass the bios self test and post results.
-	 * Give it 10 seconds.
-	 */
-	brd->wait_for_bios = 0;
-	while (brd->wait_for_bios < 1000) {
-		/* Check to see if BIOS thinks board is good. (GD). */
-		if (word == *(u16 *)"GD")
-			return 0;
-		msleep_interruptible(10);
-		brd->wait_for_bios++;
-		word = readw(addr + POSTAREA);
-	}
-
-	/* Gave up on the board after waiting too long */
-	err1 = readw(addr + SEQUENCE);
-	err2 = readw(addr + ERROR);
-	dev_warn(&brd->pdev->dev, "%s failed diagnostics.  Error #(%x,%x).\n",
-		 brd->name, err1, err2);
-	brd->state = BOARD_FAILED;
-	brd->dpastatus = BD_NOBIOS;
-
-	return -EIO;
-}
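
The firmware-handshake helpers here (and dgap_test_fep() below) share one pattern: poll a 16-bit mailbox word every ~10 ms until it contains an ASCII magic pair ("GD" for the BIOS, "OS" for the FEP) or the retry budget runs out. The following is only a rough, stand-alone user-space sketch of that pattern; the simulated mailbox and the read_post_word() helper are invented for the illustration and are not part of the driver.

#include <stdint.h>
#include <string.h>
#include <unistd.h>

static uint16_t post_area;			/* simulated POST mailbox word */

static uint16_t read_post_word(void)		/* stand-in for readw()        */
{
	return post_area;
}

/* Poll until the mailbox holds the magic pair, or give up after max_polls. */
static int wait_for_magic(const char magic[2], unsigned int max_polls)
{
	uint16_t want;

	memcpy(&want, magic, sizeof(want));	/* same idea as *(u16 *)"GD"   */

	while (max_polls--) {
		if (read_post_word() == want)
			return 0;		/* firmware reported ready     */
		usleep(10 * 1000);		/* ~10 ms between polls        */
	}
	return -1;				/* timed out                   */
}

int main(void)
{
	memcpy(&post_area, "GD", sizeof(post_area));	/* pretend the BIOS passed */
	return wait_for_magic("GD", 1000) ? 1 : 0;
}
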
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
-{
-	u8 __iomem *addr;
-	uint offset;
-
-	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
-		return;
-
-	addr = brd->re_map_membase;
-
-	/*
-	 * Download FEP
-	 */
-	offset = 0x1000;
-	memcpy_toio(addr + offset, ufep, len);
-
-	/*
-	 * If board is a concentrator product, we need to give
-	 * it its config string describing how the concentrators look.
-	 */
-	if ((brd->type == PCX) || (brd->type == PEPC)) {
-		u8 string[100];
-		u8 __iomem *config;
-		u8 *xconfig;
-		unsigned int i = 0;
-
-		xconfig = dgap_create_config_string(brd, string);
-
-		/* Write string to board memory */
-		config = addr + CONFIG;
-		for (; i < CONFIGSIZE; i++, config++, xconfig++) {
-			writeb(*xconfig, config);
-			if ((*xconfig & 0xff) == 0xff)
-				break;
-		}
-	}
-
-	writel(0xbfc01004, (addr + 0xc34));
-	writel(0x3, (addr + 0xc30));
-}
-
-/*
- * Waits for the FEP to report that it is ready for us to use.
- */
-static int dgap_test_fep(struct board_t *brd)
-{
-	u8 __iomem *addr;
-	u16 word;
-	u16 err1;
-	u16 err2;
-
-	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
-		return -EINVAL;
-
-	addr = brd->re_map_membase;
-	word = readw(addr + FEPSTAT);
-
-	/*
-	 * It can take 2-3 seconds for the FEP to
-	 * be up and running. Give it 5 secs.
-	 */
-	brd->wait_for_fep = 0;
-	while (brd->wait_for_fep < 500) {
-		/* Check to see if FEP is up and running now. */
-		if (word == *(u16 *)"OS") {
-			/*
-			 * Check to see if the board can support FEP5+ commands.
-			 */
-			word = readw(addr + FEP5_PLUS);
-			if (word == *(u16 *)"5A")
-				brd->bd_flags |= BD_FEP5PLUS;
-
-			return 0;
-		}
-		msleep_interruptible(10);
-		brd->wait_for_fep++;
-		word = readw(addr + FEPSTAT);
-	}
-
-	/* Gave up on the board after waiting too long */
-	err1 = readw(addr + SEQUENCE);
-	err2 = readw(addr + ERROR);
-	dev_warn(&brd->pdev->dev,
-		 "FEPOS for %s not functioning.  Error #(%x,%x).\n",
-		 brd->name, err1, err2);
-	brd->state = BOARD_FAILED;
-	brd->dpastatus = BD_NOFEP;
-
-	return -EIO;
-}
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
-	u8 check;
-	u32 check1;
-	u32 check2;
-	unsigned int i;
-
-	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
-	    !brd->re_map_membase || !brd->re_map_port)
-		return;
-
-	/* FEPRST does not vary among supported boards */
-	writeb(FEPRST, brd->re_map_port);
-
-	for (i = 0; i <= 1000; i++) {
-		check = readb(brd->re_map_port) & 0xe;
-		if (check == FEPRST)
-			break;
-		udelay(10);
-	}
-	if (i > 1000) {
-		dev_warn(&brd->pdev->dev,
-			 "dgap: Board not resetting...  Failing board.\n");
-		brd->state = BOARD_FAILED;
-		brd->dpastatus = BD_NOFEP;
-		return;
-	}
-
-	/*
-	 * Make sure there really is memory out there.
-	 */
-	writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
-	writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
-	check1 = readl(brd->re_map_membase + LOWMEM);
-	check2 = readl(brd->re_map_membase + HIGHMEM);
-
-	if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
-		dev_warn(&brd->pdev->dev,
-			 "No memory at %p for board.\n",
-			 brd->re_map_membase);
-		brd->state = BOARD_FAILED;
-		brd->dpastatus = BD_NOFEP;
-		return;
-	}
-}
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
-{
-	char __iomem *vaddr;
-	u16 offset;
-	struct downld_t *to_dp;
-
-	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
-		return;
-
-	vaddr = brd->re_map_membase;
-
-	offset = readw((u16 *)(vaddr + DOWNREQ));
-	to_dp = (struct downld_t *)(vaddr + (int)offset);
-	memcpy_toio(to_dp, uaddr, len);
-
-	/* Tell card we have data for it */
-	writew(0, vaddr + (DOWNREQ));
-
-	brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-#endif
-
-#define EXPANSION_ROM_SIZE	(64 * 1024)
-#define FEP5_ROM_MAGIC		(0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
-	u32 magic;
-	u32 base_offset;
-	u16 rom_offset;
-	u16 vpd_offset;
-	u16 image_length;
-	u16 i;
-	u8 byte1;
-	u8 byte2;
-
-	/*
-	 * Poke the magic number at the PCI Rom Address location.
-	 * If VPD is supported, the value read from that address
-	 * will be non-zero.
-	 */
-	magic = FEP5_ROM_MAGIC;
-	pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-	pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
-	/* VPD not supported, bail */
-	if (!magic)
-		return;
-
-	/*
-	 * To get to the OTPROM memory, we have to send the board's base
-	 * address OR'ed with 1 into the PCI ROM Address location.
-	 */
-	magic = brd->membase | 0x01;
-	pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-	pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
-	byte1 = readb(brd->re_map_membase);
-	byte2 = readb(brd->re_map_membase + 1);
-
-	/*
-	 * If the board correctly swapped to the OTPROM memory,
-	 * the first 2 bytes (header) should be 0x55, 0xAA
-	 */
-	if (byte1 == 0x55 && byte2 == 0xAA) {
-		base_offset = 0;
-
-		/*
-		 * We have to run through all the OTPROM memory looking
-		 * for the VPD offset.
-		 */
-		while (base_offset <= EXPANSION_ROM_SIZE) {
-			/*
-			 * Lots of magic numbers here.
-			 *
-			 * The VPD offset is located inside the ROM Data
-			 * Structure.
-			 *
-			 * We also have to remember the length of each
-			 * ROM Data Structure, so we can "hop" to the next
-			 * entry if the VPD isn't in the current
-			 * ROM Data Structure.
-			 */
-			rom_offset = readw(brd->re_map_membase +
-						base_offset + 0x18);
-			image_length = readw(brd->re_map_membase +
-						rom_offset + 0x10) * 512;
-			vpd_offset = readw(brd->re_map_membase +
-						rom_offset + 0x08);
-
-			/* Found the VPD entry */
-			if (vpd_offset)
-				break;
-
-			/* We didn't find a VPD entry, go to next ROM entry. */
-			base_offset += image_length;
-
-			byte1 = readb(brd->re_map_membase + base_offset);
-			byte2 = readb(brd->re_map_membase + base_offset + 1);
-
-			/*
-			 * If the new ROM offset doesn't have 0x55, 0xAA
-			 * as its header, we have run out of ROM.
-			 */
-			if (byte1 != 0x55 || byte2 != 0xAA)
-				break;
-		}
-
-		/*
-		 * If we have a VPD offset, then mark the board
-		 * as having a valid VPD, and copy VPDSIZE (512) bytes of
-		 * that VPD to the buffer we have in our board structure.
-		 */
-		if (vpd_offset) {
-			brd->bd_flags |= BD_HAS_VPD;
-			for (i = 0; i < VPDSIZE; i++) {
-				brd->vpd[i] = readb(brd->re_map_membase +
-							vpd_offset + i);
-			}
-		}
-	}
-
-	/*
-	 * We MUST poke the magic number at the PCI Rom Address location again.
-	 * This makes the card report the regular board memory back to us,
-	 * rather than the OTPROM memory.
-	 */
-	magic = FEP5_ROM_MAGIC;
-	pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
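
dgap_get_vpd() locates the VPD by walking the card's expansion ROM images: each image starts with the 0x55 0xAA signature, the word at header offset 0x18 points at the PCI data structure, the word at +0x10 inside that structure gives the image length in 512-byte blocks, and the word at +0x08 is the VPD pointer. Below is only a rough, stand-alone sketch of that walk over an in-memory copy of the ROM; the rd16() and find_vpd_offset() helpers are invented for the illustration, and (as in the code above) the 0x18 pointer is resolved from the start of the ROM window.

#include <stdint.h>
#include <stddef.h>

#define ROM_SIZE	(64 * 1024)

static uint16_t rd16(const uint8_t *rom, size_t off)
{
	return (uint16_t)(rom[off] | (rom[off + 1] << 8));
}

/* Return the VPD offset within the ROM, or 0 if none was found. */
static uint16_t find_vpd_offset(const uint8_t *rom)
{
	size_t base = 0;

	while (base + 0x1a <= ROM_SIZE) {
		uint16_t pcir, vpd;
		size_t len;

		/* Every valid ROM image starts with the 0x55 0xAA signature. */
		if (rom[base] != 0x55 || rom[base + 1] != 0xAA)
			break;

		pcir = rd16(rom, base + 0x18);		/* PCI data structure     */
		if ((size_t)pcir + 0x12 > ROM_SIZE)
			break;				/* malformed pointer      */
		len = (size_t)rd16(rom, pcir + 0x10) * 512;
		vpd = rd16(rom, pcir + 0x08);

		if (vpd)
			return vpd;			/* found the VPD entry    */
		if (!len)
			break;				/* guard against 0 length */
		base += len;				/* hop to the next image  */
	}
	return 0;
}

int main(void)
{
	static uint8_t rom[ROM_SIZE];	/* all zeroes: no signature, so no VPD */

	return find_vpd_offset(rom) == 0 ? 0 : 1;
}
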
-
-
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
-					    char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
-					  const char *buf, size_t count)
-{
-	if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
-		return -EINVAL;
-	return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
-		   dgap_driver_pollrate_store);
-
-
-static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
-	int rc = 0;
-	struct device_driver *driverfs = &dgap_driver->driver;
-
-	rc |= driver_create_file(driverfs, &driver_attr_version);
-	rc |= driver_create_file(driverfs, &driver_attr_boards);
-	rc |= driver_create_file(driverfs, &driver_attr_maxboards);
-	rc |= driver_create_file(driverfs, &driver_attr_pollrate);
-	rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
-
-	return rc;
-}
-
-static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
-{
-	struct device_driver *driverfs = &dgap_driver->driver;
-
-	driver_remove_file(driverfs, &driver_attr_version);
-	driver_remove_file(driverfs, &driver_attr_boards);
-	driver_remove_file(driverfs, &driver_attr_maxboards);
-	driver_remove_file(driverfs, &driver_attr_pollrate);
-	driver_remove_file(driverfs, &driver_attr_pollcounter);
-}
-
-static struct attribute_group dgap_tty_attribute_group = {
-	.name = NULL,
-	.attrs = dgap_sysfs_tty_entries,
-};
-
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
-{
-	int ret;
-
-	ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
-	if (ret)
-		return;
-
-	dev_set_drvdata(c, un);
-}
-
-static void dgap_remove_tty_sysfs(struct device *c)
-{
-	sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
-}
-
-/*
- * Create pr and tty device entries
- */
-static int dgap_tty_register_ports(struct board_t *brd)
-{
-	struct channel_t *ch;
-	int i;
-	int ret;
-
-	brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
-					GFP_KERNEL);
-	if (!brd->serial_ports)
-		return -ENOMEM;
-
-	brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
-					GFP_KERNEL);
-	if (!brd->printer_ports) {
-		ret = -ENOMEM;
-		goto free_serial_ports;
-	}
-
-	for (i = 0; i < brd->nasync; i++) {
-		tty_port_init(&brd->serial_ports[i]);
-		tty_port_init(&brd->printer_ports[i]);
-	}
-
-	ch = brd->channels[0];
-	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
-		struct device *classp;
-
-		classp = tty_port_register_device(&brd->serial_ports[i],
-						  brd->serial_driver,
-						  i, NULL);
-
-		if (IS_ERR(classp)) {
-			ret = PTR_ERR(classp);
-			goto unregister_ttys;
-		}
-
-		dgap_create_tty_sysfs(&ch->ch_tun, classp);
-		ch->ch_tun.un_sysfs = classp;
-
-		classp = tty_port_register_device(&brd->printer_ports[i],
-						  brd->print_driver,
-						  i, NULL);
-
-		if (IS_ERR(classp)) {
-			ret = PTR_ERR(classp);
-			goto unregister_ttys;
-		}
-
-		dgap_create_tty_sysfs(&ch->ch_pun, classp);
-		ch->ch_pun.un_sysfs = classp;
-	}
-	dgap_create_ports_sysfiles(brd);
-
-	return 0;
-
-unregister_ttys:
-	while (i >= 0) {
-		ch = brd->channels[i];
-		if (ch->ch_tun.un_sysfs) {
-			dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
-			tty_unregister_device(brd->serial_driver, i);
-		}
-
-		if (ch->ch_pun.un_sysfs) {
-			dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
-			tty_unregister_device(brd->print_driver, i);
-		}
-		i--;
-	}
-
-	for (i = 0; i < brd->nasync; i++) {
-		tty_port_destroy(&brd->serial_ports[i]);
-		tty_port_destroy(&brd->printer_ports[i]);
-	}
-
-	kfree(brd->printer_ports);
-	brd->printer_ports = NULL;
-
-free_serial_ports:
-	kfree(brd->serial_ports);
-	brd->serial_ports = NULL;
-
-	return ret;
-}
-
-/*
- * dgap_cleanup_tty()
- *
- * Uninitialize the TTY portion of this driver.  Free all memory and
- * resources.
- */
-static void dgap_cleanup_tty(struct board_t *brd)
-{
-	struct device *dev;
-	unsigned int i;
-
-	for (i = 0; i < brd->nasync; i++) {
-		tty_port_destroy(&brd->serial_ports[i]);
-		dev = brd->channels[i]->ch_tun.un_sysfs;
-		dgap_remove_tty_sysfs(dev);
-		tty_unregister_device(brd->serial_driver, i);
-	}
-	tty_unregister_driver(brd->serial_driver);
-	put_tty_driver(brd->serial_driver);
-	kfree(brd->serial_ports);
-
-	for (i = 0; i < brd->nasync; i++) {
-		tty_port_destroy(&brd->printer_ports[i]);
-		dev = brd->channels[i]->ch_pun.un_sysfs;
-		dgap_remove_tty_sysfs(dev);
-		tty_unregister_device(brd->print_driver, i);
-	}
-	tty_unregister_driver(brd->print_driver);
-	put_tty_driver(brd->print_driver);
-	kfree(brd->printer_ports);
-}
-
-static int dgap_request_irq(struct board_t *brd)
-{
-	int rc;
-
-	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
-		return -ENODEV;
-
-	/*
-	 * Set up our interrupt handler if we are set to do interrupts.
-	 */
-	if (dgap_config_get_useintr(brd) && brd->irq) {
-		rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
-
-		if (!rc)
-			brd->intr_used = 1;
-	}
-	return 0;
-}
-
-static void dgap_free_irq(struct board_t *brd)
-{
-	if (brd->intr_used && brd->irq)
-		free_irq(brd->irq, brd);
-}
-
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
-			      struct board_t *brd)
-{
-	const struct firmware *fw;
-	char *tmp_ptr;
-	int ret;
-	char *dgap_config_buf;
-
-	dgap_get_vpd(brd);
-	dgap_do_reset_board(brd);
-
-	if (fw_info[card_type].conf_name) {
-		ret = request_firmware(&fw, fw_info[card_type].conf_name,
-				       &pdev->dev);
-		if (ret) {
-			dev_err(&pdev->dev, "config file %s not found\n",
-				fw_info[card_type].conf_name);
-			return ret;
-		}
-
-		dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
-		if (!dgap_config_buf) {
-			release_firmware(fw);
-			return -ENOMEM;
-		}
-
-		memcpy(dgap_config_buf, fw->data, fw->size);
-		release_firmware(fw);
-
-		/*
-		 * Preserve dgap_config_buf, as dgap_parsefile()
-		 * would otherwise alter it.
-		 */
-		tmp_ptr = dgap_config_buf;
-
-		if (dgap_parsefile(&tmp_ptr) != 0) {
-			kfree(dgap_config_buf);
-			return -EINVAL;
-		}
-		kfree(dgap_config_buf);
-	}
-
-	/*
-	 * Match this board to a config the user created for us.
-	 */
-	brd->bd_config =
-		dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
-
-	/*
-	 * Because the 4 port Xr products share the same PCI ID
-	 * as the 8 port Xr products, if we receive a NULL config
-	 * back, and this is a PAPORT8 board, retry with a
-	 * PAPORT4 attempt as well.
-	 */
-	if (brd->type == PAPORT8 && !brd->bd_config)
-		brd->bd_config =
-			dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
-
-	if (!brd->bd_config) {
-		dev_err(&pdev->dev, "No valid configuration found\n");
-		return -EINVAL;
-	}
-
-	if (fw_info[card_type].bios_name) {
-		ret = request_firmware(&fw, fw_info[card_type].bios_name,
-				       &pdev->dev);
-		if (ret) {
-			dev_err(&pdev->dev, "bios file %s not found\n",
-				fw_info[card_type].bios_name);
-			return ret;
-		}
-		dgap_do_bios_load(brd, fw->data, fw->size);
-		release_firmware(fw);
-
-		/* Wait for BIOS to test board... */
-		ret = dgap_test_bios(brd);
-		if (ret)
-			return ret;
-	}
-
-	if (fw_info[card_type].fep_name) {
-		ret = request_firmware(&fw, fw_info[card_type].fep_name,
-				       &pdev->dev);
-		if (ret) {
-			dev_err(&pdev->dev, "dgap: fep file %s not found\n",
-				fw_info[card_type].fep_name);
-			return ret;
-		}
-		dgap_do_fep_load(brd, fw->data, fw->size);
-		release_firmware(fw);
-
-		/* Wait for FEP to load on board... */
-		ret = dgap_test_fep(brd);
-		if (ret)
-			return ret;
-	}
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-	/*
-	 * If this is a CX or EPCX, we need to see if the firmware
-	 * is requesting a concentrator image from us.
-	 */
-	if ((brd->type == PCX) || (brd->type == PEPC)) {
-		u8 __iomem *vaddr = brd->re_map_membase;
-		/* Nonzero if FEP is requesting a concentrator image. */
-		u16 check = readw(vaddr + DOWNREQ);
-
-		if (fw_info[card_type].con_name && check) {
-			ret = request_firmware(&fw,
-					       fw_info[card_type].con_name,
-					       &pdev->dev);
-			if (ret) {
-				dev_err(&pdev->dev, "conc file %s not found\n",
-					fw_info[card_type].con_name);
-				return ret;
-			}
-			/*
-			 * dgap_do_conc_load() copies the image to the offset
-			 * the FEP requested and acknowledges the request.
-			 */
-			dgap_do_conc_load(brd, (u8 *)fw->data, fw->size);
-			release_firmware(fw);
-		}
-	}
-#endif
-
-	return 0;
-}
-
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem.  Called once per board after the board has been
- * downloaded and initialized.
- */
-static int dgap_tty_init(struct board_t *brd)
-{
-	int i;
-	int tlw;
-	uint true_count;
-	u8 __iomem *vaddr;
-	u8 modem;
-	struct channel_t *ch;
-	struct bs_t __iomem *bs;
-	struct cm_t __iomem *cm;
-	int ret;
-
-	/*
-	 * Initialize board structure elements.
-	 */
-
-	vaddr = brd->re_map_membase;
-	true_count = readw((vaddr + NCHAN));
-
-	brd->nasync = dgap_config_get_num_prts(brd);
-
-	if (!brd->nasync)
-		brd->nasync = brd->maxports;
-
-	if (brd->nasync > brd->maxports)
-		brd->nasync = brd->maxports;
-
-	if (true_count != brd->nasync) {
-		dev_warn(&brd->pdev->dev,
-			 "%s configured for %d ports, has %d ports.\n",
-			 brd->name, brd->nasync, true_count);
-
-		if ((brd->type == PPCM) &&
-		    (true_count == 64 || true_count == 0)) {
-			dev_warn(&brd->pdev->dev,
-				 "Please make SURE the EBI cable running from the card\n");
-			dev_warn(&brd->pdev->dev,
-				 "to each EM module is plugged into EBI IN!\n");
-		}
-
-		brd->nasync = true_count;
-
-		/* If no ports, don't bother going any further */
-		if (!brd->nasync) {
-			brd->state = BOARD_FAILED;
-			brd->dpastatus = BD_NOFEP;
-			return -EIO;
-		}
-	}
-
-	/*
-	 * Allocate channel memory that might not have been allocated
-	 * when the driver was first loaded.
-	 */
-	for (i = 0; i < brd->nasync; i++) {
-		brd->channels[i] =
-			kzalloc(sizeof(struct channel_t), GFP_KERNEL);
-		if (!brd->channels[i]) {
-			ret = -ENOMEM;
-			goto free_chan;
-		}
-	}
-
-	ch = brd->channels[0];
-	vaddr = brd->re_map_membase;
-
-	bs = (struct bs_t __iomem *)((ulong)vaddr + CHANBUF);
-	cm = (struct cm_t __iomem *)((ulong)vaddr + CMDBUF);
-
-	brd->bd_bs = bs;
-
-	/* Set up channel variables */
-	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
-		spin_lock_init(&ch->ch_lock);
-
-		/* Store all our magic numbers */
-		ch->magic = DGAP_CHANNEL_MAGIC;
-		ch->ch_tun.magic = DGAP_UNIT_MAGIC;
-		ch->ch_tun.un_type = DGAP_SERIAL;
-		ch->ch_tun.un_ch = ch;
-		ch->ch_tun.un_dev = i;
-
-		ch->ch_pun.magic = DGAP_UNIT_MAGIC;
-		ch->ch_pun.un_type = DGAP_PRINT;
-		ch->ch_pun.un_ch = ch;
-		ch->ch_pun.un_dev = i;
-
-		ch->ch_vaddr = vaddr;
-		ch->ch_bs = bs;
-		ch->ch_cm = cm;
-		ch->ch_bd = brd;
-		ch->ch_portnum = i;
-		ch->ch_digi = dgap_digi_init;
-
-		/*
-		 * Set up digi dsr and dcd bits based on altpin flag.
-		 */
-		if (dgap_config_get_altpin(brd)) {
-			ch->ch_dsr	= DM_CD;
-			ch->ch_cd	= DM_DSR;
-			ch->ch_digi.digi_flags |= DIGI_ALTPIN;
-		} else {
-			ch->ch_cd	= DM_CD;
-			ch->ch_dsr	= DM_DSR;
-		}
-
-		ch->ch_taddr = vaddr + (ioread16(&ch->ch_bs->tx_seg) << 4);
-		ch->ch_raddr = vaddr + (ioread16(&ch->ch_bs->rx_seg) << 4);
-		ch->ch_tx_win = 0;
-		ch->ch_rx_win = 0;
-		ch->ch_tsize = readw(&ch->ch_bs->tx_max) + 1;
-		ch->ch_rsize = readw(&ch->ch_bs->rx_max) + 1;
-		ch->ch_tstart = 0;
-		ch->ch_rstart = 0;
-
-		/*
-		 * Set queue water marks, interrupt mask,
-		 * and general tty parameters.
-		 */
-		tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
-						ch->ch_tsize / 2;
-		ch->ch_tlw = tlw;
-
-		dgap_cmdw(ch, STLOW, tlw, 0);
-
-		dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
-
-		dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
-
-		ch->ch_mistat = readb(&ch->ch_bs->m_stat);
-
-		init_waitqueue_head(&ch->ch_flags_wait);
-		init_waitqueue_head(&ch->ch_tun.un_flags_wait);
-		init_waitqueue_head(&ch->ch_pun.un_flags_wait);
-
-		/* Turn on all modem interrupts for now */
-		modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
-		writeb(modem, &ch->ch_bs->m_int);
-
-		/*
-		 * Set edelay to 0 if interrupts are turned on,
-		 * otherwise set edelay to the usual 100.
-		 */
-		if (brd->intr_used)
-			writew(0, &ch->ch_bs->edelay);
-		else
-			writew(100, &ch->ch_bs->edelay);
-
-		writeb(1, &ch->ch_bs->idata);
-	}
-
-	return 0;
-
-free_chan:
-	while (--i >= 0) {
-		kfree(brd->channels[i]);
-		brd->channels[i] = NULL;
-	}
-	return ret;
-}
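
For a sense of scale of the watermark setup above: with an 8 KiB transmit buffer (ch_tsize = 8192, which is >= 2000) the transmit low-water mark becomes 5 * 8192 / 8 = 5120 bytes, and with a 4 KiB receive buffer the receive marks become 4096 / 2 = 2048 (low) and 7 * 4096 / 8 = 3584 (high). The actual buffer sizes are read back from the FEP, so these figures are only illustrative.
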
-
-/*
- * dgap_tty_free()
- *
- * Free the channels which are allocated in dgap_tty_init().
- */
-static void dgap_tty_free(struct board_t *brd)
-{
-	int i;
-
-	for (i = 0; i < brd->nasync; i++)
-		kfree(brd->channels[i]);
-}
-
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	int rc;
-	struct board_t *brd;
-
-	if (dgap_numboards >= MAXBOARDS)
-		return -EPERM;
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return -EIO;
-
-	brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
-	if (IS_ERR(brd))
-		return PTR_ERR(brd);
-
-	rc = dgap_firmware_load(pdev, ent->driver_data, brd);
-	if (rc)
-		goto cleanup_brd;
-
-	rc = dgap_alloc_flipbuf(brd);
-	if (rc)
-		goto cleanup_brd;
-
-	rc = dgap_tty_register(brd);
-	if (rc)
-		goto free_flipbuf;
-
-	rc = dgap_request_irq(brd);
-	if (rc)
-		goto unregister_tty;
-
-	/*
-	 * Do tty device initialization.
-	 */
-	rc = dgap_tty_init(brd);
-	if (rc < 0)
-		goto free_irq;
-
-	rc = dgap_tty_register_ports(brd);
-	if (rc)
-		goto tty_free;
-
-	brd->state = BOARD_READY;
-	brd->dpastatus = BD_RUNNING;
-
-	dgap_board[dgap_numboards++] = brd;
-
-	return 0;
-
-tty_free:
-	dgap_tty_free(brd);
-free_irq:
-	dgap_free_irq(brd);
-unregister_tty:
-	dgap_tty_unregister(brd);
-free_flipbuf:
-	dgap_free_flipbuf(brd);
-cleanup_brd:
-	dgap_cleanup_nodes();
-	dgap_unmap(brd);
-	kfree(brd);
-
-	return rc;
-}
-
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
-{
-	unsigned int i;
-
-	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
-		return;
-
-	dgap_free_irq(brd);
-
-	tasklet_kill(&brd->helper_tasklet);
-
-	dgap_unmap(brd);
-
-	/* Free all allocated channels structs */
-	for (i = 0; i < MAXPORTS ; i++)
-		kfree(brd->channels[i]);
-
-	kfree(brd->flipbuf);
-	kfree(brd->flipflagbuf);
-
-	dgap_board[brd->boardnum] = NULL;
-
-	kfree(brd);
-}
-
-static void dgap_stop(bool removesys, struct pci_driver *drv)
-{
-	unsigned long lock_flags;
-
-	spin_lock_irqsave(&dgap_poll_lock, lock_flags);
-	dgap_poll_stop = 1;
-	spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
-	del_timer_sync(&dgap_poll_timer);
-	if (removesys)
-		dgap_remove_driver_sysfiles(drv);
-
-	device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
-	class_destroy(dgap_class);
-	unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
-}
-
-static void dgap_remove_one(struct pci_dev *dev)
-{
-	unsigned int i;
-	struct pci_driver *drv = to_pci_driver(dev->dev.driver);
-
-	dgap_stop(true, drv);
-	for (i = 0; i < dgap_numboards; ++i) {
-		dgap_remove_ports_sysfiles(dgap_board[i]);
-		dgap_cleanup_tty(dgap_board[i]);
-		dgap_cleanup_board(dgap_board[i]);
-	}
-
-	dgap_cleanup_nodes();
-}
-
-static struct pci_driver dgap_driver = {
-	.name		= "dgap",
-	.probe		= dgap_init_one,
-	.id_table	= dgap_pci_tbl,
-	.remove		= dgap_remove_one,
-};
-
-/*
- * Start of driver.
- */
-static int dgap_start(void)
-{
-	int rc;
-	unsigned long flags;
-	struct device *device;
-
-	dgap_numboards = 0;
-
-	pr_info("For the tools package please visit http://www.digi.com\n");
-
-	/*
-	 * Register our base character device into the kernel.
-	 */
-
-	/*
-	 * Register management/dpa devices
-	 */
-	rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
-	if (rc < 0)
-		return rc;
-
-	dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
-	if (IS_ERR(dgap_class)) {
-		rc = PTR_ERR(dgap_class);
-		goto failed_class;
-	}
-
-	device = device_create(dgap_class, NULL,
-			       MKDEV(DIGI_DGAP_MAJOR, 0),
-			       NULL, "dgap_mgmt");
-	if (IS_ERR(device)) {
-		rc = PTR_ERR(device);
-		goto failed_device;
-	}
-
-	/* Start the poller */
-	spin_lock_irqsave(&dgap_poll_lock, flags);
-	setup_timer(&dgap_poll_timer, dgap_poll_handler, 0);
-	dgap_poll_timer.data = 0;
-	dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
-	dgap_poll_timer.expires = dgap_poll_time;
-	spin_unlock_irqrestore(&dgap_poll_lock, flags);
-
-	add_timer(&dgap_poll_timer);
-
-	return rc;
-
-failed_device:
-	class_destroy(dgap_class);
-failed_class:
-	unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
-	return rc;
-}
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-/*
- * init_module()
- *
- * Module load.  This is where it all starts.
- */
-static int dgap_init_module(void)
-{
-	int rc;
-
-	pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
-
-	rc = dgap_start();
-	if (rc)
-		return rc;
-
-	rc = pci_register_driver(&dgap_driver);
-	if (rc) {
-		dgap_stop(false, NULL);
-		return rc;
-	}
-
-	rc = dgap_create_driver_sysfiles(&dgap_driver);
-	if (rc)
-		goto err_unregister;
-
-	dgap_driver_state = DRIVER_READY;
-
-	return 0;
-
-err_unregister:
-	pci_unregister_driver(&dgap_driver);
-	return rc;
-}
-
-/*
- * dgap_cleanup_module()
- *
- * Module unload.  This is where it all ends.
- */
-static void dgap_cleanup_module(void)
-{
-	if (dgap_numboards)
-		pci_unregister_driver(&dgap_driver);
-}
-
-module_init(dgap_init_module);
-module_exit(dgap_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
deleted file mode 100644
index c84dbf2..0000000
--- a/drivers/staging/dgap/dgap.h
+++ /dev/null
@@ -1,1229 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- *      Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.  See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *	NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * Driver includes
- *
- *************************************************************************/
-
-#ifndef __DGAP_DRIVER_H
-#define __DGAP_DRIVER_H
-
-#include <linux/types.h>        /* To pick up the various Linux types */
-#include <linux/tty.h>          /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h>    /* For irqreturn_t type */
-
-#ifndef TRUE
-# define TRUE 1
-#endif
-
-#ifndef FALSE
-# define FALSE 0
-#endif
-
-#if !defined(TTY_FLIPBUF_SIZE)
-# define TTY_FLIPBUF_SIZE 512
-#endif
-
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
-
-/*
- * Driver identification
- */
-#define	DG_NAME		"dgap-1.3-16"
-#define	DG_PART		"40002347_C"
-#define	DRVSTR		"dgap"
-
-/*
- * defines from dgap_pci.h
- */
-#define PCIMAX 32			/* maximum number of PCI boards */
-
-#define DIGI_VID		0x114F
-
-#define PCI_DEV_EPC_DID		0x0002
-#define PCI_DEV_XEM_DID		0x0004
-#define PCI_DEV_XR_DID		0x0005
-#define PCI_DEV_CX_DID		0x0006
-#define PCI_DEV_XRJ_DID		0x0009	/* PLX-based Xr adapter */
-#define PCI_DEV_XR_IBM_DID	0x0011	/* IBM 8-port Async Adapter */
-#define PCI_DEV_XR_BULL_DID	0x0013	/* BULL 8-port Async Adapter */
-#define PCI_DEV_XR_SAIP_DID	0x001c	/* SAIP card - Xr adapter */
-#define PCI_DEV_XR_422_DID	0x0012	/* Xr-422 */
-#define PCI_DEV_920_2_DID	0x0034	/* XR-Plus 920 K, 2 port */
-#define PCI_DEV_920_4_DID	0x0026	/* XR-Plus 920 K, 4 port */
-#define PCI_DEV_920_8_DID	0x0027	/* XR-Plus 920 K, 8 port */
-#define PCI_DEV_EPCJ_DID	0x000a	/* PLX 9060 chip for PCI  */
-#define PCI_DEV_CX_IBM_DID	0x001b	/* IBM 128-port Async Adapter */
-#define PCI_DEV_920_8_HP_DID	0x0058	/* HP XR-Plus 920 K, 8 port */
-#define PCI_DEV_XEM_HP_DID	0x0059  /* HP Xem PCI */
-
-#define PCI_DEV_XEM_NAME	"AccelePort XEM"
-#define PCI_DEV_CX_NAME		"AccelePort CX"
-#define PCI_DEV_XR_NAME		"AccelePort Xr"
-#define PCI_DEV_XRJ_NAME	"AccelePort Xr (PLX)"
-#define PCI_DEV_XR_SAIP_NAME	"AccelePort Xr (SAIP)"
-#define PCI_DEV_920_2_NAME	"AccelePort Xr920 2 port"
-#define PCI_DEV_920_4_NAME	"AccelePort Xr920 4 port"
-#define PCI_DEV_920_8_NAME	"AccelePort Xr920 8 port"
-#define PCI_DEV_XR_422_NAME	"AccelePort Xr 422"
-#define PCI_DEV_EPCJ_NAME	"AccelePort EPC (PLX)"
-#define PCI_DEV_XR_BULL_NAME	"AccelePort Xr (BULL)"
-#define PCI_DEV_XR_IBM_NAME	"AccelePort Xr (IBM)"
-#define PCI_DEV_CX_IBM_NAME	"AccelePort CX (IBM)"
-#define PCI_DEV_920_8_HP_NAME	"AccelePort Xr920 8 port (HP)"
-#define PCI_DEV_XEM_HP_NAME	"AccelePort XEM (HP)"
-
-/*
- * On the PCI boards, there is no I/O space allocated.
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space.  The board memory
- * will be mapped into the low 2MB of the 4MB memory space.
- */
-
-/* Potential location of PCI BIOS from E0000 to FFFFF */
-#define PCI_BIOS_SIZE		0x00020000
-
-/* Size of Memory and I/O for PCI (4MB) */
-#define PCI_RAM_SIZE		0x00400000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE		0x00200000
-
-/* Max PCI Window Size (2MB) */
-#define PCI_WIN_SIZE		0x00200000
-
-#define PCI_WIN_SHIFT		21 /* 21 bits max */
-
-/* Offset of I/O in Memory (2MB) */
-#define PCI_IO_OFFSET		0x00200000
-
-/* Size of IO (2MB) */
-#define PCI_IO_SIZE_DGAP	0x00200000
-
-/* Number of boards we support at once. */
-#define	MAXBOARDS	32
-#define	MAXPORTS	224
-#define MAXTTYNAMELEN	200
-
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGAP_BOARD_MAGIC	0x5c6df104
-#define DGAP_CHANNEL_MAGIC	0x6c6df104
-#define DGAP_UNIT_MAGIC		0x7c6df104
-
-/* Serial port types */
-#define DGAP_SERIAL		0
-#define DGAP_PRINT		1
-
-#define	SERIAL_TYPE_NORMAL	1
-
-/* 4 extra for alignment play space */
-#define WRITEBUFLEN		((4096) + 4)
-#define MYFLIPLEN		N_TTY_BUF_SIZE
-
-#define SBREAK_TIME 0x25
-#define U2BSIZE 0x400
-
-#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
-
-/*
- * Our major for the mgmt devices.
- *
- * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
- * 22 has become obsolete now that the "cu" devices have
- * been removed from 2.6.
- * Also, this *IS* the epca driver, just PCI only now.
- */
-#ifndef DIGI_DGAP_MAJOR
-# define DIGI_DGAP_MAJOR         22
-#endif
-
-/*
- * The parameters we use to define the periods of the moving averages.
- */
-#define		MA_PERIOD	(HZ / 10)
-#define		SMA_DUR		(1 * HZ)
-#define		EMA_DUR		(1 * HZ)
-#define		SMA_NPERIODS	(SMA_DUR / MA_PERIOD)
-#define		EMA_NPERIODS	(EMA_DUR / MA_PERIOD)
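
Worked out, these periods are HZ-independent: MA_PERIOD is HZ/10 jiffies (100 ms), so SMA_NPERIODS = EMA_NPERIODS = (1 * HZ) / (HZ / 10) = 10, i.e. both moving averages span ten 100 ms polling periods (assuming HZ is a multiple of 10, which holds for the standard kernel HZ choices).
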
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.  This is the same structure that is defined
- * as the default in tty_io.c with the same settings overridden as in serial.c
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define	DEFAULT_IFLAGS	(ICRNL | IXON)
-#define	DEFAULT_OFLAGS	(OPOST | ONLCR)
-#define	DEFAULT_CFLAGS	(B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define	DEFAULT_LFLAGS	(ISIG | ICANON | ECHO | ECHOE | ECHOK | \
-			ECHOCTL | ECHOKE | IEXTEN)
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE ('\0')
-#endif
-
-#define SNIFF_MAX	65536		/* Sniff buffer size (2^n) */
-#define SNIFF_MASK	(SNIFF_MAX - 1)	/* Sniff wrap mask */
-
-#define VPDSIZE (512)
-
-/************************************************************************
- *      FEP memory offsets
- ************************************************************************/
-#define START           0x0004L         /* Execution start address      */
-
-#define CMDBUF          0x0d10L         /* Command (cm_t) structure offset */
-#define CMDSTART        0x0400L         /* Start of command buffer      */
-#define CMDMAX          0x0800L         /* End of command buffer        */
-
-#define EVBUF           0x0d18L         /* Event (ev_t) structure       */
-#define EVSTART         0x0800L         /* Start of event buffer        */
-#define EVMAX           0x0c00L         /* End of event buffer          */
-#define FEP5_PLUS       0x0E40          /* ASCII '5' and ASCII 'A' is here  */
-#define ECS_SEG         0x0E44          /* Segment of the extended      */
-					/* channel structure            */
-#define LINE_SPEED      0x10            /* Offset into ECS_SEG for line */
-					/* speed if the fep has extended */
-					/* capabilities                 */
-
-/* BIOS MAGIC SPOTS */
-#define ERROR           0x0C14L		/* BIOS error code              */
-#define SEQUENCE	0x0C12L		/* BIOS sequence indicator      */
-#define POSTAREA	0x0C00L		/* POST complete message area   */
-
-/* FEP MAGIC SPOTS */
-#define FEPSTAT         POSTAREA        /* OS here when FEP comes up    */
-#define NCHAN           0x0C02L         /* number of ports FEP sees     */
-#define PANIC           0x0C10L         /* PANIC area for FEP           */
-#define KMEMEM          0x0C30L         /* Memory for KME use           */
-#define CONFIG          0x0CD0L         /* Concentrator configuration info */
-#define CONFIGSIZE      0x0030          /* configuration info size      */
-#define DOWNREQ         0x0D00          /* Download request buffer pointer */
-
-#define CHANBUF         0x1000L         /* Async channel (bs_t) structs */
-#define FEPOSSIZE       0x1FFF          /* 8K FEPOS                     */
-
-#define XEMPORTS    0xC02	/*
-				 * Offset in board memory where FEP5 stores
-				 * how many ports it has detected.
-				 * NOTE: FEP5 reports 64 ports when the user
-				 * has the cable in EBI OUT instead of EBI IN.
-				 */
-
-#define FEPCLR      0x00
-#define FEPMEM      0x02
-#define FEPRST      0x04
-#define FEPINT      0x08
-#define FEPMASK     0x0e
-#define FEPWIN      0x80
-
-#define LOWMEM      0x0100
-#define HIGHMEM     0x7f00
-
-#define FEPTIMEOUT 200000
-
-#define ENABLE_INTR	0x0e04		/* Enable interrupts flag */
-#define FEPPOLL_MIN	1		/* minimum of 1 millisecond */
-#define FEPPOLL_MAX	20		/* maximum of 20 milliseconds */
-#define FEPPOLL		0x0c26		/* Fep event poll interval */
-
-#define	IALTPIN		0x0080		/* Input flag to swap DSR <-> DCD */
-
-/************************************************************************
- * FEP supported functions
- ************************************************************************/
-#define SRLOW		0xe0		/* Set receive low water	*/
-#define SRHIGH		0xe1		/* Set receive high water	*/
-#define FLUSHTX		0xe2		/* Flush transmit buffer	*/
-#define PAUSETX		0xe3		/* Pause data transmission	*/
-#define RESUMETX	0xe4		/* Resume data transmission	*/
-#define SMINT		0xe5		/* Set Modem Interrupt		*/
-#define SAFLOWC		0xe6		/* Set Aux. flow control chars	*/
-#define SBREAK		0xe8		/* Send break			*/
-#define SMODEM		0xe9		/* Set 8530 modem control lines	*/
-#define SIFLAG		0xea		/* Set UNIX iflags		*/
-#define SFLOWC		0xeb		/* Set flow control characters	*/
-#define STLOW		0xec		/* Set transmit low water mark	*/
-#define RPAUSE		0xee		/* Pause receive		*/
-#define RRESUME		0xef		/* Resume receive		*/
-#define CHRESET		0xf0		/* Reset Channel		*/
-#define BUFSETALL	0xf2		/* Set Tx & Rx buffer size avail*/
-#define SOFLAG		0xf3		/* Set UNIX oflags		*/
-#define SHFLOW		0xf4		/* Set hardware handshake	*/
-#define SCFLAG		0xf5		/* Set UNIX cflags		*/
-#define SVNEXT		0xf6		/* Set VNEXT character		*/
-#define SPINTFC		0xfc		/* Reserved			*/
-#define SCOMMODE	0xfd		/* Set RS232/422 mode		*/
-
-/************************************************************************
- *	Modes for SCOMMODE
- ************************************************************************/
-#define MODE_232	0x00
-#define MODE_422	0x01
-
-/************************************************************************
- *      Event flags.
- ************************************************************************/
-#define IFBREAK         0x01            /* Break received               */
-#define IFTLW           0x02            /* Transmit low water           */
-#define IFTEM           0x04            /* Transmitter empty            */
-#define IFDATA          0x08            /* Receive data present         */
-#define IFMODEM         0x20            /* Modem status change          */
-
-/************************************************************************
- *      Modem flags
- ************************************************************************/
-#       define  DM_RTS          0x02    /* Request to send              */
-#       define  DM_CD           0x80    /* Carrier detect               */
-#       define  DM_DSR          0x20    /* Data set ready               */
-#       define  DM_CTS          0x10    /* Clear to send                */
-#       define  DM_RI           0x40    /* Ring indicator               */
-#       define  DM_DTR          0x01    /* Data terminal ready          */
-
-/*
- * defines from dgap_conf.h
- */
-#define NULLNODE 0		/* header node, not used */
-#define BNODE 1			/* Board node */
-#define LNODE 2			/* Line node */
-#define CNODE 3			/* Concentrator node */
-#define MNODE 4			/* EBI Module node */
-#define TNODE 5			/* tty name prefix node */
-#define	CUNODE 6		/* cu name prefix (non-SCO) */
-#define PNODE 7			/* trans. print prefix node */
-#define JNODE 8			/* maJor number node */
-#define ANODE 9			/* altpin */
-#define	TSNODE 10		/* tty structure size */
-#define CSNODE 11		/* channel structure size */
-#define BSNODE 12		/* board structure size */
-#define USNODE 13		/* unit schedule structure size */
-#define FSNODE 14		/* f2200 structure size */
-#define VSNODE 15		/* size of VPIX structures */
-#define INTRNODE 16		/* enable interrupt */
-
-/* Enumeration of tokens */
-#define	BEGIN	1
-#define	END	2
-#define	BOARD	10
-
-#define EPCFS	11 /* start of EPC family definitions */
-#define	ICX		11
-#define	MCX		13
-#define PCX	14
-#define	IEPC	15
-#define	EEPC	16
-#define	MEPC	17
-#define	IPCM	18
-#define	EPCM	19
-#define	MPCM	20
-#define PEPC	21
-#define PPCM	22
-#ifdef CP
-#define ICP     23
-#define ECP     24
-#define MCP     25
-#endif
-#define EPCFE	25 /* end of EPC family definitions */
-#define	PC2E	26
-#define	PC4E	27
-#define	PC4E8K	28
-#define	PC8E	29
-#define	PC8E8K	30
-#define	PC16E	31
-#define MC2E8K  34
-#define MC4E8K  35
-#define MC8E8K  36
-
-#define AVANFS	42	/* start of Avanstar family definitions */
-#define A8P	42
-#define A16P	43
-#define AVANFE	43	/* end of Avanstar family definitions */
-
-#define DA2000FS	44 /* start of AccelePort 2000 family definitions */
-#define DA22		44 /* AccelePort 2002 */
-#define DA24		45 /* AccelePort 2004 */
-#define DA28		46 /* AccelePort 2008 */
-#define DA216		47 /* AccelePort 2016 */
-#define DAR4		48 /* AccelePort RAS 4 port */
-#define DAR8		49 /* AccelePort RAS 8 port */
-#define DDR24		50 /* DataFire RAS 24 port */
-#define DDR30		51 /* DataFire RAS 30 port */
-#define DDR48		52 /* DataFire RAS 48 port */
-#define DDR60		53 /* DataFire RAS 60 port */
-#define DA2000FE	53 /* end of AccelePort 2000/RAS family definitions */
-
-#define PCXRFS	106	/* start of PCXR family definitions */
-#define	APORT4	106
-#define	APORT8	107
-#define PAPORT4 108
-#define PAPORT8 109
-#define APORT4_920I	110
-#define APORT8_920I	111
-#define APORT4_920P	112
-#define APORT8_920P	113
-#define APORT2_920P 114
-#define PCXRFE	117	/* end of PCXR family definitions */
-
-#define	LINE	82
-#ifdef T1
-#define T1M	83
-#define E1M	84
-#endif
-#define	CONC	64
-#define	CX	65
-#define	EPC	66
-#define	MOD	67
-#define	PORTS	68
-#define METHOD	69
-#define CUSTOM	70
-#define BASIC	71
-#define STATUS	72
-#define MODEM	73
-/* The following tokens can appear in multiple places */
-#define	SPEED	74
-#define	NPORTS	75
-#define	ID	76
-#define CABLE	77
-#define CONNECT	78
-#define	MEM	80
-#define DPSZ	81
-
-#define	TTYN	90
-#define	CU	91
-#define	PRINT	92
-#define	XPRINT	93
-#define CMAJOR   94
-#define ALTPIN  95
-#define STARTO 96
-#define USEINTR  97
-#define PCIINFO  98
-
-#define	TTSIZ	100
-#define	CHSIZ	101
-#define BSSIZ	102
-#define	UNTSIZ	103
-#define	F2SIZ	104
-#define	VPSIZ	105
-
-#define	TOTAL_BOARD	2
-#define	CURRENT_BRD	4
-#define	BOARD_TYPE	6
-#define	IO_ADDRESS	8
-#define	MEM_ADDRESS	10
-
-#define	FIELDS_PER_PAGE	18
-
-#define TB_FIELD	1
-#define CB_FIELD	3
-#define BT_FIELD	5
-#define IO_FIELD	7
-#define ID_FIELD	8
-#define ME_FIELD	9
-#define TTY_FIELD	11
-#define CU_FIELD	13
-#define PR_FIELD	15
-#define MPR_FIELD	17
-
-#define	MAX_FIELD	512
-
-#define	INIT		0
-#define	NITEMS		128
-#define MAX_ITEM	512
-
-#define	DSCRINST	1
-#define	DSCRNUM		3
-#define	ALTPINQ		5
-#define	SSAVE		7
-
-#define	DSCR		"32"
-#define	ONETONINE	"123456789"
-#define	ALL		"1234567890"
-
-/*
- * All the possible states the driver can be while being loaded.
- */
-enum {
-	DRIVER_INITIALIZED = 0,
-	DRIVER_READY
-};
-
-/*
- * All the possible states the board can be while booting up.
- */
-enum {
-	BOARD_FAILED = 0,
-	BOARD_READY
-};
-
-/*
- * All the possible states that a requested concentrator image can be in.
- */
-enum {
-	NO_PENDING_CONCENTRATOR_REQUESTS = 0,
-	NEED_CONCENTRATOR,
-	REQUESTED_CONCENTRATOR
-};
-
-/*
- * Modem line constants are defined as macros because DSR and
- * DCD are swapable using the ditty altpin option.
- */
-#define D_CD(ch)        ch->ch_cd       /* Carrier detect       */
-#define D_DSR(ch)       ch->ch_dsr      /* Data set ready       */
-#define D_RTS(ch)       DM_RTS          /* Request to send      */
-#define D_CTS(ch)       DM_CTS          /* Clear to send        */
-#define D_RI(ch)        DM_RI           /* Ring indicator       */
-#define D_DTR(ch)       DM_DTR          /* Data terminal ready  */
-
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
-
-/*
- * A structure to hold a statistics counter.  We also
- * compute moving averages for this counter.
- */
-struct macounter {
-	u32		cnt;	/* Total count */
-	ulong		accum;	/* Acuumulator per period */
-	ulong		sma;	/* Simple moving average */
-	ulong		ema;	/* Exponential moving average */
-};
-
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
-#define	BD_FEP5PLUS	0x0001          /* Supports FEP5 Plus commands */
-#define BD_HAS_VPD	0x0002		/* Board has VPD info available */
-
-/*
- *	Per-board information
- */
-struct board_t {
-	int		magic;		/* Board Magic number.  */
-	int		boardnum;	/* Board number: 0-3 */
-
-	int		type;		/* Type of board */
-	char		*name;		/* Product Name */
-	struct pci_dev	*pdev;		/* Pointer to the pci_dev struct */
-	u16		vendor;		/* PCI vendor ID */
-	u16		device;		/* PCI device ID */
-	u16		subvendor;	/* PCI subsystem vendor ID */
-	u16		subdevice;	/* PCI subsystem device ID */
-	u8		rev;		/* PCI revision ID */
-	uint		pci_bus;	/* PCI bus value */
-	uint		pci_slot;	/* PCI slot value */
-	u16		maxports;	/* MAX ports this board can handle */
-	u8		vpd[VPDSIZE];	/* VPD of board, if found */
-	u32		bd_flags;	/* Board flags */
-
-	spinlock_t	bd_lock;	/* Used to protect board */
-
-	u32		state;		/* State of card. */
-	wait_queue_head_t state_wait;	/* Place to sleep on for state change */
-
-	struct		tasklet_struct helper_tasklet; /* Poll helper tasklet */
-
-	u32		wait_for_bios;
-	u32		wait_for_fep;
-
-	struct cnode    *bd_config;	/* Config of board */
-
-	u16		nasync;		/* Number of ports on card */
-
-	ulong		irq;		/* Interrupt request number */
-	ulong		intr_count;	/* Count of interrupts */
-	u32		intr_used;	/* Non-zero if using interrupts */
-	u32		intr_running;	/* Non-zero if FEP knows its doing */
-					/* interrupts */
-
-	ulong		port;		/* Start of base io port of the card */
-	ulong		port_end;	/* End of base io port of the card */
-	ulong		membase;	/* Start of base memory of the card */
-	ulong		membase_end;	/* End of base memory of the card */
-
-	u8 __iomem	*re_map_port;	/* Remapped io port of the card */
-	u8 __iomem	*re_map_membase;/* Remapped memory of the card */
-
-	u8		inhibit_poller; /* Tells the poller to leave us alone */
-
-	struct channel_t *channels[MAXPORTS]; /* array of pointers to our */
-					      /* channels.                */
-
-	struct tty_driver	*serial_driver;
-	struct tty_port *serial_ports;
-	char		serial_name[200];
-	struct tty_driver	*print_driver;
-	struct tty_port *printer_ports;
-	char		print_name[200];
-
-	struct bs_t __iomem *bd_bs;	/* Base structure pointer         */
-
-	char	*flipbuf;		/* Our flip buffer, alloced if    */
-					/* board is found                 */
-	char	*flipflagbuf;		/* Our flip flag buffer, alloced  */
-					/* if board is found              */
-
-	u16		dpatype;	/* The board "type", as defined   */
-					/* by DPA                         */
-	u16		dpastatus;	/* The board "status", as defined */
-					/* by DPA                         */
-
-	u32		conc_dl_status;	/* Status of any pending conc     */
-					/* download                       */
-};
-
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN	0x0001		/* Device is open		*/
-#define UN_CLOSING	0x0002		/* Line is being closed		*/
-#define UN_IMM		0x0004		/* Service immediately		*/
-#define UN_BUSY		0x0008		/* Some work this channel	*/
-#define UN_BREAKI	0x0010		/* Input break received		*/
-#define UN_PWAIT	0x0020		/* Printer waiting for terminal	*/
-#define UN_TIME		0x0040		/* Waiting on time		*/
-#define UN_EMPTY	0x0080		/* Waiting output queue empty	*/
-#define UN_LOW		0x0100		/* Waiting output low water mark*/
-#define UN_EXCL_OPEN	0x0200		/* Open for exclusive use	*/
-#define UN_WOPEN	0x0400		/* Device waiting for open	*/
-#define UN_WIOCTL	0x0800		/* Device waiting for open	*/
-#define UN_HANGUP	0x8000		/* Carrier lost			*/
-
-struct device;
-
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
-struct un_t {
-	int	magic;		/* Unit Magic Number.			*/
-	struct	channel_t *un_ch;
-	u32	un_time;
-	u32	un_type;
-	int	un_open_count;	/* Counter of opens to port		*/
-	struct tty_struct *un_tty;/* Pointer to unit tty structure	*/
-	u32	un_flags;	/* Unit flags				*/
-	wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
-	u32	un_dev;		/* Minor device number			*/
-	tcflag_t un_oflag;	/* oflags being done on board		*/
-	tcflag_t un_lflag;	/* lflags being done on board		*/
-	struct device *un_sysfs;
-};
-
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON         0x0001          /* Printer on string                */
-#define CH_OUT          0x0002          /* Dial-out device open             */
-#define CH_STOP         0x0004          /* Output is stopped                */
-#define CH_STOPI        0x0008          /* Input is stopped                 */
-#define CH_CD           0x0010          /* Carrier is present               */
-#define CH_FCAR         0x0020          /* Carrier forced on                */
-
-#define CH_RXBLOCK      0x0080          /* Enable rx blocked flag           */
-#define CH_WLOW         0x0100          /* Term waiting low event           */
-#define CH_WEMPTY       0x0200          /* Term waiting empty event         */
-#define CH_RENABLE      0x0400          /* Buffer just emptied          */
-#define CH_RACTIVE      0x0800          /* Process active in xxread()   */
-#define CH_RWAIT        0x1000          /* Process waiting in xxread()  */
-#define CH_BAUD0	0x2000		/* Used for checking B0 transitions */
-#define CH_HANGUP       0x8000		/* Hangup received                  */
-
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN	0x1
-#define SNIFF_WAIT_DATA	0x2
-#define SNIFF_WAIT_SPACE 0x4
-
-/************************************************************************
- ***	Definitions for Digi ditty(1) command.
- ************************************************************************/
-
-/************************************************************************
- * This module provides application access to special Digi
- * serial line enhancements which are not standard UNIX(tm) features.
- ************************************************************************/
-
-#if !defined(TIOCMODG)
-
-#define	TIOCMODG	(('d'<<8) | 250)	/* get modem ctrl state	*/
-#define	TIOCMODS	(('d'<<8) | 251)	/* set modem ctrl state	*/
-
-#ifndef TIOCM_LE
-#define		TIOCM_LE	0x01		/* line enable		*/
-#define		TIOCM_DTR	0x02		/* data terminal ready	*/
-#define		TIOCM_RTS	0x04		/* request to send	*/
-#define		TIOCM_ST	0x08		/* secondary transmit	*/
-#define		TIOCM_SR	0x10		/* secondary receive	*/
-#define		TIOCM_CTS	0x20		/* clear to send	*/
-#define		TIOCM_CAR	0x40		/* carrier detect	*/
-#define		TIOCM_RNG	0x80		/* ring	indicator	*/
-#define		TIOCM_DSR	0x100		/* data set ready	*/
-#define		TIOCM_RI	TIOCM_RNG	/* ring (alternate)	*/
-#define		TIOCM_CD	TIOCM_CAR	/* carrier detect (alt)	*/
-#endif
-
-#endif
-
-#if !defined(TIOCMSET)
-#define	TIOCMSET	(('d'<<8) | 252)	/* set modem ctrl state	*/
-#define	TIOCMGET	(('d'<<8) | 253)	/* set modem ctrl state	*/
-#endif
-
-#if !defined(TIOCMBIC)
-#define	TIOCMBIC	(('d'<<8) | 254)	/* set modem ctrl state */
-#define	TIOCMBIS	(('d'<<8) | 255)	/* set modem ctrl state */
-#endif
-
-#if !defined(TIOCSDTR)
-#define	TIOCSDTR	(('e'<<8) | 0)		/* set DTR		*/
-#define	TIOCCDTR	(('e'<<8) | 1)		/* clear DTR		*/
-#endif
-
-/************************************************************************
- * Ioctl command arguments for DIGI parameters.
- ************************************************************************/
-#define DIGI_GETA	(('e'<<8) | 94)		/* Read params		*/
-
-#define DIGI_SETA	(('e'<<8) | 95)		/* Set params		*/
-#define DIGI_SETAW	(('e'<<8) | 96)		/* Drain & set params	*/
-#define DIGI_SETAF	(('e'<<8) | 97)		/* Drain, flush & set params */
-
-#define DIGI_KME	(('e'<<8) | 98)		/* Read/Write Host	*/
-						/* Adapter Memory	*/
-
-#define	DIGI_GETFLOW	(('e'<<8) | 99)		/* Get startc/stopc flow */
-						/* control characters    */
-#define	DIGI_SETFLOW	(('e'<<8) | 100)	/* Set startc/stopc flow */
-						/* control characters	 */
-#define	DIGI_GETAFLOW	(('e'<<8) | 101)	/* Get Aux. startc/stopc */
-						/* flow control chars    */
-#define	DIGI_SETAFLOW	(('e'<<8) | 102)	/* Set Aux. startc/stopc */
-						/* flow control chars	 */
-
-#define DIGI_GEDELAY	(('d'<<8) | 246)	/* Get edelay */
-#define DIGI_SEDELAY	(('d'<<8) | 247)	/* Set edelay */
-
-struct	digiflow_t {
-	unsigned char	startc;			/* flow cntl start char	*/
-	unsigned char	stopc;			/* flow cntl stop char	*/
-};
-
-#ifdef	FLOW_2200
-#define	F2200_GETA	(('e'<<8) | 104)	/* Get 2x36 flow cntl flags */
-#define	F2200_SETAW	(('e'<<8) | 105)	/* Set 2x36 flow cntl flags */
-#define		F2200_MASK	0x03		/* 2200 flow cntl bit mask  */
-#define		FCNTL_2200	0x01		/* 2x36 terminal flow cntl  */
-#define		PCNTL_2200	0x02		/* 2x36 printer flow cntl   */
-#define	F2200_XON	0xf8
-#define	P2200_XON	0xf9
-#define	F2200_XOFF	0xfa
-#define	P2200_XOFF	0xfb
-
-#define	FXOFF_MASK	0x03			/* 2200 flow status mask    */
-#define	RCVD_FXOFF	0x01			/* 2x36 Terminal XOFF rcvd  */
-#define	RCVD_PXOFF	0x02			/* 2x36 Printer XOFF rcvd   */
-#endif
-
-/************************************************************************
- * Values for digi_flags
- ************************************************************************/
-#define DIGI_IXON	0x0001		/* Handle IXON in the FEP	*/
-#define DIGI_FAST	0x0002		/* Fast baud rates		*/
-#define RTSPACE		0x0004		/* RTS input flow control	*/
-#define CTSPACE		0x0008		/* CTS output flow control	*/
-#define DSRPACE		0x0010		/* DSR output flow control	*/
-#define DCDPACE		0x0020		/* DCD output flow control	*/
-#define DTRPACE		0x0040		/* DTR input flow control	*/
-#define DIGI_COOK	0x0080		/* Cooked processing done in FEP */
-#define DIGI_FORCEDCD	0x0100		/* Force carrier		*/
-#define	DIGI_ALTPIN	0x0200		/* Alternate RJ-45 pin config	*/
-#define	DIGI_AIXON	0x0400		/* Aux flow control in fep	*/
-#define	DIGI_PRINTER	0x0800		/* Hold port open for flow cntrl*/
-#define DIGI_PP_INPUT	0x1000		/* Change parallel port to input*/
-#define DIGI_DTR_TOGGLE 0x2000		/* Support DTR Toggle		*/
-#define	DIGI_422	0x4000		/* for 422/232 selectable panel */
-#define DIGI_RTS_TOGGLE	0x8000		/* Support RTS Toggle		*/
-
-/************************************************************************
- * These options are not supported on the comxi.
- ************************************************************************/
-#define	DIGI_COMXI	(DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
-
-#define DIGI_PLEN	28		/* String length		*/
-#define	DIGI_TSIZ	10		/* Terminal string len		*/
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_t {
-	unsigned short	digi_flags;		/* Flags (see above)	*/
-	unsigned short	digi_maxcps;		/* Max printer CPS	*/
-	unsigned short	digi_maxchar;		/* Max chars in print queue */
-	unsigned short	digi_bufsize;		/* Buffer size		*/
-	unsigned char	digi_onlen;		/* Length of ON string	*/
-	unsigned char	digi_offlen;		/* Length of OFF string	*/
-	char		digi_onstr[DIGI_PLEN];	/* Printer on string	*/
-	char		digi_offstr[DIGI_PLEN];	/* Printer off string	*/
-	char		digi_term[DIGI_TSIZ];	/* terminal string	*/
-};
-
-/************************************************************************
- * KME definitions and structures.
- ************************************************************************/
-#define	RW_IDLE		0	/* Operation complete			*/
-#define	RW_READ		1	/* Read Concentrator Memory		*/
-#define	RW_WRITE	2	/* Write Concentrator Memory		*/
-
-struct rw_t {
-	unsigned char	rw_req;		/* Request type			*/
-	unsigned char	rw_board;	/* Host Adapter board number	*/
-	unsigned char	rw_conc;	/* Concentrator number		*/
-	unsigned char	rw_reserved;	/* Reserved for expansion	*/
-	unsigned long	rw_addr;	/* Address in concentrator	*/
-	unsigned short	rw_size;	/* Read/write request length	*/
-	unsigned char	rw_data[128];	/* Data to read/write		*/
-};
-
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
-struct digi_dinfo {
-	unsigned long	dinfo_nboards;		/* # boards configured	*/
-	char		dinfo_reserved[12];	/* for future expansion */
-	char		dinfo_version[16];	/* driver version       */
-};
-
-#define	DIGI_GETDD	(('d'<<8) | 248)	/* get driver info      */
-
-/************************************************************************
- * Structure used with ioctl commands for per-board information
- *
- * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
-struct digi_info {
-	unsigned long	info_bdnum;		/* Board number (0 based)  */
-	unsigned long	info_ioport;		/* io port address         */
-	unsigned long	info_physaddr;		/* memory address          */
-	unsigned long	info_physsize;		/* Size of host mem window */
-	unsigned long	info_memsize;		/* Amount of dual-port mem */
-						/* on board                */
-	unsigned short	info_bdtype;		/* Board type              */
-	unsigned short	info_nports;		/* number of ports         */
-	char		info_bdstate;		/* board state             */
-	char		info_reserved[7];	/* for future expansion    */
-};
-
-#define	DIGI_GETBD	(('d'<<8) | 249)	/* get board info          */
-
-struct digi_stat {
-	unsigned int	info_chan;		/* Channel number (0 based)  */
-	unsigned int	info_brd;		/* Board number (0 based)  */
-	unsigned long	info_cflag;		/* cflag for channel       */
-	unsigned long	info_iflag;		/* iflag for channel       */
-	unsigned long	info_oflag;		/* oflag for channel       */
-	unsigned long	info_mstat;		/* mstat for channel       */
-	unsigned long	info_tx_data;		/* tx_data for channel       */
-	unsigned long	info_rx_data;		/* rx_data for channel       */
-	unsigned long	info_hflow;		/* hflow for channel       */
-	unsigned long	info_reserved[8];	/* for future expansion    */
-};
-
-#define	DIGI_GETSTAT	(('d'<<8) | 244)	/* get board info          */
-/************************************************************************
- *
- * Structure used with ioctl commands for per-channel information
- *
- ************************************************************************/
-struct digi_ch {
-	unsigned long	info_bdnum;		/* Board number (0 based)  */
-	unsigned long	info_channel;		/* Channel index number    */
-	unsigned long	info_ch_cflag;		/* Channel cflag           */
-	unsigned long	info_ch_iflag;		/* Channel iflag           */
-	unsigned long	info_ch_oflag;		/* Channel oflag           */
-	unsigned long	info_chsize;		/* Channel structure size  */
-	unsigned long	info_sleep_stat;	/* sleep status		   */
-	dev_t		info_dev;		/* device number	   */
-	unsigned char	info_initstate;		/* Channel init state	   */
-	unsigned char	info_running;		/* Channel running state   */
-	long		reserved[8];		/* reserved for future use */
-};
-
-/*
-* This structure is used with the DIGI_FEPCMD ioctl to
-* tell the driver which port to send the command for.
-*/
-struct digi_cmd {
-	int	cmd;
-	int	word;
-	int	ncmds;
-	int	chan; /* channel index (zero based) */
-	int	bdid; /* board index (zero based) */
-};
-
-/*
-*  info_sleep_stat defines
-*/
-#define INFO_RUNWAIT	0x0001
-#define INFO_WOPEN	0x0002
-#define INFO_TTIOW	0x0004
-#define INFO_CH_RWAIT	0x0008
-#define INFO_CH_WEMPTY	0x0010
-#define INFO_CH_WLOW	0x0020
-#define INFO_XXBUF_BUSY 0x0040
-
-#define	DIGI_GETCH	(('d'<<8) | 245)	/* get board info          */
-
-/* Board type definitions */
-
-#define	SUBTYPE		0007
-#define	T_PCXI		0000
-#define T_PCXM		0001
-#define T_PCXE		0002
-#define T_PCXR		0003
-#define T_SP		0004
-#define T_SP_PLUS	0005
-#	define T_HERC	0000
-#	define T_HOU	0001
-#	define T_LON	0002
-#	define T_CHA	0003
-#define FAMILY		0070
-#define T_COMXI		0000
-#define T_PCXX		0010
-#define T_CX		0020
-#define T_EPC		0030
-#define	T_PCLITE	0040
-#define	T_SPXX		0050
-#define	T_AVXX		0060
-#define T_DXB		0070
-#define T_A2K_4_8	0070
-#define BUSTYPE		0700
-#define T_ISABUS	0000
-#define T_MCBUS		0100
-#define	T_EISABUS	0200
-#define	T_PCIBUS	0400
-
-/* Board State Definitions */
-
-#define	BD_RUNNING	0x0
-#define	BD_REASON	0x7f
-#define	BD_NOTFOUND	0x1
-#define	BD_NOIOPORT	0x2
-#define	BD_NOMEM	0x3
-#define	BD_NOBIOS	0x4
-#define	BD_NOFEP	0x5
-#define	BD_FAILED	0x6
-#define BD_ALLOCATED	0x7
-#define BD_TRIBOOT	0x8
-#define	BD_BADKME	0x80
-
-#define DIGI_LOOPBACK	(('d'<<8) | 252)	/* Enable/disable UART  */
-						/* internal loopback    */
-#define DIGI_SPOLL	(('d'<<8) | 254)	/* change poller rate   */
-
-#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int)	/* Set integer baud rate */
-#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int)	/* Get integer baud rate */
-#define DIGI_RESET_PORT	   (('e'<<8) | 93)	/* Reset port		 */
-
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
-struct channel_t {
-	int magic;			/* Channel Magic Number		*/
-	struct bs_t __iomem *ch_bs;	/* Base structure pointer       */
-	struct cm_t __iomem *ch_cm;	/* Command queue pointer        */
-	struct board_t *ch_bd;		/* Board structure pointer      */
-	u8 __iomem *ch_vaddr;		/* FEP memory origin            */
-	u8 __iomem *ch_taddr;		/* Write buffer origin          */
-	u8 __iomem *ch_raddr;		/* Read buffer origin           */
-	struct digi_t  ch_digi;		/* Transparent Print structure  */
-	struct un_t ch_tun;		/* Terminal unit info           */
-	struct un_t ch_pun;		/* Printer unit info            */
-
-	spinlock_t	ch_lock;	/* provide for serialization */
-	wait_queue_head_t ch_flags_wait;
-
-	u32	pscan_state;
-	u8	pscan_savechar;
-
-	u32 ch_portnum;			/* Port number, 0 offset.	*/
-	u32 ch_open_count;		/* open count			*/
-	u32	ch_flags;		/* Channel flags                */
-
-	u32	ch_cpstime;		/* Time for CPS calculations    */
-
-	tcflag_t ch_c_iflag;		/* channel iflags               */
-	tcflag_t ch_c_cflag;		/* channel cflags               */
-	tcflag_t ch_c_oflag;		/* channel oflags               */
-	tcflag_t ch_c_lflag;		/* channel lflags               */
-
-	u16  ch_fepiflag;		/* FEP tty iflags               */
-	u16  ch_fepcflag;		/* FEP tty cflags               */
-	u16  ch_fepoflag;		/* FEP tty oflags               */
-	u16  ch_wopen;			/* Waiting for open process cnt */
-	u16  ch_tstart;			/* Transmit buffer start        */
-	u16  ch_tsize;			/* Transmit buffer size         */
-	u16  ch_rstart;			/* Receive buffer start         */
-	u16  ch_rsize;			/* Receive buffer size          */
-	u16  ch_rdelay;			/* Receive delay time           */
-
-	u16	ch_tlw;			/* Our currently set low water mark */
-
-	u16  ch_cook;			/* Output character mask        */
-
-	u8   ch_card;			/* Card channel is on           */
-	u8   ch_stopc;			/* Stop character               */
-	u8   ch_startc;			/* Start character              */
-
-	u8   ch_mostat;			/* FEP output modem status      */
-	u8   ch_mistat;			/* FEP input modem status       */
-	u8   ch_mforce;			/* Modem values to be forced    */
-	u8   ch_mval;			/* Force values                 */
-	u8   ch_fepstopc;		/* FEP stop character           */
-	u8   ch_fepstartc;		/* FEP start character          */
-
-	u8   ch_astopc;			/* Auxiliary Stop character     */
-	u8   ch_astartc;		/* Auxiliary Start character    */
-	u8   ch_fepastopc;		/* Auxiliary FEP stop char      */
-	u8   ch_fepastartc;		/* Auxiliary FEP start char     */
-
-	u8   ch_hflow;			/* FEP hardware handshake       */
-	u8   ch_dsr;			/* stores real dsr value        */
-	u8   ch_cd;			/* stores real cd value         */
-	u8   ch_tx_win;			/* channel tx buffer window     */
-	u8   ch_rx_win;			/* channel rx buffer window     */
-	uint	ch_custom_speed;	/* Custom baud, if set		*/
-	uint	ch_baud_info;		/* Current baud info for /proc output */
-	ulong	ch_rxcount;		/* total of data received so far      */
-	ulong	ch_txcount;		/* total of data transmitted so far   */
-	ulong	ch_err_parity;		/* Count of parity errors on channel  */
-	ulong	ch_err_frame;		/* Count of framing errors on channel */
-	ulong	ch_err_break;		/* Count of breaks on channel	*/
-	ulong	ch_err_overrun;		/* Count of overruns on channel	*/
-};
-
-/************************************************************************
- * Command structure definition.
- ************************************************************************/
-struct cm_t {
-	unsigned short cm_head;		/* Command buffer head offset */
-	unsigned short cm_tail;		/* Command buffer tail offset */
-	unsigned short cm_start;	/* start offset of buffer     */
-	unsigned short cm_max;		/* last offset of buffer      */
-};
-
-/************************************************************************
- * Event structure definition.
- ************************************************************************/
-struct ev_t {
-	unsigned short ev_head;		/* Command buffer head offset */
-	unsigned short ev_tail;		/* Command buffer tail offset */
-	unsigned short ev_start;	/* start offset of buffer     */
-	unsigned short ev_max;		/* last offset of buffer      */
-};
-
-/************************************************************************
- * Download buffer structure.
- ************************************************************************/
-struct downld_t {
-	u8	dl_type;		/* Header                       */
-	u8	dl_seq;			/* Download sequence            */
-	ushort	dl_srev;		/* Software revision number     */
-	ushort	dl_lrev;		/* Low revision number          */
-	ushort	dl_hrev;		/* High revision number         */
-	ushort	dl_seg;			/* Start segment address        */
-	ushort	dl_size;		/* Number of bytes to download  */
-	u8	dl_data[1024];		/* Download data                */
-};
-
-/************************************************************************
- * Per channel buffer structure
- ************************************************************************
- *              Base Structure Entries Usage Meanings to Host           *
- *                                                                      *
- *        W = read write        R = read only                           *
- *        C = changed by commands only                                  *
- *        U = unknown (may be changed w/o notice)                       *
- ************************************************************************/
-struct bs_t {
-	unsigned short  tp_jmp;		/* Transmit poll jump	 */
-	unsigned short  tc_jmp;		/* Cooked procedure jump */
-	unsigned short  ri_jmp;		/* Not currently used	 */
-	unsigned short  rp_jmp;		/* Receive poll jump	 */
-
-	unsigned short  tx_seg;		/* W Tx segment	 */
-	unsigned short  tx_head;	/* W Tx buffer head offset */
-	unsigned short  tx_tail;	/* R Tx buffer tail offset */
-	unsigned short  tx_max;		/* W Tx buffer size - 1    */
-
-	unsigned short  rx_seg;		/* W Rx segment	    */
-	unsigned short  rx_head;	/* W Rx buffer head offset */
-	unsigned short  rx_tail;	/* R Rx buffer tail offset */
-	unsigned short  rx_max;		/* W Rx buffer size - 1    */
-
-	unsigned short  tx_lw;		/* W Tx buffer low water mark */
-	unsigned short  rx_lw;		/* W Rx buffer low water mark */
-	unsigned short  rx_hw;		/* W Rx buffer high water mark*/
-	unsigned short  incr;		/* W Increment to next channel*/
-
-	unsigned short  fepdev;		/* U SCC device base address  */
-	unsigned short  edelay;		/* W Exception delay          */
-	unsigned short  blen;		/* W Break length             */
-	unsigned short  btime;		/* U Break complete time      */
-
-	unsigned short  iflag;		/* C UNIX input flags         */
-	unsigned short  oflag;		/* C UNIX output flags        */
-	unsigned short  cflag;		/* C UNIX control flags       */
-	unsigned short  wfill[13];	/* U Reserved for expansion   */
-
-	unsigned char   num;		/* U Channel number           */
-	unsigned char   ract;		/* U Receiver active counter  */
-	unsigned char   bstat;		/* U Break status bits        */
-	unsigned char   tbusy;		/* W Transmit busy            */
-	unsigned char   iempty;		/* W Transmit empty event     */
-					/* enable                     */
-	unsigned char   ilow;		/* W Transmit low-water event */
-					/* enable                     */
-	unsigned char   idata;		/* W Receive data interrupt   */
-					/* enable                     */
-	unsigned char   eflag;		/* U Host event flags         */
-
-	unsigned char   tflag;		/* U Transmit flags           */
-	unsigned char   rflag;		/* U Receive flags            */
-	unsigned char   xmask;		/* U Transmit ready flags     */
-	unsigned char   xval;		/* U Transmit ready value     */
-	unsigned char   m_stat;		/* RC Modem status bits       */
-	unsigned char   m_change;	/* U Modem bits which changed */
-	unsigned char   m_int;		/* W Modem interrupt enable   */
-					/* bits                       */
-	unsigned char   m_last;		/* U Last modem status        */
-
-	unsigned char   mtran;		/* C Unreported modem trans   */
-	unsigned char   orun;		/* C Buffer overrun occurred  */
-	unsigned char   astartc;	/* W Auxiliary Xon char       */
-	unsigned char   astopc;		/* W Auxiliary Xoff char      */
-	unsigned char   startc;		/* W Xon character            */
-	unsigned char   stopc;		/* W Xoff character           */
-	unsigned char   vnextc;		/* W Vnext character          */
-	unsigned char   hflow;		/* C Software flow control    */
-
-	unsigned char   fillc;		/* U Delay Fill character     */
-	unsigned char   ochar;		/* U Saved output character   */
-	unsigned char   omask;		/* U Output character mask    */
-
-	unsigned char   bfill[13];	/* U Reserved for expansion   */
-
-	unsigned char   scc[16];	/* U SCC registers            */
-};
-
-struct cnode {
-	struct cnode *next;
-	int type;
-	int numbrd;
-
-	union {
-		struct {
-			char  type;	/* Board Type           */
-			long  addr;	/* Memory Address	*/
-			char  *addrstr; /* Memory Address in string */
-			long  pcibus;	/* PCI BUS		*/
-			char  *pcibusstr; /* PCI BUS in string */
-			long  pcislot;	/* PCI SLOT		*/
-			char  *pcislotstr; /* PCI SLOT in string */
-			long  nport;	/* Number of Ports	*/
-			char  *id;	/* tty id		*/
-			long  start;	/* start of tty counting */
-			char  *method;  /* Install method       */
-			char  v_addr;
-			char  v_pcibus;
-			char  v_pcislot;
-			char  v_nport;
-			char  v_id;
-			char  v_start;
-			char  v_method;
-			char  line1;
-			char  line2;
-			char  conc1;   /* total concs in line1 */
-			char  conc2;   /* total concs in line2 */
-			char  module1; /* total modules for line1 */
-			char  module2; /* total modules for line2 */
-			char  *status; /* config status */
-			char  *dimstatus;	 /* Y/N */
-			int   status_index; /* field pointer */
-		} board;
-
-		struct {
-			char  *cable;
-			char  v_cable;
-			long  speed;
-			char  v_speed;
-		} line;
-
-		struct {
-			char  type;
-			char  *connect;
-			long  speed;
-			long  nport;
-			char  *id;
-			char  *idstr;
-			long  start;
-			char  v_connect;
-			char  v_speed;
-			char  v_nport;
-			char  v_id;
-			char  v_start;
-		} conc;
-
-		struct {
-			char type;
-			long nport;
-			char *id;
-			char *idstr;
-			long start;
-			char v_nport;
-			char v_id;
-			char v_start;
-		} module;
-
-		char *ttyname;
-		char *cuname;
-		char *printname;
-		long majornumber;
-		long altpin;
-		long ttysize;
-		long chsize;
-		long bssize;
-		long unsize;
-		long f2size;
-		long vpixsize;
-		long useintr;
-	} u;
-};
-#endif
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 39c76e7..ee3155b 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -1306,10 +1306,10 @@
 	/*
 	 * Go to sleep waiting for the tty layer to wake me back up when
 	 * the empty flag goes away.
-	 *
-	 * NOTE: TODO: Do something with time passed in.
 	 */
-	rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+	rc = wait_event_interruptible_timeout(un->un_flags_wait,
+					      ((un->un_flags & UN_EMPTY) == 0),
+					      msecs_to_jiffies(seconds * 1000));
 
 	/* If ret is non-zero, user ctrl-c'ed us */
 	return rc;
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index beb9411..d38673c 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -21,7 +21,6 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
@@ -160,7 +159,7 @@
 			recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK);
 			selector  = p_ctrl->wValue;
 			if ((recipient == USB_RECIP_DEVICE) &&
-				(selector == USB_DEVICE_TEST_MODE)) {
+			    (selector == USB_DEVICE_TEST_MODE)) {
 				test_mode = (u32)(p_ctrl->wIndex >> 8);
 				_nbu2ss_set_test_mode(udc, test_mode);
 			}
@@ -271,21 +270,21 @@
 		data = EPn_EN | EPn_BCLR | EPn_DIR0;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = (EPn_ONAK | EPn_OSTL_EN | EPn_OSTL);
+		data = EPn_ONAK | EPn_OSTL_EN | EPn_OSTL;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = (EPn_OUT_EN | EPn_OUT_END_EN);
+		data = EPn_OUT_EN | EPn_OUT_END_EN;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
 	} else {
 		/*---------------------------------------------------------*/
 		/* IN */
-		data = (EPn_EN | EPn_BCLR | EPn_AUTO);
+		data = EPn_EN | EPn_BCLR | EPn_AUTO;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = (EPn_ISTL);
+		data = EPn_ISTL;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = (EPn_IN_EN | EPn_IN_END_EN);
+		data = EPn_IN_EN | EPn_IN_END_EN;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
 	}
 
@@ -460,7 +459,7 @@
 		if (length)
 			_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
 
-		data = (((((u32)length) << 5) & EPn_DW) | EPn_DEND);
+		data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND;
 		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
 
 		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
@@ -526,10 +525,10 @@
 		if (req->unaligned) {
 			if (direct == USB_DIR_OUT)
 				memcpy(req->req.buf, ep->virt_buf,
-					req->req.actual & 0xfffffffc);
+				       req->req.actual & 0xfffffffc);
 		} else
 			dma_unmap_single(udc->gadget.dev.parent,
-				req->req.dma, req->req.length,
+					 req->req.dma, req->req.length,
 				(direct == USB_DIR_IN)
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
@@ -538,7 +537,7 @@
 	} else {
 		if (!req->unaligned)
 			dma_sync_single_for_cpu(udc->gadget.dev.parent,
-				req->req.dma, req->req.length,
+						req->req.dma, req->req.length,
 				(direct == USB_DIR_IN)
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
@@ -844,7 +843,7 @@
 
 	/* Number of transfer packets */
 	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT;
-	dmacnt = (length / mpkt);
+	dmacnt = length / mpkt;
 	lmpkt = (length % mpkt) & ~(u32)0x03;
 
 	if (dmacnt > DMA_MAX_COUNT) {
@@ -1490,7 +1489,7 @@
 	int	result = -EOPNOTSUPP;
 
 	if ((udc->ctrl.wLength != 0x0000) ||
-			(direction != USB_DIR_OUT)) {
+	    (direction != USB_DIR_OUT)) {
 		return -EINVAL;
 	}
 
@@ -1648,7 +1647,7 @@
 	u32		wValue = udc->ctrl.wValue;
 
 	if ((udc->ctrl.bRequestType != 0x00)	||
-		(udc->ctrl.wIndex != 0x0000)	||
+	    (udc->ctrl.wIndex != 0x0000)	||
 		(udc->ctrl.wLength != 0x0000)) {
 		return -EINVAL;
 	}
@@ -1670,7 +1669,7 @@
 	u32 ConfigValue = (u32)(udc->ctrl.wValue & 0x00ff);
 
 	if ((udc->ctrl.wIndex != 0x0000)	||
-		(udc->ctrl.wLength != 0x0000)	||
+	    (udc->ctrl.wLength != 0x0000)	||
 		(udc->ctrl.bRequestType != 0x00)) {
 		return -EINVAL;
 	}
@@ -1949,7 +1948,7 @@
 
 #ifdef USE_DMA
 	if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) &&
-			(req->req.dma != 0))
+	    (req->req.dma != 0))
 		_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
 #endif
 
@@ -2277,7 +2276,7 @@
 	_nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
 
 		_nbu2ss_writel(&udc->p_regs->AHBMCTR,
-			HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
+			       HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
 
 	while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
 		waitcnt++;
@@ -2701,7 +2700,7 @@
 
 	if (unlikely(!udc->driver)) {
 		dev_err(udc->dev, "%s, bogus device state %p\n", __func__,
-				udc->driver);
+			udc->driver);
 		return -ESHUTDOWN;
 	}
 
@@ -2721,12 +2720,12 @@
 		if (ep->epnum > 0)  {
 			if (ep->direct == USB_DIR_IN)
 				memcpy(ep->virt_buf, req->req.buf,
-					req->req.length);
+				       req->req.length);
 		}
 	}
 
 	if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) &&
-			(req->req.dma != 0))
+	    (req->req.dma != 0))
 		_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
 #endif
 
@@ -2741,12 +2740,12 @@
 		result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
 		if (result < 0) {
 			dev_err(udc->dev, " *** %s, result = %d\n", __func__,
-					result);
+				result);
 			list_del(&req->queue);
 		} else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) {
 #ifdef USE_DMA
 			if (req->req.length < 4 &&
-				req->req.length == req->req.actual)
+			    req->req.length == req->req.actual)
 #else
 			if (req->req.length == req->req.actual)
 #endif
@@ -3026,7 +3025,7 @@
 
 /*-------------------------------------------------------------------------*/
 static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
-					int is_selfpowered)
+				      int is_selfpowered)
 {
 	struct nbu2ss_udc       *udc;
 	unsigned long		flags;
@@ -3180,7 +3179,8 @@
 		ep->ep.ops = &nbu2ss_ep_ops;
 
 		usb_ep_set_maxpacket_limit(&ep->ep,
-				i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
+					   i == 0 ? EP0_PACKETSIZE
+					   : EP_PACKETSIZE);
 
 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 		INIT_LIST_HEAD(&ep->queue);
@@ -3273,10 +3273,7 @@
 	/* VBUS Interrupt */
 	irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH);
 	status = request_irq(INT_VBUS,
-				_nbu2ss_vbus_irq,
-				IRQF_SHARED,
-				driver_name,
-				udc);
+			     _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
 
 	if (status != 0) {
 		dev_err(udc->dev, "request_irq(INT_VBUS) failed\n");
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 883ff5b..6f5e824 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -117,12 +117,24 @@
 	help
 	  Framebuffer support for SSD1289
 
+config FB_TFT_SSD1305
+	tristate "FB driver for the SSD1305 OLED Controller"
+	depends on FB_TFT
+	help
+	  Framebuffer support for SSD1305
+
 config FB_TFT_SSD1306
 	tristate "FB driver for the SSD1306 OLED Controller"
 	depends on FB_TFT
 	help
 	  Framebuffer support for SSD1306
 
+config FB_TFT_SSD1325
+	tristate "FB driver for the SSD1325 OLED Controller"
+	depends on FB_TFT
+	help
+	  Framebuffer support for SSD1325
+
 config FB_TFT_SSD1331
 	tristate "FB driver for the SSD1331 LCD Controller"
 	depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 4f9071d..2725ea9 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -21,7 +21,9 @@
 obj-$(CONFIG_FB_TFT_S6D02A1)     += fb_s6d02a1.o
 obj-$(CONFIG_FB_TFT_S6D1121)     += fb_s6d1121.o
 obj-$(CONFIG_FB_TFT_SSD1289)     += fb_ssd1289.o
+obj-$(CONFIG_FB_TFT_SSD1305)     += fb_ssd1305.o
 obj-$(CONFIG_FB_TFT_SSD1306)     += fb_ssd1306.o
+obj-$(CONFIG_FB_TFT_SSD1325)     += fb_ssd1325.o
 obj-$(CONFIG_FB_TFT_SSD1331)     += fb_ssd1331.o
 obj-$(CONFIG_FB_TFT_SSD1351)     += fb_ssd1351.o
 obj-$(CONFIG_FB_TFT_ST7735R)     += fb_st7735r.o
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
index e1ed177..9970ed7 100644
--- a/drivers/staging/fbtft/fb_hx8340bn.c
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -25,6 +25,7 @@
 #include <linux/vmalloc.h>
 #include <linux/spi/spi.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -45,56 +46,70 @@
 
 	/* BTL221722-276L startup sequence, from datasheet */
 
-	/* SETEXTCOM: Set extended command set (C1h)
-	   This command is used to set extended command set access enable.
-	   Enable: After command (C1h), must write: ffh,83h,40h */
+	/*
+	 * SETEXTCOM: Set extended command set (C1h)
+	 * This command is used to set extended command set access enable.
+	 * Enable: After command (C1h), must write: ffh,83h,40h
+	 */
 	write_reg(par, 0xC1, 0xFF, 0x83, 0x40);
 
-	/* Sleep out
-	   This command turns off sleep mode.
-	   In this mode the DC/DC converter is enabled, Internal oscillator
-	   is started, and panel scanning is started. */
+	/*
+	 * Sleep out
+	 * This command turns off sleep mode.
+	 * In this mode the DC/DC converter is enabled, Internal oscillator
+	 * is started, and panel scanning is started.
+	 */
 	write_reg(par, 0x11);
 	mdelay(150);
 
 	/* Undoc'd register? */
 	write_reg(par, 0xCA, 0x70, 0x00, 0xD9);
 
-	/* SETOSC: Set Internal Oscillator (B0h)
-	   This command is used to set internal oscillator related settings */
-	/*	OSC_EN: Enable internal oscillator */
-	/*	Internal oscillator frequency: 125% x 2.52MHz */
+	/*
+	 * SETOSC: Set Internal Oscillator (B0h)
+	 * This command is used to set internal oscillator related settings
+	 *	OSC_EN: Enable internal oscillator
+	 *	Internal oscillator frequency: 125% x 2.52MHz
+	 */
 	write_reg(par, 0xB0, 0x01, 0x11);
 
 	/* Drive ability setting */
 	write_reg(par, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06);
 	mdelay(20);
 
-	/* SETPWCTR5: Set Power Control 5(B5h)
-	   This command is used to set VCOM Low and VCOM High Voltage */
-	/* VCOMH 0110101 :  3.925 */
-	/* VCOML 0100000 : -1.700 */
-	/* 45h=69  VCOMH: "VMH" + 5d   VCOML: "VMH" + 5d */
+	/*
+	 * SETPWCTR5: Set Power Control 5(B5h)
+	 * This command is used to set VCOM Low and VCOM High Voltage
+	 * VCOMH 0110101 :  3.925
+	 * VCOML 0100000 : -1.700
+	 * 45h=69  VCOMH: "VMH" + 5d   VCOML: "VMH" + 5d
+	 */
 	write_reg(par, 0xB5, 0x35, 0x20, 0x45);
 
-	/* SETPWCTR4: Set Power Control 4(B4h)
-		VRH[4:0]:	Specify the VREG1 voltage adjusting.
-				VREG1 voltage is for gamma voltage setting.
-		BT[2:0]:	Switch the output factor of step-up circuit 2
-				for VGH and VGL voltage generation. */
+	/*
+	 * SETPWCTR4: Set Power Control 4(B4h)
+	 *	VRH[4:0]:	Specify the VREG1 voltage adjusting.
+	 *			VREG1 voltage is for gamma voltage setting.
+	 *	BT[2:0]:	Switch the output factor of step-up circuit 2
+	 *			for VGH and VGL voltage generation.
+	 */
 	write_reg(par, 0xB4, 0x33, 0x25, 0x4C);
 	mdelay(10);
 
-	/* Interface Pixel Format (3Ah)
-	   This command is used to define the format of RGB picture data,
-	   which is to be transfer via the system and RGB interface. */
-	/* RGB interface: 16 Bit/Pixel	*/
-	write_reg(par, 0x3A, 0x05);
+	/*
+	 * Interface Pixel Format (3Ah)
+	 * This command is used to define the format of RGB picture data,
+	 * which is to be transferred via the system and RGB interface.
+	 * RGB interface: 16 Bit/Pixel
+	 */
+	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
 
-	/* Display on (29h)
-	   This command is used to recover from DISPLAY OFF mode.
-	   Output from the Frame Memory is enabled. */
-	write_reg(par, 0x29);
+	/*
+	 * Display on (29h)
+	 * This command is used to recover from DISPLAY OFF mode.
+	 * Output from the Frame Memory is enabled.
+	 */
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
 	mdelay(10);
 
 	return 0;
@@ -102,9 +117,9 @@
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	write_reg(par, FBTFT_CASET, 0x00, xs, 0x00, xe);
-	write_reg(par, FBTFT_RASET, 0x00, ys, 0x00, ye);
-	write_reg(par, FBTFT_RAMWR);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0x00, xs, 0x00, xe);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0x00, ys, 0x00, ye);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 static int set_var(struct fbtft_par *par)
@@ -116,16 +131,19 @@
 #define MV BIT(5)
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, 0x36, par->bgr << 3);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, par->bgr << 3);
 		break;
 	case 270:
-		write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MX | MV | (par->bgr << 3));
 		break;
 	case 180:
-		write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MX | MY | (par->bgr << 3));
 		break;
 	case 90:
-		write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MY | MV | (par->bgr << 3));
 		break;
 	}
 
@@ -133,12 +151,12 @@
 }
 
 /*
-  Gamma Curve selection, GC (only GC0 can be customized):
-    0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
-  Gamma string format:
-    OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
-    ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX  GC
-*/
+ * Gamma Curve selection, GC (only GC0 can be customized):
+ *   0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
+ * Gamma string format:
+ *   OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
+ *   ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX  GC
+ */
 #define CURVE(num, idx)  curves[num * par->gamma.num_values + idx]
 static int set_gamma(struct fbtft_par *par, unsigned long *curves)
 {
@@ -154,36 +172,38 @@
 		for (j = 0; j < par->gamma.num_values; j++)
 			CURVE(i, j) &= mask[i * par->gamma.num_values + j];
 
-	write_reg(par, 0x26, 1 << CURVE(1, 14)); /* Gamma Set (26h) */
+	/* Gamma Set (26h) */
+	write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 1 << CURVE(1, 14));
 
 	if (CURVE(1, 14))
 		return 0; /* only GC0 can be customized */
 
 	write_reg(par, 0xC2,
-		(CURVE(0, 8) << 4) | CURVE(0, 7),
-		(CURVE(0, 10) << 4) | CURVE(0, 9),
-		(CURVE(0, 12) << 4) | CURVE(0, 11),
-		CURVE(0, 2),
-		(CURVE(0, 4) << 4) | CURVE(0, 3),
-		CURVE(0, 5),
-		CURVE(0, 6),
-		(CURVE(0, 1) << 4) | CURVE(0, 0),
-		(CURVE(0, 14) << 2) | CURVE(0, 13));
+		  (CURVE(0, 8) << 4) | CURVE(0, 7),
+		  (CURVE(0, 10) << 4) | CURVE(0, 9),
+		  (CURVE(0, 12) << 4) | CURVE(0, 11),
+		  CURVE(0, 2),
+		  (CURVE(0, 4) << 4) | CURVE(0, 3),
+		  CURVE(0, 5),
+		  CURVE(0, 6),
+		  (CURVE(0, 1) << 4) | CURVE(0, 0),
+		  (CURVE(0, 14) << 2) | CURVE(0, 13));
 
 	write_reg(par, 0xC3,
-		(CURVE(1, 8) << 4) | CURVE(1, 7),
-		(CURVE(1, 10) << 4) | CURVE(1, 9),
-		(CURVE(1, 12) << 4) | CURVE(1, 11),
-		CURVE(1, 2),
-		(CURVE(1, 4) << 4) | CURVE(1, 3),
-		CURVE(1, 5),
-		CURVE(1, 6),
-		(CURVE(1, 1) << 4) | CURVE(1, 0));
+		  (CURVE(1, 8) << 4) | CURVE(1, 7),
+		  (CURVE(1, 10) << 4) | CURVE(1, 9),
+		  (CURVE(1, 12) << 4) | CURVE(1, 11),
+		  CURVE(1, 2),
+		  (CURVE(1, 4) << 4) | CURVE(1, 3),
+		  CURVE(1, 5),
+		  CURVE(1, 6),
+		  (CURVE(1, 1) << 4) | CURVE(1, 0));
 
 	mdelay(10);
 
 	return 0;
 }
+
 #undef CURVE
 
 static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index 6ff76e5..450a61e 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -97,10 +97,10 @@
 }
 
 /*
-  Gamma string format:
-    VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
-    VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
-*/
+ * Gamma string format:
+ *   VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
+ *   VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
+ */
 #define CURVE(num, idx)  curves[num * par->gamma.num_values + idx]
 static int set_gamma(struct fbtft_par *par, unsigned long *curves)
 {
@@ -140,6 +140,7 @@
 
 	return 0;
 }
+
 #undef CURVE
 
 static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_hx8353d.c b/drivers/staging/fbtft/fb_hx8353d.c
index 8552411..72e4ff8 100644
--- a/drivers/staging/fbtft/fb_hx8353d.c
+++ b/drivers/staging/fbtft/fb_hx8353d.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -27,7 +28,6 @@
 
 static int init_display(struct fbtft_par *par)
 {
-
 	par->fbtftops.reset(par);
 	mdelay(150);
 
@@ -47,18 +47,18 @@
 	write_reg(par, 0x3A, 0x05);
 
 	/* MEM ACCESS */
-	write_reg(par, 0x36, 0xC0);
+	write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0);
 
 	/* SLPOUT - Sleep out & booster on */
-	write_reg(par, 0x11);
+	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
 	mdelay(150);
 
 	/* DISPON - Display On */
-	write_reg(par, 0x29);
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
 
 	/* RGBSET */
-	write_reg(par, 0x2D,
-		 0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
+	write_reg(par, MIPI_DCS_WRITE_LUT,
+		  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
 		32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
 		 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
 		16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
@@ -87,41 +87,45 @@
 #define mv BIT(5)
 static int set_var(struct fbtft_par *par)
 {
-	/* madctl - memory data access control
-	     rgb/bgr:
-	     1. mode selection pin srgb
-		rgb h/w pin for color filter setting: 0=rgb, 1=bgr
-	     2. madctl rgb bit
-		rgb-bgr order color filter panel: 0=rgb, 1=bgr */
+	/*
+	 * madctl - memory data access control
+	 *   rgb/bgr:
+	 *   1. mode selection pin srgb
+	 *	rgb h/w pin for color filter setting: 0=rgb, 1=bgr
+	 *   2. madctl rgb bit
+	 *	rgb-bgr order color filter panel: 0=rgb, 1=bgr
+	 */
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, 0x36, mx | my | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  mx | my | (par->bgr << 3));
 		break;
 	case 270:
-		write_reg(par, 0x36, my | mv | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  my | mv | (par->bgr << 3));
 		break;
 	case 180:
-		write_reg(par, 0x36, par->bgr << 3);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  par->bgr << 3);
 		break;
 	case 90:
-		write_reg(par, 0x36, mx | mv | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  mx | mv | (par->bgr << 3));
 		break;
 	}
 
 	return 0;
 }
 
-/*
-  gamma string format:
-*/
+/* gamma string format: */
 static int set_gamma(struct fbtft_par *par, unsigned long *curves)
 {
 	write_reg(par, 0xE0,
-		curves[0], curves[1], curves[2], curves[3],
-		curves[4], curves[5], curves[6], curves[7],
-		curves[8], curves[9], curves[10], curves[11],
-		curves[12], curves[13], curves[14], curves[15],
-		curves[16], curves[17], curves[18]);
+		  curves[0], curves[1], curves[2], curves[3],
+		  curves[4], curves[5], curves[6], curves[7],
+		  curves[8], curves[9], curves[10], curves[11],
+		  curves[12], curves[13], curves[14], curves[15],
+		  curves[16], curves[17], curves[18]);
 
 	return 0;
 }
diff --git a/drivers/staging/fbtft/fb_hx8357d.c b/drivers/staging/fbtft/fb_hx8357d.c
index a381dbc..32e6efe 100644
--- a/drivers/staging/fbtft/fb_hx8357d.c
+++ b/drivers/staging/fbtft/fb_hx8357d.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 #include "fb_hx8357d.h"
@@ -35,7 +36,7 @@
 	par->fbtftops.reset(par);
 
 	/* Reset things like Gamma */
-	write_reg(par, HX8357B_SWRESET);
+	write_reg(par, MIPI_DCS_SOFT_RESET);
 	usleep_range(5000, 7000);
 
 	/* setextc */
@@ -55,83 +56,83 @@
 	write_reg(par, HX8357_SETPANEL, 0x05);
 
 	write_reg(par, HX8357_SETPWR1,
-		0x00,  /* Not deep standby */
-		0x15,  /* BT */
-		0x1C,  /* VSPR */
-		0x1C,  /* VSNR */
-		0x83,  /* AP */
-		0xAA);  /* FS */
+		  0x00,  /* Not deep standby */
+		  0x15,  /* BT */
+		  0x1C,  /* VSPR */
+		  0x1C,  /* VSNR */
+		  0x83,  /* AP */
+		  0xAA);  /* FS */
 
 	write_reg(par, HX8357D_SETSTBA,
-		0x50,  /* OPON normal */
-		0x50,  /* OPON idle */
-		0x01,  /* STBA */
-		0x3C,  /* STBA */
-		0x1E,  /* STBA */
-		0x08);  /* GEN */
+		  0x50,  /* OPON normal */
+		  0x50,  /* OPON idle */
+		  0x01,  /* STBA */
+		  0x3C,  /* STBA */
+		  0x1E,  /* STBA */
+		  0x08);  /* GEN */
 
 	write_reg(par, HX8357D_SETCYC,
-		0x02,  /* NW 0x02 */
-		0x40,  /* RTN */
-		0x00,  /* DIV */
-		0x2A,  /* DUM */
-		0x2A,  /* DUM */
-		0x0D,  /* GDON */
-		0x78);  /* GDOFF */
+		  0x02,  /* NW 0x02 */
+		  0x40,  /* RTN */
+		  0x00,  /* DIV */
+		  0x2A,  /* DUM */
+		  0x2A,  /* DUM */
+		  0x0D,  /* GDON */
+		  0x78);  /* GDOFF */
 
 	write_reg(par, HX8357D_SETGAMMA,
-		0x02,
-		0x0A,
-		0x11,
-		0x1d,
-		0x23,
-		0x35,
-		0x41,
-		0x4b,
-		0x4b,
-		0x42,
-		0x3A,
-		0x27,
-		0x1B,
-		0x08,
-		0x09,
-		0x03,
-		0x02,
-		0x0A,
-		0x11,
-		0x1d,
-		0x23,
-		0x35,
-		0x41,
-		0x4b,
-		0x4b,
-		0x42,
-		0x3A,
-		0x27,
-		0x1B,
-		0x08,
-		0x09,
-		0x03,
-		0x00,
-		0x01);
+		  0x02,
+		  0x0A,
+		  0x11,
+		  0x1d,
+		  0x23,
+		  0x35,
+		  0x41,
+		  0x4b,
+		  0x4b,
+		  0x42,
+		  0x3A,
+		  0x27,
+		  0x1B,
+		  0x08,
+		  0x09,
+		  0x03,
+		  0x02,
+		  0x0A,
+		  0x11,
+		  0x1d,
+		  0x23,
+		  0x35,
+		  0x41,
+		  0x4b,
+		  0x4b,
+		  0x42,
+		  0x3A,
+		  0x27,
+		  0x1B,
+		  0x08,
+		  0x09,
+		  0x03,
+		  0x00,
+		  0x01);
 
 	/* 16 bit */
-	write_reg(par, HX8357_COLMOD, 0x55);
+	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
 
-	write_reg(par, HX8357_MADCTL, 0xC0);
+	write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0);
 
 	/* TE off */
-	write_reg(par, HX8357_TEON, 0x00);
+	write_reg(par, MIPI_DCS_SET_TEAR_ON, 0x00);
 
 	/* tear line */
-	write_reg(par, HX8357_TEARLINE, 0x00, 0x02);
+	write_reg(par, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
 
 	/* Exit Sleep */
-	write_reg(par, HX8357_SLPOUT);
+	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
 	msleep(150);
 
 	/* display on */
-	write_reg(par, HX8357_DISPON);
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
 	usleep_range(5000, 7000);
 
 	return 0;
@@ -139,18 +140,15 @@
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column addr set */
-	write_reg(par, HX8357_CASET,
-		xs >> 8, xs & 0xff,  /* XSTART */
-		xe >> 8, xe & 0xff); /* XEND */
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xff,  /* XSTART */
+		  xe >> 8, xe & 0xff); /* XEND */
 
-	/* Row addr set */
-	write_reg(par, HX8357_PASET,
-		ys >> 8, ys & 0xff,  /* YSTART */
-		ye >> 8, ye & 0xff); /* YEND */
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xff,  /* YSTART */
+		  ye >> 8, ye & 0xff); /* YEND */
 
-	/* write to RAM */
-	write_reg(par, HX8357_RAMWR);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 #define HX8357D_MADCTL_MY  0x80
@@ -182,7 +180,7 @@
 	val |= (par->bgr ? HX8357D_MADCTL_RGB : HX8357D_MADCTL_BGR);
 
 	/* Memory Access Control */
-	write_reg(par, HX8357_MADCTL, val);
+	write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val);
 
 	return 0;
 }
diff --git a/drivers/staging/fbtft/fb_hx8357d.h b/drivers/staging/fbtft/fb_hx8357d.h
index de05e8c..e281921 100644
--- a/drivers/staging/fbtft/fb_hx8357d.h
+++ b/drivers/staging/fbtft/fb_hx8357d.h
@@ -1,17 +1,17 @@
-/***************************************************
-  This is our library for the Adafruit  ILI9341 Breakout and Shield
-  ----> http://www.adafruit.com/products/1651
-
-  Check out the links above for our tutorials and wiring diagrams
-  These displays use SPI to communicate, 4 or 5 pins are required to
-  interface (RST is optional)
-  Adafruit invests time and resources providing this open source code,
-  please support Adafruit and open-source hardware by purchasing
-  products from Adafruit!
-
-  Written by Limor Fried/Ladyada for Adafruit Industries.
-  MIT license, all text above must be included in any redistribution
- ****************************************************/
+/*
+ * This is our library for the Adafruit  ILI9341 Breakout and Shield
+ * ----> http://www.adafruit.com/products/1651
+ *
+ * Check out the links above for our tutorials and wiring diagrams
+ * These displays use SPI to communicate, 4 or 5 pins are required to
+ * interface (RST is optional)
+ * Adafruit invests time and resources providing this open source code,
+ * please support Adafruit and open-source hardware by purchasing
+ * products from Adafruit!
+ *
+ * Written by Limor Fried/Ladyada for Adafruit Industries.
+ * MIT license, all text above must be included in any redistribution
+ */
 
 #ifndef __HX8357_H__
 #define __HX8357_H__
@@ -22,38 +22,6 @@
 #define HX8357_TFTWIDTH  320
 #define HX8357_TFTHEIGHT 480
 
-#define HX8357B_NOP     0x00
-#define HX8357B_SWRESET 0x01
-#define HX8357B_RDDID   0x04
-#define HX8357B_RDDST   0x09
-
-#define HX8357B_RDPOWMODE  0x0A
-#define HX8357B_RDMADCTL  0x0B
-#define HX8357B_RDCOLMOD  0x0C
-#define HX8357B_RDDIM  0x0D
-#define HX8357B_RDDSDR  0x0F
-
-#define HX8357_SLPIN   0x10
-#define HX8357_SLPOUT  0x11
-#define HX8357B_PTLON   0x12
-#define HX8357B_NORON   0x13
-
-#define HX8357_INVOFF  0x20
-#define HX8357_INVON   0x21
-#define HX8357_DISPOFF 0x28
-#define HX8357_DISPON  0x29
-
-#define HX8357_CASET   0x2A
-#define HX8357_PASET   0x2B
-#define HX8357_RAMWR   0x2C
-#define HX8357_RAMRD   0x2E
-
-#define HX8357B_PTLAR   0x30
-#define HX8357_TEON  0x35
-#define HX8357_TEARLINE  0x44
-#define HX8357_MADCTL  0x36
-#define HX8357_COLMOD  0x3A
-
 #define HX8357_SETOSC 0xB0
 #define HX8357_SETPWR1 0xB1
 #define HX8357B_SETDISPLAY 0xB2
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index f31b3f4..6b8f8b1 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -38,37 +39,11 @@
 #endif
 
 /* ILI9163C commands */
-#define CMD_NOP		0x00 /* Non operation*/
-#define CMD_SWRESET	0x01 /* Soft Reset */
-#define CMD_SLPIN	0x10 /* Sleep ON */
-#define CMD_SLPOUT	0x11 /* Sleep OFF */
-#define CMD_PTLON	0x12 /* Partial Mode ON */
-#define CMD_NORML	0x13 /* Normal Display ON */
-#define CMD_DINVOF	0x20 /* Display Inversion OFF */
-#define CMD_DINVON	0x21 /* Display Inversion ON */
-#define CMD_GAMMASET	0x26 /* Gamma Set (0x01[1],0x02[2],0x04[3],0x08[4]) */
-#define CMD_DISPOFF	0x28 /* Display OFF */
-#define CMD_DISPON	0x29 /* Display ON */
-#define CMD_IDLEON	0x39 /* Idle Mode ON */
-#define CMD_IDLEOF	0x38 /* Idle Mode OFF */
-#define CMD_CLMADRS	0x2A /* Column Address Set */
-#define CMD_PGEADRS	0x2B /* Page Address Set */
-
-#define CMD_RAMWR	0x2C /* Memory Write */
-#define CMD_RAMRD	0x2E /* Memory Read */
-#define CMD_CLRSPACE	0x2D /* Color Space : 4K/65K/262K */
-#define CMD_PARTAREA	0x30 /* Partial Area */
-#define CMD_VSCLLDEF	0x33 /* Vertical Scroll Definition */
-#define CMD_TEFXLON	0x34 /* Tearing Effect Line ON */
-#define CMD_TEFXLOF	0x35 /* Tearing Effect Line OFF */
-#define CMD_MADCTL	0x36 /* Memory Access Control */
-
-#define CMD_PIXFMT	0x3A /* Interface Pixel Format */
-#define CMD_FRMCTR1	0xB1 /* Frame Rate Control
-				(In normal mode/Full colors) */
+#define CMD_FRMCTR1	0xB1 /* Frame Rate Control */
+			     /*	(In normal mode/Full colors) */
 #define CMD_FRMCTR2	0xB2 /* Frame Rate Control (In Idle mode/8-colors) */
-#define CMD_FRMCTR3	0xB3 /* Frame Rate Control
-				(In Partial mode/full colors) */
+#define CMD_FRMCTR3	0xB3 /* Frame Rate Control */
+			     /*	(In Partial mode/full colors) */
 #define CMD_DINVCTR	0xB4 /* Display Inversion Control */
 #define CMD_RGBBLK	0xB5 /* RGB Interface Blanking Porch setting */
 #define CMD_DFUNCTR	0xB6 /* Display Function set 5 */
@@ -88,17 +63,18 @@
 #define CMD_GAMRSEL	0xF2 /* GAM_R_SEL */
 
 /*
-This display:
-http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-Color-TFT-LCD-Display-Module-/271422122271
-This particular display has a design error! The controller has 3 pins to
-configure to constrain the memory and resolution to a fixed dimension (in
-that case 128x128) but they leaved those pins configured for 128x160 so
-there was several pixel memory addressing problems.
-I solved by setup several parameters that dinamically fix the resolution as
-needit so below the parameters for this display. If you have a strain or a
-correct display (can happen with chinese) you can copy those parameters and
-create setup for different displays.
-*/
+ * This display:
+ * http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-
+ * Color-TFT-LCD-Display-Module-/271422122271
+ * This particular display has a design error! The controller has 3 pins to
+ * configure to constrain the memory and resolution to a fixed dimension (in
+ * this case 128x128), but they were left configured for 128x160, which
+ * caused several pixel memory addressing problems.
+ * This is worked around below by several parameters that dynamically fix the
+ * resolution as needed for this display. If you have a variant or a correctly
+ * configured display (both happen with Chinese modules), you can copy these
+ * parameters and create a setup for other displays.
+ */
 
 #ifdef RED
 #define __OFFSET		32 /*see note 2 - this is the red version */
@@ -113,16 +89,17 @@
 	if (par->gpio.cs != -1)
 		gpio_set_value(par->gpio.cs, 0);  /* Activate chip */
 
-	write_reg(par, CMD_SWRESET); /* software reset */
+	write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
 	mdelay(500);
-	write_reg(par, CMD_SLPOUT); /* exit sleep */
+	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
 	mdelay(5);
-	write_reg(par, CMD_PIXFMT, 0x05); /* Set Color Format 16bit */
-	write_reg(par, CMD_GAMMASET, 0x02); /* default gamma curve 3 */
+	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+	/* default gamma curve 3 */
+	write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x02);
 #ifdef GAMMA_ADJ
 	write_reg(par, CMD_GAMRSEL, 0x01); /* Enable Gamma adj */
 #endif
-	write_reg(par, CMD_NORML);
+	write_reg(par, MIPI_DCS_ENTER_NORMAL_MODE);
 	write_reg(par, CMD_DFUNCTR, 0xff, 0x06);
 	/* Frame Rate Control (In normal mode/Full colors) */
 	write_reg(par, CMD_FRMCTR1, 0x08, 0x02);
@@ -135,66 +112,67 @@
 	write_reg(par, CMD_VCOMCTR1, 0x50, 0x63);
 	write_reg(par, CMD_VCOMOFFS, 0);
 
-	write_reg(par, CMD_CLMADRS, 0, 0, 0, WIDTH); /* Set Column Address */
-	write_reg(par, CMD_PGEADRS, 0, 0, 0, HEIGHT); /* Set Page Address */
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0, 0, WIDTH);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0, 0, HEIGHT);
 
-	write_reg(par, CMD_DISPON); /* display ON */
-	write_reg(par, CMD_RAMWR); /* Memory Write */
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON); /* display ON */
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START); /* Memory Write */
 
 	return 0;
 }
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys,
-				int xe, int ye)
+			 int xe, int ye)
 {
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8,
-				xe & 0xff);
-		write_reg(par, CMD_PGEADRS,
-				(ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff,
-				(ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff);
+		write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+			  xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+		write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+			  (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff,
+			  (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff);
 		break;
 	case 90:
-		write_reg(par, CMD_CLMADRS,
-				(xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff,
-				(xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff);
-		write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8,
-				ye & 0xff);
+		write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+			  (xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff,
+			  (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff);
+		write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+			  ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
 		break;
 	case 180:
 	case 270:
-		write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8,
-				xe & 0xff);
-		write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8,
-				ye & 0xff);
+		write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+			  xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+		write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+			  ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
 		break;
 	default:
-		par->info->var.rotate = 0; /* Fix incorrect setting */
+		/* Fix incorrect setting */
+		par->info->var.rotate = 0;
 	}
-	write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 /*
-7) MY:  1(bottom to top),	0(top to bottom)    Row Address Order
-6) MX:  1(R to L),		0(L to R)	    Column Address Order
-5) MV:  1(Exchanged),		0(normal)	    Row/Column exchange
-4) ML:  1(bottom to top),	0(top to bottom)    Vertical Refresh Order
-3) RGB: 1(BGR),			0(RGB)		    Color Space
-2) MH:  1(R to L),		0(L to R)	    Horizontal Refresh Order
-1)
-0)
-
-	MY, MX, MV, ML,RGB, MH, D1, D0
-	0 | 0 | 0 | 0 | 1 | 0 | 0 | 0	//normal
-	1 | 0 | 0 | 0 | 1 | 0 | 0 | 0	//Y-Mirror
-	0 | 1 | 0 | 0 | 1 | 0 | 0 | 0	//X-Mirror
-	1 | 1 | 0 | 0 | 1 | 0 | 0 | 0	//X-Y-Mirror
-	0 | 0 | 1 | 0 | 1 | 0 | 0 | 0	//X-Y Exchange
-	1 | 0 | 1 | 0 | 1 | 0 | 0 | 0	//X-Y Exchange, Y-Mirror
-	0 | 1 | 1 | 0 | 1 | 0 | 0 | 0	//XY exchange
-	1 | 1 | 1 | 0 | 1 | 0 | 0 | 0
-*/
+ * 7) MY:  1(bottom to top),	0(top to bottom)    Row Address Order
+ * 6) MX:  1(R to L),		0(L to R)	    Column Address Order
+ * 5) MV:  1(Exchanged),	0(normal)	    Row/Column exchange
+ * 4) ML:  1(bottom to top),	0(top to bottom)    Vertical Refresh Order
+ * 3) RGB: 1(BGR),		0(RGB)		    Color Space
+ * 2) MH:  1(R to L),		0(L to R)	    Horizontal Refresh Order
+ * 1)
+ * 0)
+ *
+ *	MY, MX, MV, ML, RGB, MH, D1, D0
+ *	0 | 0 | 0 | 0 | 1 | 0 | 0 | 0	//normal
+ *	1 | 0 | 0 | 0 | 1 | 0 | 0 | 0	//Y-Mirror
+ *	0 | 1 | 0 | 0 | 1 | 0 | 0 | 0	//X-Mirror
+ *	1 | 1 | 0 | 0 | 1 | 0 | 0 | 0	//X-Y-Mirror
+ *	0 | 0 | 1 | 0 | 1 | 0 | 0 | 0	//X-Y Exchange
+ *	1 | 0 | 1 | 0 | 1 | 0 | 0 | 0	//X-Y Exchange, Y-Mirror
+ *	0 | 1 | 1 | 0 | 1 | 0 | 0 | 0	//XY exchange
+ *	1 | 1 | 1 | 0 | 1 | 0 | 0 | 0
+ */
 static int set_var(struct fbtft_par *par)
 {
 	u8 mactrl_data = 0; /* Avoid compiler warning */
@@ -217,8 +195,8 @@
 	/* Colorspcae */
 	if (par->bgr)
 		mactrl_data |= (1 << 2);
-	write_reg(par, CMD_MADCTL, mactrl_data);
-	write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+	write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, mactrl_data);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 	return 0;
 }
 
@@ -237,27 +215,28 @@
 			CURVE(i, j) &= mask[i * par->gamma.num_values + j];
 
 	write_reg(par, CMD_PGAMMAC,
-				CURVE(0, 0),
-				CURVE(0, 1),
-				CURVE(0, 2),
-				CURVE(0, 3),
-				CURVE(0, 4),
-				CURVE(0, 5),
-				CURVE(0, 6),
-				(CURVE(0, 7) << 4) | CURVE(0, 8),
-				CURVE(0, 9),
-				CURVE(0, 10),
-				CURVE(0, 11),
-				CURVE(0, 12),
-				CURVE(0, 13),
-				CURVE(0, 14),
-				CURVE(0, 15)
-				);
+		  CURVE(0, 0),
+		  CURVE(0, 1),
+		  CURVE(0, 2),
+		  CURVE(0, 3),
+		  CURVE(0, 4),
+		  CURVE(0, 5),
+		  CURVE(0, 6),
+		  (CURVE(0, 7) << 4) | CURVE(0, 8),
+		  CURVE(0, 9),
+		  CURVE(0, 10),
+		  CURVE(0, 11),
+		  CURVE(0, 12),
+		  CURVE(0, 13),
+		  CURVE(0, 14),
+		  CURVE(0, 15));
 
-	write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+	/* Write Data to GRAM mode */
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 
 	return 0;
 }
+
 #undef CURVE
 #endif
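
The MADCTL bit table above maps directly onto the new MIPI_DCS_SET_ADDRESS_MODE writes. Below is a standalone sketch (not part of the patch; the macro and helper names are made up) of how those bits are typically composed, using the same rotation-to-bits mapping as the s6d02a1/st7735r set_var() hunks later in this diff:

#include <stdint.h>

#define MADCTL_MY  0x80	/* bit 7: row address order (Y mirror)    */
#define MADCTL_MX  0x40	/* bit 6: column address order (X mirror) */
#define MADCTL_MV  0x20	/* bit 5: row/column exchange             */
#define MADCTL_BGR 0x08	/* bit 3: BGR colour filter order         */

/* Hypothetical helper; mirrors the s6d02a1/st7735r set_var() mapping. */
static uint8_t madctl_for_rotation(unsigned int rotate, int bgr)
{
	uint8_t val = bgr ? MADCTL_BGR : 0;

	switch (rotate) {
	case 0:
		val |= MADCTL_MX | MADCTL_MY;
		break;
	case 90:
		val |= MADCTL_MX | MADCTL_MV;
		break;
	case 180:
		break;		/* no mirror bits in those drivers */
	case 270:
		val |= MADCTL_MY | MADCTL_MV;
		break;
	}
	return val;
}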
 
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 3ed50fe..6ff222d 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -47,10 +47,10 @@
 
 	devcode = read_devicecode(par);
 	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
-		devcode);
+		      devcode);
 	if ((devcode != 0x0000) && (devcode != 0x9320))
 		dev_warn(par->info->device,
-			"Unrecognized Device code: 0x%04X (expected 0x9320)\n",
+			 "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
 			devcode);
 
 	/* Initialization sequence from ILI9320 Application Notes */
@@ -216,10 +216,10 @@
 }
 
 /*
-  Gamma string format:
-    VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
-    VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
-*/
+ * Gamma string format:
+ *  VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ *  VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+ */
 #define CURVE(num, idx)  curves[num * par->gamma.num_values + idx]
 static int set_gamma(struct fbtft_par *par, unsigned long *curves)
 {
@@ -248,6 +248,7 @@
 
 	return 0;
 }
+
 #undef CURVE
 
 static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index 3b3a06d..fdf98d37 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -56,42 +56,42 @@
 MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
 
 /*
-Verify that this configuration is within the Voltage limits
-
-Display module configuration: Vcc = IOVcc = Vci = 3.3V
-
- Voltages
-----------
-Vci                                =   3.3
-Vci1           =  Vci * 0.80       =   2.64
-DDVDH          =  Vci1 * 2         =   5.28
-VCL            = -Vci1             =  -2.64
-VREG1OUT       =  Vci * 1.85       =   4.88
-VCOMH          =  VREG1OUT * 0.735 =   3.59
-VCOM amplitude =  VREG1OUT * 0.98  =   4.79
-VGH            =  Vci * 4          =  13.2
-VGL            = -Vci * 4          = -13.2
-
- Limits
---------
-Power supplies
-1.65 < IOVcc < 3.30   =>  1.65 < 3.3 < 3.30
-2.40 < Vcc   < 3.30   =>  2.40 < 3.3 < 3.30
-2.50 < Vci   < 3.30   =>  2.50 < 3.3 < 3.30
-
-Source/VCOM power supply voltage
- 4.50 < DDVDH < 6.0   =>  4.50 <  5.28 <  6.0
--3.0  < VCL   < -2.0  =>  -3.0 < -2.64 < -2.0
-VCI - VCL < 6.0       =>  5.94 < 6.0
-
-Gate driver output voltage
- 10  < VGH   < 20     =>   10 <  13.2  < 20
--15  < VGL   < -5     =>  -15 < -13.2  < -5
-VGH - VGL < 32        =>   26.4 < 32
-
-VCOM driver output voltage
-VCOMH - VCOML < 6.0   =>  4.79 < 6.0
-*/
+ * Verify that this configuration is within the Voltage limits
+ *
+ * Display module configuration: Vcc = IOVcc = Vci = 3.3V
+ *
+ * Voltages
+ * ----------
+ * Vci                                =   3.3
+ * Vci1           =  Vci * 0.80       =   2.64
+ * DDVDH          =  Vci1 * 2         =   5.28
+ * VCL            = -Vci1             =  -2.64
+ * VREG1OUT       =  Vci * 1.85       =   4.88
+ * VCOMH          =  VREG1OUT * 0.735 =   3.59
+ * VCOM amplitude =  VREG1OUT * 0.98  =   4.79
+ * VGH            =  Vci * 4          =  13.2
+ * VGL            = -Vci * 4          = -13.2
+ *
+ * Limits
+ * --------
+ * Power supplies
+ * 1.65 < IOVcc < 3.30   =>  1.65 < 3.3 < 3.30
+ * 2.40 < Vcc   < 3.30   =>  2.40 < 3.3 < 3.30
+ * 2.50 < Vci   < 3.30   =>  2.50 < 3.3 < 3.30
+ *
+ * Source/VCOM power supply voltage
+ *  4.50 < DDVDH < 6.0   =>  4.50 <  5.28 <  6.0
+ * -3.0  < VCL   < -2.0  =>  -3.0 < -2.64 < -2.0
+ * VCI - VCL < 6.0       =>  5.94 < 6.0
+ *
+ * Gate driver output voltage
+ *  10  < VGH   < 20     =>   10 <  13.2  < 20
+ * -15  < VGL   < -5     =>  -15 < -13.2  < -5
+ * VGH - VGL < 32        =>   26.4 < 32
+ *
+ * VCOM driver output voltage
+ * VCOMH - VCOML < 6.0   =>  4.79 < 6.0
+ */
 
 static int init_display(struct fbtft_par *par)
 {
@@ -213,10 +213,10 @@
 }
 
 /*
-  Gamma string format:
-    VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
-    VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
-*/
+ * Gamma string format:
+ *  VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ *  VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+ */
 #define CURVE(num, idx)  curves[num * par->gamma.num_values + idx]
 static int set_gamma(struct fbtft_par *par, unsigned long *curves)
 {
@@ -245,6 +245,7 @@
 
 	return 0;
 }
+
 #undef CURVE
 
 static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
index e0e2539..0711121 100644
--- a/drivers/staging/fbtft/fb_ili9340.c
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -53,7 +54,7 @@
 
 	/* COLMOD: Pixel Format Set */
 	/* 16 bits/pixel */
-	write_reg(par, 0x3A, 0x55);
+	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
 
 	/* Frame Rate Control */
 	/* Division ratio = fosc, Frame Rate = 79Hz */
@@ -65,40 +66,37 @@
 	/* Gamma Function Disable */
 	write_reg(par, 0xF2, 0x00);
 
-	/* Gamma curve selected  */
-	write_reg(par, 0x26, 0x01);
+	/* Gamma curve selection */
+	write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
 
 	/* Positive Gamma Correction */
 	write_reg(par, 0xE0,
-		0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
-		0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
+		  0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
+		  0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
 
 	/* Negative Gamma Correction */
 	write_reg(par, 0xE1,
-		0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
-		0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
+		  0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
+		  0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
 
-	/* Sleep OUT */
-	write_reg(par, 0x11);
+	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
 
 	mdelay(120);
 
-	/* Display ON */
-	write_reg(par, 0x29);
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
 
 	return 0;
 }
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column address */
-	write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
 
-	/* Row address */
-	write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 #define ILI9340_MADCTL_MV  0x20
@@ -123,7 +121,7 @@
 		break;
 	}
 	/* Memory Access Control  */
-	write_reg(par, 0x36, val | (par->bgr << 3));
+	write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val | (par->bgr << 3));
 
 	return 0;
 }
diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
index dcee0af..ff35c86 100644
--- a/drivers/staging/fbtft/fb_ili9341.c
+++ b/drivers/staging/fbtft/fb_ili9341.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -39,9 +40,9 @@
 	par->fbtftops.reset(par);
 
 	/* startup sequence for MI0283QT-9A */
-	write_reg(par, 0x01); /* software reset */
+	write_reg(par, MIPI_DCS_SOFT_RESET);
 	mdelay(5);
-	write_reg(par, 0x28); /* display off */
+	write_reg(par, MIPI_DCS_SET_DISPLAY_OFF);
 	/* --------------------------------------------------------- */
 	write_reg(par, 0xCF, 0x00, 0x83, 0x30);
 	write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81);
@@ -56,18 +57,18 @@
 	write_reg(par, 0xC5, 0x35, 0x3E);
 	write_reg(par, 0xC7, 0xBE);
 	/* ------------memory access control------------------------ */
-	write_reg(par, 0x3A, 0x55); /* 16bit pixel */
+	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); /* 16bit pixel */
 	/* ------------frame rate----------------------------------- */
 	write_reg(par, 0xB1, 0x00, 0x1B);
 	/* ------------Gamma---------------------------------------- */
 	/* write_reg(par, 0xF2, 0x08); */ /* Gamma Function Disable */
-	write_reg(par, 0x26, 0x01);
+	write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
 	/* ------------display-------------------------------------- */
 	write_reg(par, 0xB7, 0x07); /* entry mode set */
 	write_reg(par, 0xB6, 0x0A, 0x82, 0x27, 0x00);
-	write_reg(par, 0x11); /* sleep out */
+	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
 	mdelay(100);
-	write_reg(par, 0x29); /* display on */
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
 	mdelay(20);
 
 	return 0;
@@ -75,40 +76,39 @@
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column address set */
-	write_reg(par, 0x2A,
-		(xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
 
-	/* Row address set */
-	write_reg(par, 0x2B,
-		(ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
-#define MEM_Y   (7) /* MY row address order */
-#define MEM_X   (6) /* MX column address order */
-#define MEM_V   (5) /* MV row / column exchange */
-#define MEM_L   (4) /* ML vertical refresh order */
-#define MEM_H   (2) /* MH horizontal refresh order */
+#define MEM_Y   BIT(7) /* MY row address order */
+#define MEM_X   BIT(6) /* MX column address order */
+#define MEM_V   BIT(5) /* MV row / column exchange */
+#define MEM_L   BIT(4) /* ML vertical refresh order */
+#define MEM_H   BIT(2) /* MH horizontal refresh order */
 #define MEM_BGR (3) /* RGB-BGR Order */
 static int set_var(struct fbtft_par *par)
 {
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, 0x36, (1 << MEM_X) | (par->bgr << MEM_BGR));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MEM_X | (par->bgr << MEM_BGR));
 		break;
 	case 270:
-		write_reg(par, 0x36,
-			(1 << MEM_V) | (1 << MEM_L) | (par->bgr << MEM_BGR));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MEM_V | MEM_L | (par->bgr << MEM_BGR));
 		break;
 	case 180:
-		write_reg(par, 0x36, (1 << MEM_Y) | (par->bgr << MEM_BGR));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MEM_Y | (par->bgr << MEM_BGR));
 		break;
 	case 90:
-		write_reg(par, 0x36, (1 << MEM_Y) | (1 << MEM_X) |
-				     (1 << MEM_V) | (par->bgr << MEM_BGR));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MEM_Y | MEM_X | MEM_V | (par->bgr << MEM_BGR));
 		break;
 	}
 
@@ -116,10 +116,10 @@
 }
 
 /*
-  Gamma string format:
-    Positive: Par1 Par2 [...] Par15
-    Negative: Par1 Par2 [...] Par15
-*/
+ * Gamma string format:
+ *  Positive: Par1 Par2 [...] Par15
+ *  Negative: Par1 Par2 [...] Par15
+ */
 #define CURVE(num, idx)  curves[num * par->gamma.num_values + idx]
 static int set_gamma(struct fbtft_par *par, unsigned long *curves)
 {
@@ -127,14 +127,15 @@
 
 	for (i = 0; i < par->gamma.num_curves; i++)
 		write_reg(par, 0xE0 + i,
-			CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
-			CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
-			CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
-			CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
-			CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
+			  CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
+			  CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
+			  CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
+			  CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
+			  CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
 
 	return 0;
 }
+
 #undef CURVE
 
 static struct fbtft_display display = {
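
The fb_ili9341.c change above also switches the MADCTL macros from bit positions to BIT() masks, so the shifts move out of the call sites; only MEM_BGR stays a position because the bgr flag is shifted into it. A standalone sketch (not kernel code; BIT() is open-coded here, in the kernel it comes from the bitops headers) showing that the two styles produce the same register value for the 180-degree case:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)   (1U << (n))

#define MEM_Y   BIT(7)	/* mask: OR'ed in directly                   */
#define MEM_BGR (3)	/* position: the bgr flag is shifted into it */

int main(void)
{
	unsigned int bgr = 1;

	/* old style (ili9341 180-degree case): explicit shifts at the call site */
	uint8_t old_val = (1U << 7) | (bgr << 3);
	/* new style: ready-made mask, only MEM_BGR still needs a shift */
	uint8_t new_val = MEM_Y | (bgr << MEM_BGR);

	printf("old=0x%02X new=0x%02X\n", old_val, new_val);	/* both 0x88 */
	return 0;
}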
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 6368486..242adb3 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -27,9 +28,8 @@
 #define HEIGHT		480
 
 static int default_init_sequence[] = {
-
 	/* SLP_OUT - Sleep out */
-	-1, 0x11,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
 	-2, 50,
 	/* Power setting */
 	-1, 0xD0, 0x07, 0x42, 0x18,
@@ -42,44 +42,47 @@
 	/* Frame rate & inv. */
 	-1, 0xC5, 0x03,
 	/* Pixel format */
-	-1, 0x3A, 0x55,
+	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
 	/* Gamma */
 	-1, 0xC8, 0x00, 0x32, 0x36, 0x45, 0x06, 0x16,
 		  0x37, 0x75, 0x77, 0x54, 0x0C, 0x00,
 	/* DISP_ON */
-	-1, 0x29,
+	-1, MIPI_DCS_SET_DISPLAY_ON,
 	-3
 };
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* column address */
-	write_reg(par, 0x2a, xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
 
-	/* Row address */
-	write_reg(par, 0x2b, ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
 
-	/* memory write */
-	write_reg(par, 0x2c);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 #define HFLIP 0x01
 #define VFLIP 0x02
-#define ROWxCOL 0x20
+#define ROW_X_COL 0x20
 static int set_var(struct fbtft_par *par)
 {
 	switch (par->info->var.rotate) {
 	case 270:
-		write_reg(par, 0x36, ROWxCOL | HFLIP | VFLIP | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  ROW_X_COL | HFLIP | VFLIP | (par->bgr << 3));
 		break;
 	case 180:
-		write_reg(par, 0x36, VFLIP | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  VFLIP | (par->bgr << 3));
 		break;
 	case 90:
-		write_reg(par, 0x36, ROWxCOL | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  ROW_X_COL | (par->bgr << 3));
 		break;
 	default:
-		write_reg(par, 0x36, HFLIP | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  HFLIP | (par->bgr << 3));
 		break;
 	}
 
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index d9dfff6..fa38d88 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -28,11 +29,10 @@
 static int default_init_sequence[] = {
 	/* Interface Mode Control */
 	-1, 0xb0, 0x0,
-	/* Sleep OUT */
-	-1, 0x11,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
 	-2, 250,
 	/* Interface Pixel Format */
-	-1, 0x3A, 0x55,
+	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
 	/* Power Control 3 */
 	-1, 0xC2, 0x44,
 	/* VCOM Control 1 */
@@ -46,40 +46,41 @@
 	/* Digital Gamma Control 1 */
 	-1, 0xE2, 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
 		  0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00,
-	/* Sleep OUT */
-	-1, 0x11,
-	/* Display ON */
-	-1, 0x29,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
+	-1, MIPI_DCS_SET_DISPLAY_ON,
 	/* end marker */
 	-3
 };
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column address */
-	write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
 
-	/* Row address */
-	write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 static int set_var(struct fbtft_par *par)
 {
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, 0x36, 0x80 | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  0x80 | (par->bgr << 3));
 		break;
 	case 90:
-		write_reg(par, 0x36, 0x20 | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  0x20 | (par->bgr << 3));
 		break;
 	case 180:
-		write_reg(par, 0x36, 0x40 | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  0x40 | (par->bgr << 3));
 		break;
 	case 270:
-		write_reg(par, 0x36, 0xE0 | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  0xE0 | (par->bgr << 3));
 		break;
 	default:
 		break;
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index da85057e..3113355 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -50,7 +51,7 @@
 
 	-1, 0xf3, 0x00, 0x00,
 
-	-1, 0x11,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
 	-2, 50,
 
 	-1, 0xf3, 0x00, 0x01,
@@ -79,18 +80,18 @@
 
 	/* initializing sequence */
 
-	-1, 0x36, 0x08,
+	-1, MIPI_DCS_SET_ADDRESS_MODE, 0x08,
 
-	-1, 0x35, 0x00,
+	-1, MIPI_DCS_SET_TEAR_ON, 0x00,
 
-	-1, 0x3a, 0x05,
+	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x05,
 
-	/* gamma setting sequence */
-	-1, 0x26, 0x01,	/* preset gamma curves, possible values 0x01, 0x02, 0x04, 0x08 */
+	/* gamma setting - possible values 0x01, 0x02, 0x04, 0x08 */
+	-1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
 
 	-2, 150,
-	-1, 0x29,
-	-1, 0x2c,
+	-1, MIPI_DCS_SET_DISPLAY_ON,
+	-1, MIPI_DCS_WRITE_MEMORY_START,
 	/* end marker */
 	-3
 
@@ -98,14 +99,13 @@
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column address */
-	write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
 
-	/* Row address */
-	write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 #define MY BIT(7)
@@ -113,7 +113,7 @@
 #define MV BIT(5)
 static int set_var(struct fbtft_par *par)
 {
-	/* MADCTL - Memory data access control
+	/* Memory data access control (MADCTL, 0x36)
 	     RGB/BGR:
 		1. Mode selection pin SRGB
 			RGB H/W pin for color filter setting: 0=RGB, 1=BGR
@@ -121,16 +121,20 @@
 			RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MX | MY | (par->bgr << 3));
 		break;
 	case 270:
-		write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MY | MV | (par->bgr << 3));
 		break;
 	case 180:
-		write_reg(par, 0x36, par->bgr << 3);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  par->bgr << 3);
 		break;
 	case 90:
-		write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MX | MV | (par->bgr << 3));
 		break;
 	}
 
diff --git a/drivers/staging/fbtft/fb_ssd1305.c b/drivers/staging/fbtft/fb_ssd1305.c
new file mode 100644
index 0000000..4b38c3f
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1305.c
@@ -0,0 +1,216 @@
+/*
+ * FB driver for the SSD1305 OLED Controller
+ *
+ * based on SSD1306 driver by Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME		"fb_ssd1305"
+
+#define WIDTH 128
+#define HEIGHT 64
+
+/*
+ * write_reg() caveat:
+ *
+ *    This doesn't work because D/C has to be LOW for both values:
+ *      write_reg(par, val1, val2);
+ *
+ *    Do it like this:
+ *      write_reg(par, val1);
+ *      write_reg(par, val2);
+ */
+
+/* Init sequence taken from the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+	par->fbtftops.reset(par);
+
+	if (par->gamma.curves[0] == 0) {
+		mutex_lock(&par->gamma.lock);
+		if (par->info->var.yres == 64)
+			par->gamma.curves[0] = 0xCF;
+		else
+			par->gamma.curves[0] = 0x8F;
+		mutex_unlock(&par->gamma.lock);
+	}
+
+	/* Set Display OFF */
+	write_reg(par, 0xAE);
+
+	/* Set Display Clock Divide Ratio/ Oscillator Frequency */
+	write_reg(par, 0xD5);
+	write_reg(par, 0x80);
+
+	/* Set Multiplex Ratio */
+	write_reg(par, 0xA8);
+	if (par->info->var.yres == 64)
+		write_reg(par, 0x3F);
+	else
+		write_reg(par, 0x1F);
+
+	/* Set Display Offset */
+	write_reg(par, 0xD3);
+	write_reg(par, 0x0);
+
+	/* Set Display Start Line */
+	write_reg(par, 0x40 | 0x0);
+
+	/* Charge Pump Setting */
+	write_reg(par, 0x8D);
+	/* A[2] = 1b, Enable charge pump during display on */
+	write_reg(par, 0x14);
+
+	/* Set Memory Addressing Mode */
+	write_reg(par, 0x20);
+	/* Vertical addressing mode  */
+	write_reg(par, 0x01);
+
+	/*
+	 * Set Segment Re-map
+	 * column address 127 is mapped to SEG0
+	 */
+	write_reg(par, 0xA0 | ((par->info->var.rotate == 180) ? 0x0 : 0x1));
+
+	/*
+	 * Set COM Output Scan Direction
+	 * remapped mode. Scan from COM[N-1] to COM0
+	 */
+	write_reg(par, ((par->info->var.rotate == 180) ? 0xC8 : 0xC0));
+
+	/* Set COM Pins Hardware Configuration */
+	write_reg(par, 0xDA);
+	if (par->info->var.yres == 64) {
+		/* A[4]=1b, Alternative COM pin configuration */
+		write_reg(par, 0x12);
+	} else {
+		/* A[4]=0b, Sequential COM pin configuration */
+		write_reg(par, 0x02);
+	}
+
+	/* Set Pre-charge Period */
+	write_reg(par, 0xD9);
+	write_reg(par, 0xF1);
+
+	/*
+	 * Entire Display ON
+	 * Resume to RAM content display. Output follows RAM content
+	 */
+	write_reg(par, 0xA4);
+
+	/*
+	 * Set Normal Display
+	 *  0 in RAM: OFF in display panel
+	 *  1 in RAM: ON in display panel
+	 */
+	write_reg(par, 0xA6);
+
+	/* Set Display ON */
+	write_reg(par, 0xAF);
+
+	return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+	/* Set Lower Column Start Address for Page Addressing Mode */
+	write_reg(par, 0x00 | ((par->info->var.rotate == 180) ? 0x0 : 0x4));
+	/* Set Higher Column Start Address for Page Addressing Mode */
+	write_reg(par, 0x10 | 0x0);
+	/* Set Display Start Line */
+	write_reg(par, 0x40 | 0x0);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+	if (on)
+		write_reg(par, 0xAE);
+	else
+		write_reg(par, 0xAF);
+	return 0;
+}
+
+/* Gamma is used to control Contrast */
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+	curves[0] &= 0xFF;
+	/* Set Contrast Control for BANK0 */
+	write_reg(par, 0x81);
+	write_reg(par, curves[0]);
+
+	return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+	u16 *vmem16 = (u16 *)par->info->screen_buffer;
+	u8 *buf = par->txbuf.buf;
+	int x, y, i;
+	int ret;
+
+	for (x = 0; x < par->info->var.xres; x++) {
+		for (y = 0; y < par->info->var.yres / 8; y++) {
+			*buf = 0x00;
+			for (i = 0; i < 8; i++)
+				*buf |= (vmem16[(y * 8 + i) *
+						par->info->var.xres + x] ?
+					 1 : 0) << i;
+			buf++;
+		}
+	}
+
+	/* Write data */
+	gpio_set_value(par->gpio.dc, 1);
+	ret = par->fbtftops.write(par, par->txbuf.buf,
+				  par->info->var.xres * par->info->var.yres /
+				  8);
+	if (ret < 0)
+		dev_err(par->info->device, "write failed and returned: %d\n",
+			ret);
+	return ret;
+}
+
+static struct fbtft_display display = {
+	.regwidth = 8,
+	.width = WIDTH,
+	.height = HEIGHT,
+	.txbuflen = WIDTH * HEIGHT / 8,
+	.gamma_num = 1,
+	.gamma_len = 1,
+	.gamma = "00",
+	.fbtftops = {
+		.write_vmem = write_vmem,
+		.init_display = init_display,
+		.set_addr_win = set_addr_win,
+		.blank = blank,
+		.set_gamma = set_gamma,
+	},
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1305", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1305");
+MODULE_ALIAS("platform:ssd1305");
+
+MODULE_DESCRIPTION("SSD1305 OLED Driver");
+MODULE_AUTHOR("Alexey Mednyy");
+MODULE_LICENSE("GPL");
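
The new fb_ssd1305.c write_vmem() repacks the 16-bit framebuffer into one byte per eight vertically adjacent pixels of a column, consistent with the vertical addressing mode selected in init_display(). A standalone sketch of that packing (not kernel code; names are illustrative):

#include <stdint.h>

/*
 * Hypothetical standalone version of the packing loop in write_vmem():
 * each output byte carries eight vertically adjacent pixels of one
 * column, bit 0 being the topmost pixel of the 8-row band.
 */
static void pack_mono_vertical(const uint16_t *vmem16, uint8_t *out,
			       int xres, int yres)
{
	int x, y, i;

	for (x = 0; x < xres; x++) {
		for (y = 0; y < yres / 8; y++) {
			uint8_t b = 0;

			for (i = 0; i < 8; i++)
				if (vmem16[(y * 8 + i) * xres + x])
					b |= 1U << i;
			*out++ = b;	/* xres * yres / 8 bytes total */
		}
	}
}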
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
new file mode 100644
index 0000000..15078bf
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -0,0 +1,205 @@
+/*
+ * FB driver for the SSD1325 OLED Controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME		"fb_ssd1325"
+
+#define WIDTH 128
+#define HEIGHT 64
+#define GAMMA_NUM   1
+#define GAMMA_LEN   15
+#define DEFAULT_GAMMA "7 1 1 1 1 2 2 3 3 4 4 5 5 6 6"
+
+/*
+ * write_reg() caveat:
+ *
+ *    This doesn't work because D/C has to be LOW for both values:
+ *      write_reg(par, val1, val2);
+ *
+ *    Do it like this:
+ *      write_reg(par, val1);
+ *      write_reg(par, val2);
+ */
+
+/* Init sequence taken from the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+	par->fbtftops.reset(par);
+
+	gpio_set_value(par->gpio.cs, 0);
+
+	write_reg(par, 0xb3);
+	write_reg(par, 0xf0);
+	write_reg(par, 0xae);
+	write_reg(par, 0xa1);
+	write_reg(par, 0x00);
+	write_reg(par, 0xa8);
+	write_reg(par, 0x3f);
+	write_reg(par, 0xa0);
+	write_reg(par, 0x45);
+	write_reg(par, 0xa2);
+	write_reg(par, 0x40);
+	write_reg(par, 0x75);
+	write_reg(par, 0x00);
+	write_reg(par, 0x3f);
+	write_reg(par, 0x15);
+	write_reg(par, 0x00);
+	write_reg(par, 0x7f);
+	write_reg(par, 0xa4);
+	write_reg(par, 0xaf);
+
+	return 0;
+}
+
+static uint8_t rgb565_to_g16(u16 pixel)
+{
+	u16 b = pixel & 0x1f;
+	u16 g = (pixel & (0x3f << 5)) >> 5;
+	u16 r = (pixel & (0x1f << (5 + 6))) >> (5 + 6);
+
+	pixel = (299 * r + 587 * g + 114 * b) / 195;
+	if (pixel > 255)
+		pixel = 255;
+	return (uint8_t)pixel / 16;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+	fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+		      "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe,
+		      ye);
+
+	write_reg(par, 0x75);
+	write_reg(par, 0x00);
+	write_reg(par, 0x3f);
+	write_reg(par, 0x15);
+	write_reg(par, 0x00);
+	write_reg(par, 0x7f);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+	fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+		      __func__, on ? "true" : "false");
+
+	if (on)
+		write_reg(par, 0xAE);
+	else
+		write_reg(par, 0xAF);
+	return 0;
+}
+
+/*
+ * Grayscale Lookup Table
+ * GS1 - GS15
+ * The "Gamma curve" contains the relative values between the entries
+ * in the Lookup table.
+ *
+ * 0 = Setting of GS1 < Setting of GS2 < Setting of GS3.....<
+ * Setting of GS14 < Setting of GS15
+ */
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+	int i;
+
+	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+	for (i = 0; i < GAMMA_LEN; i++) {
+		if (i > 0 && curves[i] < 1) {
+			dev_err(par->info->device,
+				"Illegal value in Grayscale Lookup Table at index %d.\n"
+				"Must be greater than 0\n", i);
+			return -EINVAL;
+		}
+		if (curves[i] > 7) {
+			dev_err(par->info->device,
+				"Illegal value(s) in Grayscale Lookup Table.\n"
+				"At index=%d, the accumulated value has exceeded 7\n",
+				i);
+			return -EINVAL;
+		}
+	}
+	write_reg(par, 0xB8);
+	for (i = 0; i < 8; i++)
+		write_reg(par, (curves[i] & 0xFF));
+	return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+	u16 *vmem16 = (u16 *)par->info->screen_buffer;
+	u8 *buf = par->txbuf.buf;
+	u8 n1;
+	u8 n2;
+	int y, x;
+	int ret;
+
+	for (x = 0; x < par->info->var.xres; x++) {
+		if (x % 2)
+			continue;
+		for (y = 0; y < par->info->var.yres; y++) {
+			n1 = rgb565_to_g16(vmem16[y * par->info->var.xres + x]);
+			n2 = rgb565_to_g16(vmem16
+					   [y * par->info->var.xres + x + 1]);
+			*buf = (n1 << 4) | n2;
+			buf++;
+		}
+	}
+
+	gpio_set_value(par->gpio.dc, 1);
+
+	/* Write data */
+	ret = par->fbtftops.write(par, par->txbuf.buf,
+				par->info->var.xres * par->info->var.yres / 2);
+	if (ret < 0)
+		dev_err(par->info->device,
+			"%s: write failed and returned: %d\n", __func__, ret);
+
+	return ret;
+}
+
+static struct fbtft_display display = {
+	.regwidth = 8,
+	.width = WIDTH,
+	.height = HEIGHT,
+	.txbuflen = WIDTH * HEIGHT / 2,
+	.gamma_num = GAMMA_NUM,
+	.gamma_len = GAMMA_LEN,
+	.gamma = DEFAULT_GAMMA,
+	.fbtftops = {
+		.write_vmem = write_vmem,
+		.init_display = init_display,
+		.set_addr_win = set_addr_win,
+		.blank = blank,
+		.set_gamma = set_gamma,
+	},
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1325", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1325");
+MODULE_ALIAS("platform:ssd1325");
+
+MODULE_DESCRIPTION("SSD1325 OLED Driver");
+MODULE_AUTHOR("Alexey Mednyy");
+MODULE_LICENSE("GPL");
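
The new fb_ssd1325.c converts each RGB565 pixel to a 4-bit gray level and packs two pixels per byte. A standalone sketch of the conversion (not kernel code): the 299/587/114 weights are the usual BT.601 luma coefficients, and the divide by 195 rescales the 5/6/5-bit channels to roughly 0..255 before the final shift down to 0..15.

#include <stdint.h>

/* Same arithmetic as the driver's rgb565_to_g16(), spelled out. */
static uint8_t rgb565_to_gray4(uint16_t pixel)
{
	uint16_t b = pixel & 0x1f;		/* 5-bit blue  */
	uint16_t g = (pixel >> 5) & 0x3f;	/* 6-bit green */
	uint16_t r = (pixel >> 11) & 0x1f;	/* 5-bit red   */
	/* max weighted sum is 49784, so /195 lands in roughly 0..255 */
	unsigned int lum = (299 * r + 587 * g + 114 * b) / 195;

	if (lum > 255)
		lum = 255;
	return lum / 16;	/* 16 gray levels, 0..15 */
}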
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index a92b0d0..c5e51fe 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -25,12 +26,10 @@
 			"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
 
 static int default_init_sequence[] = {
-	/* SWRESET - Software reset */
-	-1, 0x01,
+	-1, MIPI_DCS_SOFT_RESET,
 	-2, 150,                               /* delay */
 
-	/* SLPOUT - Sleep out & booster on */
-	-1, 0x11,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
 	-2, 500,                               /* delay */
 
 	/* FRMCTR1 - frame rate control: normal mode
@@ -71,18 +70,14 @@
 	/* VMCTR1 - Power Control */
 	-1, 0xC5, 0x0E,
 
-	/* INVOFF - Display inversion off */
-	-1, 0x20,
+	-1, MIPI_DCS_EXIT_INVERT_MODE,
 
-	/* COLMOD - Interface pixel format */
-	-1, 0x3A, 0x05,
+	-1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
 
-	/* DISPON - Display On */
-	-1, 0x29,
+	-1, MIPI_DCS_SET_DISPLAY_ON,
 	-2, 100,                               /* delay */
 
-	/* NORON - Partial off (Normal) */
-	-1, 0x13,
+	-1, MIPI_DCS_ENTER_NORMAL_MODE,
 	-2, 10,                               /* delay */
 
 	/* end marker */
@@ -91,14 +86,13 @@
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column address */
-	write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
 
-	/* Row address */
-	write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 #define MY BIT(7)
@@ -114,16 +108,20 @@
 		RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
 	switch (par->info->var.rotate) {
 	case 0:
-		write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MX | MY | (par->bgr << 3));
 		break;
 	case 270:
-		write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MY | MV | (par->bgr << 3));
 		break;
 	case 180:
-		write_reg(par, 0x36, par->bgr << 3);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  par->bgr << 3);
 		break;
 	case 90:
-		write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+			  MX | MV | (par->bgr << 3));
 		break;
 	}
 
diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c
index caf263d..097e71c 100644
--- a/drivers/staging/fbtft/fb_tinylcd.c
+++ b/drivers/staging/fbtft/fb_tinylcd.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 
@@ -38,7 +39,7 @@
 	write_reg(par, 0xB4, 0x02);
 	write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
 	write_reg(par, 0xB7, 0x07);
-	write_reg(par, 0x36, 0x58);
+	write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58);
 	write_reg(par, 0xF0, 0x36, 0xA5, 0xD3);
 	write_reg(par, 0xE5, 0x80);
 	write_reg(par, 0xE5, 0x01);
@@ -47,24 +48,23 @@
 	write_reg(par, 0xF0, 0x36, 0xA5, 0x53);
 	write_reg(par, 0xE0, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00,
 			     0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
-	write_reg(par, 0x3A, 0x55);
-	write_reg(par, 0x11);
+	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
 	udelay(250);
-	write_reg(par, 0x29);
+	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
 
 	return 0;
 }
 
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
-	/* Column address */
-	write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
 
-	/* Row address */
-	write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 static int set_var(struct fbtft_par *par)
@@ -72,19 +72,19 @@
 	switch (par->info->var.rotate) {
 	case 270:
 		write_reg(par, 0xB6, 0x00, 0x02, 0x3B);
-		write_reg(par, 0x36, 0x28);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x28);
 		break;
 	case 180:
 		write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
-		write_reg(par, 0x36, 0x58);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58);
 		break;
 	case 90:
 		write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
-		write_reg(par, 0x36, 0x38);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x38);
 		break;
 	default:
 		write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
-		write_reg(par, 0x36, 0x08);
+		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x08);
 		break;
 	}
 
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index 212908e..b78045f 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -78,11 +78,11 @@
 	mdelay(10);
 
 	/* set startpoint */
-	/* LCD_START_LINE | (pos & 0x3F) */
 	write_reg(par, LCD_START_LINE);
 
 	/* select orientation BOTTOMVIEW */
 	write_reg(par, LCD_BOTTOMVIEW | 1);
+
 	/* output mode select (turns display upside-down) */
 	write_reg(par, LCD_SCAN_DIR | 0x00);
 
@@ -96,20 +96,14 @@
 	write_reg(par, LCD_BIAS | 0);
 
 	/* power control mode: all features on */
-	/* LCD_POWER_CONTROL | (val&0x07) */
 	write_reg(par, LCD_POWER_CONTROL | 0x07);
 
 	/* set voltage regulator R/R */
-	/* LCD_VOLTAGE | (val&0x07) */
 	write_reg(par, LCD_VOLTAGE | 0x07);
 
 	/* volume mode set */
-	/* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
 	write_reg(par, LCD_VOLUME_MODE);
-	/* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
 	write_reg(par, 0x09);
-	/* ???? */
-	/* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
 	write_reg(par, LCD_NO_OP);
 
 	/* advanced program control */
@@ -125,17 +119,8 @@
 static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
 {
 	/* goto address */
-	/* LCD_PAGE_ADDRESS | ((page) & 0x1F),
-	 (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
-	 LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
 	write_reg(par, LCD_PAGE_ADDRESS);
-	/* LCD_PAGE_ADDRESS | ((page) & 0x1F),
-	 (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
-	  LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
 	write_reg(par, 0x00);
-	/* LCD_PAGE_ADDRESS | ((page) & 0x1F),
-	 (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
-	  LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
 	write_reg(par, LCD_COL_ADDRESS);
 }
 
@@ -156,17 +141,9 @@
 					 1 : 0) << i;
 			buf++;
 		}
-		/* LCD_PAGE_ADDRESS | ((page) & 0x1F),
-		 (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
-		  LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+
 		write_reg(par, LCD_PAGE_ADDRESS | (u8)y);
-		/* LCD_PAGE_ADDRESS | ((page) & 0x1F),
-		 (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
-		  LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
 		write_reg(par, 0x00);
-		/* LCD_PAGE_ADDRESS | ((page) & 0x1F),
-		 (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
-		  LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
 		write_reg(par, LCD_COL_ADDRESS);
 		gpio_set_value(par->gpio.dc, 1);
 		ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index b1e4516..6c9cdd2 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -35,6 +35,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <video/mipi_display.h>
 
 #include "fbtft.h"
 #include "internal.h"
@@ -129,7 +130,8 @@
 	while (gpio->name[0]) {
 		flags = FBTFT_GPIO_NO_MATCH;
 		/* if driver provides match function, try it first,
-		   if no match use our own */
+		 * if no match use our own
+		 */
 		if (par->fbtftops.request_gpios_match)
 			flags = par->fbtftops.request_gpios_match(par, gpio);
 		if (flags == FBTFT_GPIO_NO_MATCH)
@@ -319,16 +321,13 @@
 static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
 			       int ye)
 {
-	/* Column address set */
-	write_reg(par, 0x2A,
-		(xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+		  (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
 
-	/* Row address set */
-	write_reg(par, 0x2B,
-		(ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+		  (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
 
-	/* Memory write */
-	write_reg(par, 0x2C);
+	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
 }
 
 static void fbtft_reset(struct fbtft_par *par)
@@ -520,8 +519,7 @@
 		"%s: count=%zd, ppos=%llu\n", __func__,  count, *ppos);
 	res = fb_sys_write(info, buf, count, ppos);
 
-	/* TODO: only mark changed area
-	   update all for now */
+	/* TODO: only mark changed area; update all for now */
 	par->fbtftops.mkdirty(info, -1, 0);
 
 	return res;
@@ -989,8 +987,6 @@
 		par->fbtftops.unregister_backlight(par);
 	if (spi)
 		spi_set_drvdata(spi, NULL);
-	if (par->pdev)
-		platform_set_drvdata(par->pdev, NULL);
 
 	return ret;
 }
@@ -1012,8 +1008,6 @@
 
 	if (spi)
 		spi_set_drvdata(spi, NULL);
-	if (par->pdev)
-		platform_set_drvdata(par->pdev, NULL);
 	if (par->fbtftops.unregister_backlight)
 		par->fbtftops.unregister_backlight(par);
 	fbtft_sysfs_exit(par);
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 3ccdec9..d3bc394 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -20,14 +20,6 @@
 #include <linux/spi/spi.h>
 #include <linux/platform_device.h>
 
-#define FBTFT_NOP		0x00
-#define FBTFT_SWRESET	0x01
-#define FBTFT_RDDID		0x04
-#define FBTFT_RDDST		0x09
-#define FBTFT_CASET		0x2A
-#define FBTFT_RASET		0x2B
-#define FBTFT_RAMWR		0x2C
-
 #define FBTFT_ONBOARD_BACKLIGHT 2
 
 #define FBTFT_GPIO_NO_MATCH		0xFFFF
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 071f79b..241d7c6 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -212,38 +212,63 @@
 	"0F 00 1 7 4 0 0 0 6 7"
 
 static int pitft_init_sequence[] = {
-	-1, 0x01, -2, 5, -1, 0x28, -1, 0xEF,
-	0x03, 0x80, 0x02, -1, 0xCF, 0x00, 0xC1, 0x30,
+	-1, MIPI_DCS_SOFT_RESET,
+	-2, 5,
+	-1, MIPI_DCS_SET_DISPLAY_OFF,
+	-1, 0xEF, 0x03, 0x80, 0x02,
+	-1, 0xCF, 0x00, 0xC1, 0x30,
 	-1, 0xED, 0x64, 0x03, 0x12, 0x81,
 	-1, 0xE8, 0x85, 0x00, 0x78,
 	-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-	-1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00,
-	-1, 0xC0, 0x23, -1, 0xC1, 0x10, -1, 0xC5,
-	0x3e, 0x28, -1, 0xC7, 0x86, -1, 0x3A, 0x55,
-	-1, 0xB1, 0x00, 0x18, -1, 0xB6, 0x08, 0x82,
-	0x27, -1, 0xF2, 0x00, -1, 0x26, 0x01,
-	-1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08,
-	0x4E, 0xF1, 0x37, 0x07, 0x10, 0x03,
-	0x0E, 0x09, 0x00, -1, 0xE1, 0x00, 0x0E, 0x14,
-	0x03, 0x11, 0x07, 0x31, 0xC1, 0x48,
-	0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, -1,
-	0x11, -2, 100, -1, 0x29, -2, 20, -3 };
+	-1, 0xF7, 0x20,
+	-1, 0xEA, 0x00, 0x00,
+	-1, 0xC0, 0x23,
+	-1, 0xC1, 0x10,
+	-1, 0xC5, 0x3E, 0x28,
+	-1, 0xC7, 0x86,
+	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
+	-1, 0xB1, 0x00, 0x18,
+	-1, 0xB6, 0x08, 0x82, 0x27,
+	-1, 0xF2, 0x00,
+	-1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
+	-1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
+		0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
+	-1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
+		0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
+	-2, 100,
+	-1, MIPI_DCS_SET_DISPLAY_ON,
+	-2, 20,
+	-3
+};
 
 static int waveshare32b_init_sequence[] = {
 	-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
 	-1, 0xCF, 0x00, 0xC1, 0x30,
-	-1, 0xE8, 0x85, 0x00, 0x78, -1, 0xEA, 0x00,
-	0x00, -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-	-1, 0xF7, 0x20, -1, 0xC0, 0x23, -1, 0xC1,
-	0x10, -1, 0xC5, 0x3e, 0x28, -1, 0xC7, 0x86,
-	-1, 0x36, 0x28, -1, 0x3A, 0x55, -1, 0xB1, 0x00,
-	0x18, -1, 0xB6, 0x08, 0x82, 0x27,
-	-1, 0xF2, 0x00, -1, 0x26, 0x01,
+	-1, 0xE8, 0x85, 0x00, 0x78,
+	-1, 0xEA, 0x00, 0x00,
+	-1, 0xED, 0x64, 0x03, 0x12, 0x81,
+	-1, 0xF7, 0x20,
+	-1, 0xC0, 0x23,
+	-1, 0xC1, 0x10,
+	-1, 0xC5, 0x3E, 0x28,
+	-1, 0xC7, 0x86,
+	-1, MIPI_DCS_SET_ADDRESS_MODE, 0x28,
+	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
+	-1, 0xB1, 0x00, 0x18,
+	-1, 0xB6, 0x08, 0x82, 0x27,
+	-1, 0xF2, 0x00,
+	-1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
 	-1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
-	0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
+		0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
 	-1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
-	0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
-	-1, 0x11, -2, 120, -1, 0x29, -1, 0x2c, -3 };
+		0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
+	-1, MIPI_DCS_EXIT_SLEEP_MODE,
+	-2, 120,
+	-1, MIPI_DCS_SET_DISPLAY_ON,
+	-1, MIPI_DCS_WRITE_MEMORY_START,
+	-3
+};
 
 /* Supported displays in alphabetical order */
 static struct fbtft_device_display displays[] = {
@@ -1287,7 +1312,7 @@
 
 static int spi_device_found(struct device *dev, void *data)
 {
-	struct spi_device *spi = container_of(dev, struct spi_device, dev);
+	struct spi_device *spi = to_spi_device(dev);
 
 	dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
 		 dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
@@ -1305,7 +1330,7 @@
 static int p_device_found(struct device *dev, void *data)
 {
 	struct platform_device
-	*pdev = container_of(dev, struct platform_device, dev);
+	*pdev = to_platform_device(dev);
 
 	if (strstr(pdev->name, "fb"))
 		dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
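
The init sequences rewritten above all follow fbtft's tagged-array convention: -1 introduces a command followed by its parameter bytes, -2 introduces a delay in milliseconds, and -3 terminates the sequence; fbtft-core walks the array, issuing write_reg() for each -1 group and a delay for each -2 entry. An annotated illustration (not part of the patch; the command bytes are the same magic values this patch replaces with MIPI_DCS_* names):

static int example_init_sequence[] = {
	-1, 0x01,		/* MIPI_DCS_SOFT_RESET, no parameters */
	-2, 150,		/* wait 150 ms                        */
	-1, 0x3A, 0x55,		/* MIPI_DCS_SET_PIXEL_FORMAT, 16 bpp  */
	-1, 0x29,		/* MIPI_DCS_SET_DISPLAY_ON            */
	-3			/* end marker                         */
};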
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 0d779d9..1f95933 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -7,8 +7,9 @@
 #
 
 config FSL_MC_BUS
-	tristate "Freescale Management Complex (MC) bus driver"
+	bool "Freescale Management Complex (MC) bus driver"
 	depends on OF && ARM64
+	select GENERIC_MSI_IRQ_DOMAIN
 	help
 	  Driver to enable the bus infrastructure for the Freescale
           QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
index 25433a9..e731517 100644
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ b/drivers/staging/fsl-mc/bus/Makefile
@@ -13,5 +13,7 @@
 		      dpmng.o \
 		      dprc-driver.o \
 		      mc-allocator.o \
+		      mc-msi.o \
+		      irq-gic-v3-its-fsl-mc-msi.o \
 		      dpmcp.o \
 		      dpbp.o
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 2c4cd70..9380ebc 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -13,6 +13,8 @@
 #include "../include/mc-sys.h"
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
 #include "dprc-cmd.h"
 
 struct dprc_child_objs {
@@ -127,7 +129,7 @@
 {
 	int error;
 	u32 plugged_flag_at_mc =
-			(obj_desc->state & DPRC_OBJ_STATE_PLUGGED);
+			obj_desc->state & DPRC_OBJ_STATE_PLUGGED;
 
 	if (plugged_flag_at_mc !=
 	    (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) {
@@ -241,6 +243,7 @@
  * dprc_scan_objects - Discover objects in a DPRC
  *
  * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @total_irq_count: output; total IRQs needed by the DPRC and its children
  *
  * Detects objects added and removed from a DPRC and synchronizes the
  * state of the Linux bus driver, MC by adding and removing
@@ -254,11 +257,13 @@
  * populated before they can get allocation requests from probe callbacks
  * of the device drivers for the non-allocatable devices.
  */
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev)
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+		      unsigned int *total_irq_count)
 {
 	int num_child_objects;
 	int dprc_get_obj_failures;
 	int error;
+	unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
 	struct dprc_obj_desc *child_obj_desc_array = NULL;
 
 	error = dprc_get_obj_count(mc_bus_dev->mc_io,
@@ -307,6 +312,7 @@
 				continue;
 			}
 
+			irq_count += obj_desc->irq_count;
 			dev_dbg(&mc_bus_dev->dev,
 				"Discovered object: type %s, id %d\n",
 				obj_desc->type, obj_desc->id);
@@ -319,6 +325,7 @@
 		}
 	}
 
+	*total_irq_count = irq_count;
 	dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
 			    num_child_objects);
 
@@ -344,6 +351,7 @@
 int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
 {
 	int error;
+	unsigned int irq_count;
 	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
 
 	dprc_init_all_resource_pools(mc_bus_dev);
@@ -352,11 +360,25 @@
 	 * Discover objects in the DPRC:
 	 */
 	mutex_lock(&mc_bus->scan_mutex);
-	error = dprc_scan_objects(mc_bus_dev);
+	error = dprc_scan_objects(mc_bus_dev, &irq_count);
 	mutex_unlock(&mc_bus->scan_mutex);
 	if (error < 0)
 		goto error;
 
+	if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+		if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+			dev_warn(&mc_bus_dev->dev,
+				 "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+				 irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+		}
+
+		error = fsl_mc_populate_irq_pool(
+				mc_bus,
+				FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+		if (error < 0)
+			goto error;
+	}
+
 	return 0;
 error:
 	dprc_cleanup_all_resource_pools(mc_bus_dev);
@@ -365,6 +387,230 @@
 EXPORT_SYMBOL_GPL(dprc_scan_container);
 
 /**
+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+{
+	int error;
+	u32 status;
+	struct device *dev = (struct device *)arg;
+	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+	struct fsl_mc_io *mc_io = mc_dev->mc_io;
+	struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+
+	dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+		irq_num, smp_processor_id());
+
+	if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC)))
+		return IRQ_HANDLED;
+
+	mutex_lock(&mc_bus->scan_mutex);
+	if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
+		goto out;
+
+	error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+				    &status);
+	if (error < 0) {
+		dev_err(dev,
+			"dprc_get_irq_status() failed: %d\n", error);
+		goto out;
+	}
+
+	error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+				      status);
+	if (error < 0) {
+		dev_err(dev,
+			"dprc_clear_irq_status() failed: %d\n", error);
+		goto out;
+	}
+
+	if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+		      DPRC_IRQ_EVENT_OBJ_REMOVED |
+		      DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+		      DPRC_IRQ_EVENT_OBJ_DESTROYED |
+		      DPRC_IRQ_EVENT_OBJ_CREATED)) {
+		unsigned int irq_count;
+
+		error = dprc_scan_objects(mc_dev, &irq_count);
+		if (error < 0) {
+			/*
+			 * -ENXIO means the object scan was aborted because
+			 * an object was removed from the DPRC in the MC
+			 * while we were scanning it, so that error is
+			 * ignored here.
+			 */
+			if (error != -ENXIO) {
+				dev_err(dev, "dprc_scan_objects() failed: %d\n",
+					error);
+			}
+
+			goto out;
+		}
+
+		if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+			dev_warn(dev,
+				 "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+				 irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+		}
+	}
+
+out:
+	mutex_unlock(&mc_bus->scan_mutex);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Disable and clear interrupt for a given DPRC object
+ */
+static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+	int error;
+	struct fsl_mc_io *mc_io = mc_dev->mc_io;
+
+	WARN_ON(mc_dev->obj_desc.irq_count != 1);
+
+	/*
+	 * Disable generation of interrupt, while we configure it:
+	 */
+	error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+	if (error < 0) {
+		dev_err(&mc_dev->dev,
+			"Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+			error);
+		return error;
+	}
+
+	/*
+	 * Disable all interrupt causes for the interrupt:
+	 */
+	error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+	if (error < 0) {
+		dev_err(&mc_dev->dev,
+			"Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+			error);
+		return error;
+	}
+
+	/*
+	 * Clear any leftover interrupts:
+	 */
+	error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+	if (error < 0) {
+		dev_err(&mc_dev->dev,
+			"Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+			error);
+		return error;
+	}
+
+	return 0;
+}
+
+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+{
+	int error;
+	struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+	WARN_ON(mc_dev->obj_desc.irq_count != 1);
+
+	/*
+	 * NOTE: devm_request_threaded_irq() invokes the device-specific
+	 * callback that physically programs the MSI in the device
+	 */
+	error = devm_request_threaded_irq(&mc_dev->dev,
+					  irq->msi_desc->irq,
+					  dprc_irq0_handler,
+					  dprc_irq0_handler_thread,
+					  IRQF_NO_SUSPEND | IRQF_ONESHOT,
+					  "FSL MC DPRC irq0",
+					  &mc_dev->dev);
+	if (error < 0) {
+		dev_err(&mc_dev->dev,
+			"devm_request_threaded_irq() failed: %d\n",
+			error);
+		return error;
+	}
+
+	return 0;
+}
+
+static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+	int error;
+
+	/*
+	 * Enable all interrupt causes for the interrupt:
+	 */
+	error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+				  ~0x0u);
+	if (error < 0) {
+		dev_err(&mc_dev->dev,
+			"Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+			error);
+
+		return error;
+	}
+
+	/*
+	 * Enable generation of the interrupt:
+	 */
+	error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+	if (error < 0) {
+		dev_err(&mc_dev->dev,
+			"Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+			error);
+
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * Setup interrupt for a given DPRC device
+ */
+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
+	int error;
+
+	error = fsl_mc_allocate_irqs(mc_dev);
+	if (error < 0)
+		return error;
+
+	error = disable_dprc_irq(mc_dev);
+	if (error < 0)
+		goto error_free_irqs;
+
+	error = register_dprc_irq_handler(mc_dev);
+	if (error < 0)
+		goto error_free_irqs;
+
+	error = enable_dprc_irq(mc_dev);
+	if (error < 0)
+		goto error_free_irqs;
+
+	return 0;
+
+error_free_irqs:
+	fsl_mc_free_irqs(mc_dev);
+	return error;
+}
+
+/**
  * dprc_probe - callback invoked when a DPRC is being bound to this driver
  *
  * @mc_dev: Pointer to fsl-mc device representing a DPRC
@@ -378,15 +624,24 @@
 {
 	int error;
 	size_t region_size;
+	struct device *parent_dev = mc_dev->dev.parent;
 	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+	bool mc_io_created = false;
+	bool msi_domain_set = false;
 
 	if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
 		return -EINVAL;
 
+	if (WARN_ON(dev_get_msi_domain(&mc_dev->dev)))
+		return -EINVAL;
+
 	if (!mc_dev->mc_io) {
 		/*
 		 * This is a child DPRC:
 		 */
+		if (WARN_ON(parent_dev->bus != &fsl_mc_bus_type))
+			return -EINVAL;
+
 		if (WARN_ON(mc_dev->obj_desc.region_count == 0))
 			return -EINVAL;
 
@@ -396,16 +651,45 @@
 		error = fsl_create_mc_io(&mc_dev->dev,
 					 mc_dev->regions[0].start,
 					 region_size,
-					 NULL, 0, &mc_dev->mc_io);
+					 NULL,
+					 FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+					 &mc_dev->mc_io);
 		if (error < 0)
 			return error;
+
+		mc_io_created = true;
+
+		/*
+		 * Inherit parent MSI domain:
+		 */
+		dev_set_msi_domain(&mc_dev->dev,
+				   dev_get_msi_domain(parent_dev));
+		msi_domain_set = true;
+	} else {
+		/*
+		 * This is a root DPRC
+		 */
+		struct irq_domain *mc_msi_domain;
+
+		if (WARN_ON(parent_dev->bus == &fsl_mc_bus_type))
+			return -EINVAL;
+
+		error = fsl_mc_find_msi_domain(parent_dev,
+					       &mc_msi_domain);
+		if (error < 0) {
+			dev_warn(&mc_dev->dev,
+				 "MC bus without interrupt support\n");
+		} else {
+			dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+			msi_domain_set = true;
+		}
 	}
 
 	error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
 			  &mc_dev->mc_handle);
 	if (error < 0) {
 		dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
-		goto error_cleanup_mc_io;
+		goto error_cleanup_msi_domain;
 	}
 
 	mutex_init(&mc_bus->scan_mutex);
@@ -417,17 +701,40 @@
 	if (error < 0)
 		goto error_cleanup_open;
 
+	/*
+	 * Configure interrupt for the DPRC object associated with this MC bus:
+	 */
+	error = dprc_setup_irq(mc_dev);
+	if (error < 0)
+		goto error_cleanup_open;
+
 	dev_info(&mc_dev->dev, "DPRC device bound to driver");
 	return 0;
 
 error_cleanup_open:
 	(void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
 
-error_cleanup_mc_io:
-	fsl_destroy_mc_io(mc_dev->mc_io);
+error_cleanup_msi_domain:
+	if (msi_domain_set)
+		dev_set_msi_domain(&mc_dev->dev, NULL);
+
+	if (mc_io_created) {
+		fsl_destroy_mc_io(mc_dev->mc_io);
+		mc_dev->mc_io = NULL;
+	}
+
 	return error;
 }
 
+/*
+ * Tear down interrupt for a given DPRC object
+ */
+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
+	(void)disable_dprc_irq(mc_dev);
+	fsl_mc_free_irqs(mc_dev);
+}
+
 /**
  * dprc_remove - callback invoked when a DPRC is being unbound from this driver
  *
@@ -441,18 +748,30 @@
 static int dprc_remove(struct fsl_mc_device *mc_dev)
 {
 	int error;
+	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
 
 	if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
 		return -EINVAL;
 	if (WARN_ON(!mc_dev->mc_io))
 		return -EINVAL;
 
+	if (WARN_ON(!mc_bus->irq_resources))
+		return -EINVAL;
+
+	if (dev_get_msi_domain(&mc_dev->dev))
+		dprc_teardown_irq(mc_dev);
+
 	device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
 	dprc_cleanup_all_resource_pools(mc_dev);
 	error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
 	if (error < 0)
 		dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
 
+	if (dev_get_msi_domain(&mc_dev->dev)) {
+		fsl_mc_cleanup_irq_pool(mc_bus);
+		dev_set_msi_domain(&mc_dev->dev, NULL);
+	}
+
 	dev_info(&mc_dev->dev, "DPRC device unbound from driver");
 	return 0;
 }
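
The DPRC interrupt added above uses the split hard-IRQ/threaded-IRQ pattern: the hard handler only returns IRQ_WAKE_THREAD, and the thread function does the work that may sleep (it takes scan_mutex and issues MC portal commands), with IRQF_ONESHOT keeping the interrupt masked until the thread finishes. A minimal sketch of the same pattern for a hypothetical device; the foo_* names are illustrative, not code from this series:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_hardirq(int irq, void *arg)
{
        /* Nothing to do in hard-IRQ context; defer to the thread. */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *arg)
{
        struct device *dev = arg;

        dev_dbg(dev, "IRQ %d handled in process context\n", irq);
        /* Work that may sleep (mutexes, bus transactions) goes here. */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct device *dev, unsigned int irq)
{
        /* IRQF_ONESHOT keeps the line masked until foo_thread_fn() returns. */
        return devm_request_threaded_irq(dev, irq, foo_hardirq, foo_thread_fn,
                                         IRQF_ONESHOT, "foo", dev);
}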
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
new file mode 100644
index 0000000..4e8e822
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -0,0 +1,127 @@
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "../include/mc-private.h"
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include "../include/mc-sys.h"
+#include "dprc-cmd.h"
+
+static struct irq_chip its_msi_irq_chip = {
+	.name = "fsl-mc-bus-msi",
+	.irq_mask = irq_chip_mask_parent,
+	.irq_unmask = irq_chip_unmask_parent,
+	.irq_eoi = irq_chip_eoi_parent,
+	.irq_set_affinity = msi_domain_set_affinity
+};
+
+static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+				  struct device *dev,
+				  int nvec, msi_alloc_info_t *info)
+{
+	struct fsl_mc_device *mc_bus_dev;
+	struct msi_domain_info *msi_info;
+
+	if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+		return -EINVAL;
+
+	mc_bus_dev = to_fsl_mc_device(dev);
+	if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC)))
+		return -EINVAL;
+
+	/*
+	 * Set the device Id to be passed to the GIC-ITS:
+	 *
+	 * NOTE: This device id corresponds to the IOMMU stream ID
+	 * associated with the DPRC object (ICID).
+	 */
+	info->scratchpad[0].ul = mc_bus_dev->icid;
+	msi_info = msi_get_domain_info(msi_domain->parent);
+	return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops its_fsl_mc_msi_ops = {
+	.msi_prepare = its_fsl_mc_msi_prepare,
+};
+
+static struct msi_domain_info its_fsl_mc_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &its_fsl_mc_msi_ops,
+	.chip	= &its_msi_irq_chip,
+};
+
+static const struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+int __init its_fsl_mc_msi_init(void)
+{
+	struct device_node *np;
+	struct irq_domain *parent;
+	struct irq_domain *mc_msi_domain;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+		if (!parent || !msi_get_domain_info(parent)) {
+			pr_err("%s: unable to locate ITS domain\n",
+			       np->full_name);
+			continue;
+		}
+
+		mc_msi_domain = fsl_mc_msi_create_irq_domain(
+						 of_node_to_fwnode(np),
+						 &its_fsl_mc_msi_domain_info,
+						 parent);
+		if (!mc_msi_domain) {
+			pr_err("%s: unable to create fsl-mc domain\n",
+			       np->full_name);
+			continue;
+		}
+
+		WARN_ON(mc_msi_domain->host_data !=
+			&its_fsl_mc_msi_domain_info);
+
+		pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
+	}
+
+	return 0;
+}
+
+void its_fsl_mc_msi_cleanup(void)
+{
+	struct device_node *np;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		struct irq_domain *mc_msi_domain;
+
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		mc_msi_domain = irq_find_matching_host(np,
+						       DOMAIN_BUS_FSL_MC_MSI);
+		if (mc_msi_domain &&
+		    mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info)
+			irq_domain_remove(mc_msi_domain);
+	}
+}
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index 88d1857..86f8543 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -15,6 +15,7 @@
 #include "../include/dpcon-cmd.h"
 #include "dpmcp-cmd.h"
 #include "dpmcp.h"
+#include <linux/msi.h>
 
 /**
  * fsl_mc_resource_pool_add_device - add allocatable device to a resource
@@ -160,6 +161,7 @@
 	[FSL_MC_POOL_DPMCP] = "dpmcp",
 	[FSL_MC_POOL_DPBP] = "dpbp",
 	[FSL_MC_POOL_DPCON] = "dpcon",
+	[FSL_MC_POOL_IRQ] = "irq",
 };
 
 static int __must_check object_type_to_pool_type(const char *object_type,
@@ -465,6 +467,203 @@
 }
 EXPORT_SYMBOL_GPL(fsl_mc_object_free);
 
+/*
+ * Initialize the interrupt pool associated with an MC bus by allocating
+ * a block of IRQs from the GIC-ITS.
+ */
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+			     unsigned int irq_count)
+{
+	unsigned int i;
+	struct msi_desc *msi_desc;
+	struct fsl_mc_device_irq *irq_resources;
+	struct fsl_mc_device_irq *mc_dev_irq;
+	int error;
+	struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+	struct fsl_mc_resource_pool *res_pool =
+			&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+	if (WARN_ON(irq_count == 0 ||
+		    irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS))
+		return -EINVAL;
+
+	error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+	if (error < 0)
+		return error;
+
+	irq_resources = devm_kzalloc(&mc_bus_dev->dev,
+				     sizeof(*irq_resources) * irq_count,
+				     GFP_KERNEL);
+	if (!irq_resources) {
+		error = -ENOMEM;
+		goto cleanup_msi_irqs;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		mc_dev_irq = &irq_resources[i];
+
+		/*
+		 * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+		 * by the fsl_mc_msi_write_msg() callback
+		 */
+		mc_dev_irq->resource.type = res_pool->type;
+		mc_dev_irq->resource.data = mc_dev_irq;
+		mc_dev_irq->resource.parent_pool = res_pool;
+		INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+		list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+	}
+
+	for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
+		mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
+		mc_dev_irq->msi_desc = msi_desc;
+		mc_dev_irq->resource.id = msi_desc->irq;
+	}
+
+	res_pool->max_count = irq_count;
+	res_pool->free_count = irq_count;
+	mc_bus->irq_resources = irq_resources;
+	return 0;
+
+cleanup_msi_irqs:
+	fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+	return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+/*
+ * Tear down the interrupt pool associated with an MC bus, returning the
+ * IRQs that were allocated to the pool back to the GIC-ITS.
+ */
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+{
+	struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+	struct fsl_mc_resource_pool *res_pool =
+			&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+	if (WARN_ON(!mc_bus->irq_resources))
+		return;
+
+	if (WARN_ON(res_pool->max_count == 0))
+		return;
+
+	if (WARN_ON(res_pool->free_count != res_pool->max_count))
+		return;
+
+	INIT_LIST_HEAD(&res_pool->free_list);
+	res_pool->max_count = 0;
+	res_pool->free_count = 0;
+	mc_bus->irq_resources = NULL;
+	fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+/*
+ * Allocate the IRQs required by a given MC object device.
+ *
+ * The IRQs come from the interrupt pool of the MC bus that contains the
+ * device; for a DPRC device, they come from the interrupt pool of the MC
+ * bus that the DPRC itself represents.
+ */
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+{
+	int i;
+	int irq_count;
+	int res_allocated_count = 0;
+	int error = -EINVAL;
+	struct fsl_mc_device_irq **irqs = NULL;
+	struct fsl_mc_bus *mc_bus;
+	struct fsl_mc_resource_pool *res_pool;
+
+	if (WARN_ON(mc_dev->irqs))
+		return -EINVAL;
+
+	irq_count = mc_dev->obj_desc.irq_count;
+	if (WARN_ON(irq_count == 0))
+		return -EINVAL;
+
+	if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+		mc_bus = to_fsl_mc_bus(mc_dev);
+	else
+		mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+	if (WARN_ON(!mc_bus->irq_resources))
+		return -EINVAL;
+
+	res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+	if (res_pool->free_count < irq_count) {
+		dev_err(&mc_dev->dev,
+			"Not able to allocate %u irqs for device\n", irq_count);
+		return -ENOSPC;
+	}
+
+	irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
+			    GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < irq_count; i++) {
+		struct fsl_mc_resource *resource;
+
+		error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+						 &resource);
+		if (error < 0)
+			goto error_resource_alloc;
+
+		irqs[i] = to_fsl_mc_irq(resource);
+		res_allocated_count++;
+
+		WARN_ON(irqs[i]->mc_dev);
+		irqs[i]->mc_dev = mc_dev;
+		irqs[i]->dev_irq_index = i;
+	}
+
+	mc_dev->irqs = irqs;
+	return 0;
+
+error_resource_alloc:
+	for (i = 0; i < res_allocated_count; i++) {
+		irqs[i]->mc_dev = NULL;
+		fsl_mc_resource_free(&irqs[i]->resource);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
+/*
+ * Free the IRQs that were allocated for an MC object device by returning
+ * them to the corresponding interrupt pool.
+ */
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
+	int i;
+	int irq_count;
+	struct fsl_mc_bus *mc_bus;
+	struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
+	if (WARN_ON(!irqs))
+		return;
+
+	irq_count = mc_dev->obj_desc.irq_count;
+
+	if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+		mc_bus = to_fsl_mc_bus(mc_dev);
+	else
+		mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+	if (WARN_ON(!mc_bus->irq_resources))
+		return;
+
+	for (i = 0; i < irq_count; i++) {
+		WARN_ON(!irqs[i]->mc_dev);
+		irqs[i]->mc_dev = NULL;
+		fsl_mc_resource_free(&irqs[i]->resource);
+	}
+
+	mc_dev->irqs = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+
 /**
  * fsl_mc_allocator_probe - callback invoked when an allocatable device is
  * being added to the system
@@ -557,7 +756,7 @@
 	return fsl_mc_driver_register(&fsl_mc_allocator_driver);
 }
 
-void __exit fsl_mc_allocator_driver_exit(void)
+void fsl_mc_allocator_driver_exit(void)
 {
 	fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
 }
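
fsl_mc_allocate_irqs() and fsl_mc_free_irqs() are meant to be called from an MC object driver's probe and remove paths, once the parent DPRC has populated its IRQ pool. A hedged sketch of such a consumer; the foo_* names are illustrative and the error handling is simplified, so this is not code from this series:

#include <linux/interrupt.h>
#include "../include/mc.h"      /* fsl_mc_device, fsl_mc_allocate_irqs() */

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
        /* Acknowledge and handle the object's interrupt here. */
        return IRQ_HANDLED;
}

static int foo_mc_probe(struct fsl_mc_device *mc_dev)
{
        int i, error;

        /* Draw this object's IRQs from its parent DPRC's pool. */
        error = fsl_mc_allocate_irqs(mc_dev);
        if (error < 0)
                return error;

        for (i = 0; i < mc_dev->obj_desc.irq_count; i++) {
                struct fsl_mc_device_irq *irq = mc_dev->irqs[i];

                error = devm_request_irq(&mc_dev->dev, irq->msi_desc->irq,
                                         foo_irq_handler, 0, "foo", mc_dev);
                if (error < 0) {
                        fsl_mc_free_irqs(mc_dev);
                        return error;
                }
        }

        return 0;
}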
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index 84db55b..5958e0f 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -16,6 +16,8 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/limits.h>
+#include <linux/bitops.h>
+#include <linux/msi.h>
 #include "../include/dpmng.h"
 #include "../include/mc-sys.h"
 #include "dprc-cmd.h"
@@ -246,8 +248,7 @@
 	fsl_mc_get_root_dprc(dev, &root_dprc_dev);
 	if (!root_dprc_dev)
 		return false;
-	else
-		return dev == root_dprc_dev;
+	return dev == root_dprc_dev;
 }
 
 static int get_dprc_icid(struct fsl_mc_io *mc_io,
@@ -472,6 +473,8 @@
 		mc_dev->icid = parent_mc_dev->icid;
 		mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
 		mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+		dev_set_msi_domain(&mc_dev->dev,
+				   dev_get_msi_domain(&parent_mc_dev->dev));
 	}
 
 	/*
@@ -702,7 +705,8 @@
 	mc_portal_phys_addr = res.start;
 	mc_portal_size = resource_size(&res);
 	error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
-				 mc_portal_size, NULL, 0, &mc_io);
+				 mc_portal_size, NULL,
+				 FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
 	if (error < 0)
 		return error;
 
@@ -790,7 +794,6 @@
 static struct platform_driver fsl_mc_bus_driver = {
 	.driver = {
 		   .name = "fsl_mc_bus",
-		   .owner = THIS_MODULE,
 		   .pm = NULL,
 		   .of_match_table = fsl_mc_bus_match_table,
 		   },
@@ -832,8 +835,15 @@
 	if (error < 0)
 		goto error_cleanup_dprc_driver;
 
+	error = its_fsl_mc_msi_init();
+	if (error < 0)
+		goto error_cleanup_mc_allocator;
+
 	return 0;
 
+error_cleanup_mc_allocator:
+	fsl_mc_allocator_driver_exit();
+
 error_cleanup_dprc_driver:
 	dprc_driver_exit();
 
@@ -855,6 +865,7 @@
 	if (WARN_ON(!mc_dev_cache))
 		return;
 
+	its_fsl_mc_msi_cleanup();
 	fsl_mc_allocator_driver_exit();
 	dprc_driver_exit();
 	platform_driver_unregister(&fsl_mc_bus_driver);
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
new file mode 100644
index 0000000..3a8258f
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -0,0 +1,276 @@
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "../include/mc-private.h"
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include "../include/mc-sys.h"
+#include "dprc-cmd.h"
+
+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+				struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
+}
+
+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	if (WARN_ON(!ops))
+		return;
+
+	/*
+	 * set_desc should not be set by the caller
+	 */
+	if (WARN_ON(ops->set_desc))
+		return;
+
+	ops->set_desc = fsl_mc_msi_set_desc;
+}
+
+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+				   struct fsl_mc_device_irq *mc_dev_irq)
+{
+	int error;
+	struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+	struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
+	struct dprc_irq_cfg irq_cfg;
+
+	/*
+	 * msi_desc->msg.address is 0x0 when this function is invoked in
+	 * the free_irq() code path. In this case, for the MC, we don't
+	 * really need to "unprogram" the MSI, so we just return.
+	 */
+	if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+		return;
+
+	if (WARN_ON(!owner_mc_dev))
+		return;
+
+	irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+			msi_desc->msg.address_lo;
+	irq_cfg.val = msi_desc->msg.data;
+	irq_cfg.user_irq_id = msi_desc->irq;
+
+	if (owner_mc_dev == mc_bus_dev) {
+		/*
+		 * IRQ is for the mc_bus_dev's DPRC itself
+		 */
+		error = dprc_set_irq(mc_bus_dev->mc_io,
+				     MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+				     mc_bus_dev->mc_handle,
+				     mc_dev_irq->dev_irq_index,
+				     &irq_cfg);
+		if (error < 0) {
+			dev_err(&owner_mc_dev->dev,
+				"dprc_set_irq() failed: %d\n", error);
+		}
+	} else {
+		/*
+		 * IRQ is for a child device of mc_bus_dev
+		 */
+		error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+					 MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+					 mc_bus_dev->mc_handle,
+					 owner_mc_dev->obj_desc.type,
+					 owner_mc_dev->obj_desc.id,
+					 mc_dev_irq->dev_irq_index,
+					 &irq_cfg);
+		if (error < 0) {
+			dev_err(&owner_mc_dev->dev,
+				"dprc_set_obj_irq() failed: %d\n", error);
+		}
+	}
+}
+
+/*
+ * NOTE: This function is invoked with interrupts disabled
+ */
+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+				 struct msi_msg *msg)
+{
+	struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+	struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+	struct fsl_mc_device_irq *mc_dev_irq =
+		&mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+
+	WARN_ON(mc_dev_irq->msi_desc != msi_desc);
+	msi_desc->msg = *msg;
+
+	/*
+	 * Program the MSI (paddr, value) pair in the device:
+	 */
+	__fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+}
+
+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	if (WARN_ON(!chip))
+		return;
+
+	/*
+	 * irq_write_msi_msg should not be set by the caller
+	 */
+	if (WARN_ON(chip->irq_write_msi_msg))
+		return;
+
+	chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
+/**
+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
+ * @np:		Optional device-tree node of the interrupt controller
+ * @info:	MSI domain info
+ * @parent:	Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a fsl-mc MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+						struct msi_domain_info *info,
+						struct irq_domain *parent)
+{
+	struct irq_domain *domain;
+
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		fsl_mc_msi_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		fsl_mc_msi_update_chip_ops(info);
+
+	domain = msi_create_irq_domain(fwnode, info, parent);
+	if (domain)
+		domain->bus_token = DOMAIN_BUS_FSL_MC_MSI;
+
+	return domain;
+}
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+			   struct irq_domain **mc_msi_domain)
+{
+	struct irq_domain *msi_domain;
+	struct device_node *mc_of_node = mc_platform_dev->of_node;
+
+	msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
+				       DOMAIN_BUS_FSL_MC_MSI);
+	if (!msi_domain) {
+		pr_err("Unable to find fsl-mc MSI domain for %s\n",
+		       mc_of_node->full_name);
+
+		return -ENOENT;
+	}
+
+	*mc_msi_domain = msi_domain;
+	return 0;
+}
+
+static void fsl_mc_msi_free_descs(struct device *dev)
+{
+	struct msi_desc *desc, *tmp;
+
+	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+		list_del(&desc->list);
+		free_msi_entry(desc);
+	}
+}
+
+static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+{
+	unsigned int i;
+	int error;
+	struct msi_desc *msi_desc;
+
+	for (i = 0; i < irq_count; i++) {
+		msi_desc = alloc_msi_entry(dev);
+		if (!msi_desc) {
+			dev_err(dev, "Failed to allocate msi entry\n");
+			error = -ENOMEM;
+			goto cleanup_msi_descs;
+		}
+
+		msi_desc->fsl_mc.msi_index = i;
+		msi_desc->nvec_used = 1;
+		INIT_LIST_HEAD(&msi_desc->list);
+		list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+	}
+
+	return 0;
+
+cleanup_msi_descs:
+	fsl_mc_msi_free_descs(dev);
+	return error;
+}
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+				 unsigned int irq_count)
+{
+	struct irq_domain *msi_domain;
+	int error;
+
+	if (WARN_ON(!list_empty(dev_to_msi_list(dev))))
+		return -EINVAL;
+
+	error = fsl_mc_msi_alloc_descs(dev, irq_count);
+	if (error < 0)
+		return error;
+
+	msi_domain = dev_get_msi_domain(dev);
+	if (WARN_ON(!msi_domain)) {
+		error = -EINVAL;
+		goto cleanup_msi_descs;
+	}
+
+	/*
+	 * NOTE: Calling this function will trigger the invocation of the
+	 * its_fsl_mc_msi_prepare() callback
+	 */
+	error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+
+	if (error) {
+		dev_err(dev, "Failed to allocate IRQs\n");
+		goto cleanup_msi_descs;
+	}
+
+	return 0;
+
+cleanup_msi_descs:
+	fsl_mc_msi_free_descs(dev);
+	return error;
+}
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
+	struct irq_domain *msi_domain;
+
+	msi_domain = dev_get_msi_domain(dev);
+	if (WARN_ON(!msi_domain))
+		return;
+
+	msi_domain_free_irqs(msi_domain, dev);
+
+	if (WARN_ON(list_empty(dev_to_msi_list(dev))))
+		return;
+
+	fsl_mc_msi_free_descs(dev);
+}
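
Unlike PCI MSI, the CPU never writes the (address, data) pair into the device here: fsl_mc_msi_write_msg() captures the message composed by the GIC-ITS layer and forwards it to the MC firmware via dprc_set_irq()/dprc_set_obj_irq(), so the MC object itself performs the doorbell write. The recombination of the two 32-bit address halves into irq_cfg.paddr is the usual shift-and-or; a tiny standalone illustration, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t address_hi = 0x00000008;       /* invented example values */
        uint32_t address_lo = 0x0c0d0000;
        uint32_t data = 0x2a;
        uint64_t paddr = ((uint64_t)address_hi << 32) | address_lo;

        printf("doorbell 0x%016llx, value 0x%x\n",
               (unsigned long long)paddr, (unsigned)data);
        return 0;
}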
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index c3152f6..94c4927 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -176,7 +176,7 @@
  * @user_irq_id: A user defined number associated with this IRQ
  */
 struct dprc_irq_cfg {
-	     u64		paddr;
+	     phys_addr_t	paddr;
 	     u32		val;
 	     int		user_irq_id;
 };
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
index c706f77..ee5f1d2 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-private.h
@@ -26,6 +26,19 @@
 	 strcmp(_obj_type, "dpmcp") == 0 || \
 	 strcmp(_obj_type, "dpcon") == 0)
 
+struct device_node;
+struct irq_domain;
+struct msi_domain_info;
+
+/*
+ * Maximum total number of IRQs that can be pre-allocated for an MC bus's
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS	256
+
 /**
  * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
  * @root_mc_bus_dev: MC object device representing the root DPRC
@@ -79,11 +92,13 @@
  * @resource_pools: array of resource pools (one pool per resource type)
  * for this MC bus. These resources represent allocatable entities
  * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
  * @scan_mutex: Serializes bus scanning
  */
 struct fsl_mc_bus {
 	struct fsl_mc_device mc_dev;
 	struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+	struct fsl_mc_device_irq *irq_resources;
 	struct mutex scan_mutex;    /* serializes bus scanning */
 };
 
@@ -99,7 +114,8 @@
 
 int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
 
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev);
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+		      unsigned int *total_irq_count);
 
 int __init dprc_driver_init(void);
 
@@ -107,7 +123,7 @@
 
 int __init fsl_mc_allocator_driver_init(void);
 
-void __exit fsl_mc_allocator_driver_exit(void);
+void fsl_mc_allocator_driver_exit(void);
 
 int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
 					  enum fsl_mc_pool_type pool_type,
@@ -116,4 +132,25 @@
 
 void fsl_mc_resource_free(struct fsl_mc_resource *resource);
 
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+						struct msi_domain_info *info,
+						struct irq_domain *parent);
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+			   struct irq_domain **mc_msi_domain);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+				 unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+int __init its_fsl_mc_msi_init(void);
+
+void its_fsl_mc_msi_cleanup(void);
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+			     unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+
 #endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index a933291..ac7c1ce 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -14,12 +14,14 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/list.h>
+#include <linux/interrupt.h>
 #include "../include/dprc.h"
 
 #define FSL_MC_VENDOR_FREESCALE	0x1957
 
 struct fsl_mc_device;
 struct fsl_mc_io;
+struct fsl_mc_bus;
 
 /**
  * struct fsl_mc_driver - MC object device driver object
@@ -75,6 +77,7 @@
 	FSL_MC_POOL_DPMCP = 0x0,    /* corresponds to "dpmcp" in the MC */
 	FSL_MC_POOL_DPBP,	    /* corresponds to "dpbp" in the MC */
 	FSL_MC_POOL_DPCON,	    /* corresponds to "dpcon" in the MC */
+	FSL_MC_POOL_IRQ,
 
 	/*
 	 * NOTE: New resource pool types must be added before this entry
@@ -104,6 +107,23 @@
 };
 
 /**
+ * struct fsl_mc_device_irq - MC object device message-based interrupt
+ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @mc_dev: MC object device that owns this interrupt
+ * @dev_irq_index: device-relative IRQ index
+ * @resource: MC generic resource associated with the interrupt
+ */
+struct fsl_mc_device_irq {
+	struct msi_desc *msi_desc;
+	struct fsl_mc_device *mc_dev;
+	u8 dev_irq_index;
+	struct fsl_mc_resource resource;
+};
+
+#define to_fsl_mc_irq(_mc_resource) \
+	container_of(_mc_resource, struct fsl_mc_device_irq, resource)
+
+/**
  * Bit masks for a MC object device (struct fsl_mc_device) flags
  */
 #define FSL_MC_IS_DPRC	0x0001
@@ -124,6 +144,7 @@
  * NULL if none.
  * @obj_desc: MC description of the DPAA device
  * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
  * @resource: generic resource associated with this MC object device, if any.
  *
  * Generic device object for MC object devices that are "attached" to a
@@ -155,6 +176,7 @@
 	struct fsl_mc_io *mc_io;
 	struct dprc_obj_desc obj_desc;
 	struct resource *regions;
+	struct fsl_mc_device_irq **irqs;
 	struct fsl_mc_resource *resource;
 };
 
@@ -198,6 +220,10 @@
 
 void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
 
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
 extern struct bus_type fsl_mc_bus_type;
 
 #endif /* _FSL_MC_H_ */
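
The to_fsl_mc_irq() macro above is the usual container_of() idiom: an IRQ travels through the allocator as the generic struct fsl_mc_resource embedded in struct fsl_mc_device_irq, and the macro recovers the enclosing structure from a pointer to that member. A standalone userspace illustration with stand-in types (the kernel's container_of() adds type checking that this simplified version omits):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct resource_stub { int id; };

struct irq_stub {
        int dev_irq_index;
        struct resource_stub resource;
};

int main(void)
{
        struct irq_stub irq = { .dev_irq_index = 3, .resource = { .id = 42 } };
        struct resource_stub *res = &irq.resource;
        struct irq_stub *back = container_of(res, struct irq_stub, resource);

        printf("index %d, id %d\n", back->dev_irq_index, back->resource.id);
        return 0;
}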
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 7a3347c..4cd3ed3 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -106,7 +106,7 @@
 {
 	struct dma_pending *pending, *next;
 
-	if (fifo->data == NULL)
+	if (!fifo->data)
 		return;
 
 	list_for_each_entry_safe(pending, next, &fifo->pending, link)
@@ -123,7 +123,7 @@
 {
 	struct dma_pending *pending, *next;
 
-	if (fifo->data == NULL)
+	if (!fifo->data)
 		return;
 
 	list_for_each_entry_safe(pending, next, &fifo->pending, link)
@@ -149,7 +149,7 @@
 {
 	int ofs, l;
 
-	if (fifo->data == NULL)
+	if (!fifo->data)
 		return -ENOENT;
 	if (fifo->corrupt)
 		return -ENXIO;
@@ -192,7 +192,7 @@
 {
 	unsigned len, n, ofs, l, limit;
 
-	if (fifo->data == NULL)
+	if (!fifo->data)
 		return -ENOENT;
 	if (fifo->corrupt)
 		return -ENXIO;
@@ -252,7 +252,7 @@
 {
 	struct dma_pending *pending, *next, *tmp;
 
-	if (fifo->data == NULL)
+	if (!fifo->data)
 		return -ENOENT;
 	if (fifo->corrupt)
 		return -ENXIO;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index b676c48..9b23b5c 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -828,7 +828,7 @@
 	rcu_read_unlock();
 }
 
-struct fwtty_port *fwtty_port_get(unsigned index)
+static struct fwtty_port *fwtty_port_get(unsigned index)
 {
 	struct fwtty_port *port;
 
@@ -842,7 +842,6 @@
 	mutex_unlock(&port_table_lock);
 	return port;
 }
-EXPORT_SYMBOL(fwtty_port_get);
 
 static int fwtty_ports_add(struct fw_serial *serial)
 {
@@ -1465,9 +1464,9 @@
 	seq_printf(m, " %s:", dev_name(&peer->unit->device));
 	seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
 	seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
-		   peer->max_payload, (unsigned long long) peer->guid);
-	seq_printf(m, " mgmt:%012llx", (unsigned long long) peer->mgmt_addr);
-	seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr);
+		   peer->max_payload, (unsigned long long)peer->guid);
+	seq_printf(m, " mgmt:%012llx", (unsigned long long)peer->mgmt_addr);
+	seq_printf(m, " addr:%012llx", (unsigned long long)peer->status_addr);
 	seq_putc(m, '\n');
 }
 
@@ -1514,7 +1513,7 @@
 	rcu_read_lock();
 	seq_printf(m, "card: %s  guid: %016llx\n",
 		   dev_name(serial->card->device),
-		   (unsigned long long) serial->card->guid);
+		   (unsigned long long)serial->card->guid);
 	list_for_each_entry_rcu(peer, &serial->peer_list, list)
 		fwtty_debugfs_show_peer(m, peer);
 	rcu_read_unlock();
@@ -1986,7 +1985,7 @@
 		 * been probed for any unit devices...
 		 */
 		fwtty_err(card, "unknown card (guid %016llx)\n",
-			  (unsigned long long) card->guid);
+			  (unsigned long long)card->guid);
 		return NULL;
 	}
 
@@ -2016,7 +2015,7 @@
 
 		smp_rmb();
 		fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
-			  g, peer->node_id, (unsigned long long) peer->guid);
+			  g, peer->node_id, (unsigned long long)peer->guid);
 	}
 }
 #else
@@ -2313,7 +2312,7 @@
 	list_add_rcu(&serial->list, &fwserial_list);
 
 	fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
-		     dev_name(card->device), (unsigned long long) card->guid);
+		     dev_name(card->device), (unsigned long long)card->guid);
 
 	err = fwserial_add_peer(serial, unit);
 	if (!err)
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index e13fe33..6fa9365 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -341,7 +341,6 @@
 
 extern struct tty_driver *fwtty_driver;
 
-struct fwtty_port *fwtty_port_get(unsigned index);
 /*
  * Returns the max send async payload size in bytes based on the unit device
  * link speed. Self-limiting asynchronous bandwidth (via reducing the payload)
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 17d148f..98570c6 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -382,7 +382,7 @@
 		/* Check DHCPv4 */
 		if (ip->protocol == IPPROTO_UDP) {
 			struct udphdr *udp =
-					(network_data + sizeof(struct iphdr));
+					network_data + sizeof(struct iphdr);
 			if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
 				nic_type |= NIC_TYPE_F_DHCP;
 		}
@@ -393,12 +393,12 @@
 
 		if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
 			struct icmp6hdr *icmp6 =
-					(network_data + sizeof(struct ipv6hdr));
+					network_data + sizeof(struct ipv6hdr);
 			if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
 				nic_type |= NIC_TYPE_ICMPV6;
 		} else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
 			struct udphdr *udp =
-					(network_data + sizeof(struct ipv6hdr));
+					network_data + sizeof(struct ipv6hdr);
 			if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
 				nic_type |= NIC_TYPE_F_DHCP;
 		}
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 445f836..937010b 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -26,8 +26,6 @@
 
 #include "gdm_mux.h"
 
-static struct workqueue_struct *mux_rx_wq;
-
 static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
 
 #define USB_DEVICE_CDC_DATA(vid, pid) \
@@ -275,7 +273,7 @@
 		r->len = r->urb->actual_length;
 		spin_lock_irqsave(&rx->to_host_lock, flags);
 		list_add_tail(&r->to_host_list, &rx->to_host_list);
-		queue_work(mux_rx_wq, &mux_dev->work_rx.work);
+		schedule_work(&mux_dev->work_rx.work);
 		spin_unlock_irqrestore(&rx->to_host_lock, flags);
 	}
 }
@@ -602,6 +600,8 @@
 	mux_dev = tty_dev->priv_dev;
 	rx = &mux_dev->rx;
 
+	cancel_work_sync(&mux_dev->work_rx.work);
+
 	if (mux_dev->usb_state != PM_NORMAL) {
 		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
 		return -1;
@@ -656,13 +656,6 @@
 
 static int __init gdm_usb_mux_init(void)
 {
-
-	mux_rx_wq = create_workqueue("mux_rx_wq");
-	if (!mux_rx_wq) {
-		pr_err("work queue create fail\n");
-		return -1;
-	}
-
 	register_lte_tty_driver();
 
 	return usb_register(&gdm_mux_driver);
@@ -672,11 +665,6 @@
 {
 	unregister_lte_tty_driver();
 
-	if (mux_rx_wq) {
-		flush_workqueue(mux_rx_wq);
-		destroy_workqueue(mux_rx_wq);
-	}
-
 	usb_deregister(&gdm_mux_driver);
 }
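
The gdm_mux change above drops the driver-private workqueue in favour of the system workqueue: receive work is queued with schedule_work(), and the suspend path gains cancel_work_sync() since there is no private queue left to flush or destroy. A sketch of the resulting pattern for a hypothetical bar driver (names are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct bar_dev {
        struct work_struct rx_work;
};

static void bar_rx_work(struct work_struct *work)
{
        struct bar_dev *bar = container_of(work, struct bar_dev, rx_work);

        /* ... process data received by bar ... */
        (void)bar;
}

static void bar_init(struct bar_dev *bar)
{
        INIT_WORK(&bar->rx_work, bar_rx_work);
}

static void bar_rx_complete(struct bar_dev *bar)
{
        schedule_work(&bar->rx_work);   /* was queue_work(private_wq, ...) */
}

static void bar_suspend(struct bar_dev *bar)
{
        cancel_work_sync(&bar->rx_work);
}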
 
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index cad347a..9fc476f 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -28,7 +28,7 @@
 static struct {
 	struct list_head head;
 	int cnt;
-	spinlock_t lock;
+	spinlock_t lock; /* protect structure fields */
 } qos_free_list;
 
 static void init_qos_entry_list(void)
@@ -261,7 +261,7 @@
 	struct list_head send_list;
 	int ret = 0;
 
-	tcph = (struct tcphdr *)iph + iph->ihl*4;
+	tcph = (struct tcphdr *)iph + iph->ihl * 4;
 
 	if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
 		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
@@ -342,17 +342,17 @@
 	if (sub_cmd_evt == QOS_REPORT) {
 		spin_lock_irqsave(&qcb->qos_lock, flags);
 		for (i = 0; i < qcb->qos_list_cnt; i++) {
-			sfid = ((buf[(i*5) + 6] << 24) & 0xff000000);
-			sfid += ((buf[(i*5) + 7] << 16) & 0xff0000);
-			sfid += ((buf[(i*5) + 8] << 8) & 0xff00);
-			sfid += (buf[(i*5) + 9]);
+			sfid = ((buf[(i * 5) + 6] << 24) & 0xff000000);
+			sfid += ((buf[(i * 5) + 7] << 16) & 0xff0000);
+			sfid += ((buf[(i * 5) + 8] << 8) & 0xff00);
+			sfid += (buf[(i * 5) + 9]);
 			index = get_csr(qcb, sfid, 0);
 			if (index == -1) {
 				spin_unlock_irqrestore(&qcb->qos_lock, flags);
 				netdev_err(nic->netdev, "QoS ERROR: No SF\n");
 				return;
 			}
-			qcb->csr[index].qos_buf_count = buf[(i*5) + 10];
+			qcb->csr[index].qos_buf_count = buf[(i * 5) + 10];
 		}
 
 		extract_qos_list(nic, &send_list);
diff --git a/drivers/staging/gdm72xx/gdm_qos.h b/drivers/staging/gdm72xx/gdm_qos.h
index bbc8aab..a4e5c1c 100644
--- a/drivers/staging/gdm72xx/gdm_qos.h
+++ b/drivers/staging/gdm72xx/gdm_qos.h
@@ -62,7 +62,7 @@
 	int			qos_list_cnt;
 	int			qos_null_idx;
 	struct gdm_wimax_csr_s	csr[QOS_MAX];
-	spinlock_t		qos_lock;
+	spinlock_t		qos_lock; /* Protect structure fields */
 	int			qos_limit_size;
 };
 
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
index 1f5a087..9449418 100644
--- a/drivers/staging/gdm72xx/gdm_sdio.c
+++ b/drivers/staging/gdm72xx/gdm_sdio.c
@@ -33,10 +33,10 @@
 #define SDU_TX_BUF_SIZE	2048
 #define TX_BUF_SIZE	2048
 #define TX_CHUNK_SIZE	(2048 - TYPE_A_HEADER_SIZE)
-#define RX_BUF_SIZE	(25*1024)
+#define RX_BUF_SIZE	(25 * 1024)
 
 #define TX_HZ		2000
-#define TX_INTERVAL	(NSEC_PER_SEC/TX_HZ)
+#define TX_INTERVAL	(NSEC_PER_SEC / TX_HZ)
 
 static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx)
 {
diff --git a/drivers/staging/gdm72xx/gdm_sdio.h b/drivers/staging/gdm72xx/gdm_sdio.h
index aa7dad2..30428b0 100644
--- a/drivers/staging/gdm72xx/gdm_sdio.h
+++ b/drivers/staging/gdm72xx/gdm_sdio.h
@@ -34,7 +34,7 @@
 	struct list_head	hci_list;
 	ktime_t			sdu_stamp;
 	u8			*sdu_buf;
-	spinlock_t		lock;
+	spinlock_t		lock; /* protect structure fields */
 	int			can_send;
 	int			stop_sdu_tx;
 };
@@ -50,7 +50,7 @@
 	struct list_head	free_list;
 	struct list_head	req_list;
 	u8			*rx_buf;
-	spinlock_t		lock;
+	spinlock_t		lock; /* protect structure fields */
 };
 
 struct sdiowm_dev {
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index 16e497d..f81129d 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -29,7 +29,7 @@
 #define TX_BUF_SIZE		2048
 
 #if defined(CONFIG_WIMAX_GDM72XX_WIMAX2)
-#define RX_BUF_SIZE		(128*1024)	/* For packet aggregation */
+#define RX_BUF_SIZE		(128 * 1024)	/* For packet aggregation */
 #else
 #define RX_BUF_SIZE		2048
 #endif
@@ -544,7 +544,6 @@
 		 idVendor, idProduct);
 	dev_info(&intf->dev, "GCT WiMax driver version %s\n", DRIVER_VERSION);
 
-
 	if (idProduct == EMERGENCY_PID) {
 		ret = usb_emergency(usbdev);
 		goto out;
diff --git a/drivers/staging/gdm72xx/gdm_usb.h b/drivers/staging/gdm72xx/gdm_usb.h
index 8e58a25..d864928 100644
--- a/drivers/staging/gdm72xx/gdm_usb.h
+++ b/drivers/staging/gdm72xx/gdm_usb.h
@@ -18,8 +18,8 @@
 #include <linux/usb.h>
 #include <linux/list.h>
 
-#define B_DIFF_DL_DRV		(1 << 4)
-#define B_DOWNLOAD		(1 << 5)
+#define B_DIFF_DL_DRV		BIT(4)
+#define B_DOWNLOAD		BIT(5)
 #define MAX_NR_SDU_BUF		64
 
 struct usb_tx {
@@ -41,7 +41,7 @@
 #if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
 	struct list_head	pending_list;
 #endif
-	spinlock_t		lock;
+	spinlock_t		lock; /* Protect structure fields */
 };
 
 struct usb_rx {
@@ -56,7 +56,7 @@
 struct rx_cxt {
 	struct list_head	free_list;
 	struct list_head	used_list;
-	spinlock_t		lock;
+	spinlock_t		lock; /* Protect structure fields */
 };
 
 struct usbwm_dev {
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
index ba03f93..2ee6a39 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ b/drivers/staging/gdm72xx/gdm_wimax.c
@@ -26,11 +26,11 @@
 #include "netlink_k.h"
 
 #define gdm_wimax_send(n, d, l)	\
-	(n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, NULL, NULL)
+	n->phy_dev->send_func(n->phy_dev->priv_dev, d, l, NULL, NULL)
 #define gdm_wimax_send_with_cb(n, d, l, c, b)	\
-	(n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, c, b)
+	n->phy_dev->send_func(n->phy_dev->priv_dev, d, l, c, b)
 #define gdm_wimax_rcv_with_cb(n, c, b)	\
-	(n->phy_dev->rcv_func)(n->phy_dev->priv_dev, c, b)
+	n->phy_dev->rcv_func(n->phy_dev->priv_dev, c, b)
 
 #define EVT_MAX_SIZE	2048
 
@@ -45,7 +45,7 @@
 	int ref_cnt;
 	struct sock *sock;
 	struct list_head evtq;
-	spinlock_t evt_lock;
+	spinlock_t evt_lock; /* protect structure fields */
 	struct list_head freeq;
 	struct work_struct ws;
 } wm_event;
@@ -98,21 +98,14 @@
 	return e;
 }
 
-static void put_event_entry(struct evt_entry *e)
-{
-	BUG_ON(!e);
-
-	list_add_tail(&e->list, &wm_event.freeq);
-}
-
 static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
 				int len)
 {
 	struct nic *nic = netdev_priv(dev);
 
 	u8 *buf = msg;
-	u16 hci_cmd =  (buf[0]<<8) | buf[1];
-	u16 hci_len = (buf[2]<<8) | buf[3];
+	u16 hci_cmd =  (buf[0] << 8) | buf[1];
+	u16 hci_len = (buf[2] << 8) | buf[3];
 
 	netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
 
@@ -137,7 +130,7 @@
 
 		spin_lock_irqsave(&wm_event.evt_lock, flags);
 		list_del(&e->list);
-		put_event_entry(e);
+		list_add_tail(&e->list, &wm_event.freeq);
 	}
 
 	spin_unlock_irqrestore(&wm_event.evt_lock, flags);
@@ -193,8 +186,8 @@
 	struct evt_entry *e;
 	unsigned long flags;
 
-	u16 hci_cmd =  ((u8)buf[0]<<8) | (u8)buf[1];
-	u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3];
+	u16 hci_cmd =  ((u8)buf[0] << 8) | (u8)buf[1];
+	u16 hci_len = ((u8)buf[2] << 8) | (u8)buf[3];
 
 	netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
 
@@ -328,7 +321,8 @@
 	hci->length = cpu_to_be16(sizeof(up_down));
 	hci->data[0] = up_down;
 
-	gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE+sizeof(up_down));
+	gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE +
+			     sizeof(up_down));
 }
 
 static int gdm_wimax_open(struct net_device *dev)
@@ -512,7 +506,7 @@
 	hci->cmd_evt = cpu_to_be16(WIMAX_GET_INFO);
 	hci->data[len++] = TLV_T(T_MAC_ADDRESS);
 	hci->length = cpu_to_be16(len);
-	gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len);
+	gdm_wimax_send(nic, hci, HCI_HEADER_SIZE + len);
 
 	val = T_CAPABILITY_WIMAX | T_CAPABILITY_MULTI_CS;
 	#if defined(CONFIG_WIMAX_GDM72XX_QOS)
@@ -531,7 +525,7 @@
 	memcpy(&hci->data[len], &val_be32, TLV_L(T_CAPABILITY));
 	len += TLV_L(T_CAPABILITY);
 	hci->length = cpu_to_be16(len);
-	gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len);
+	gdm_wimax_send(nic, hci, HCI_HEADER_SIZE + len);
 
 	netdev_info(dev, "GDM WiMax Set CAPABILITY: 0x%08X\n", val);
 }
@@ -544,10 +538,10 @@
 	*T = buf[0];
 	if (buf[1] == 0x82) {
 		*L = be16_to_cpu(__U82U16(&buf[2]));
-		next_pos = 1/*type*/+3/*len*/;
+		next_pos = 1/*type*/ + 3/*len*/;
 	} else {
 		*L = buf[1];
-		next_pos = 1/*type*/+1/*len*/;
+		next_pos = 1/*type*/ + 1/*len*/;
 	}
 	*V = &buf[next_pos];
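
The hci_cmd/hci_len extraction above stays open-coded; the kernel's unaligned helpers express the same big-endian 16-bit reads more compactly. Shown only as an equivalent formulation, not a change this patch makes:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Equivalent to: cmd = (buf[0] << 8) | buf[1]; len = (buf[2] << 8) | buf[3]; */
static void parse_hci_header(const u8 *buf, u16 *cmd, u16 *len)
{
        *cmd = get_unaligned_be16(buf);
        *len = get_unaligned_be16(buf + 2);
}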
 
diff --git a/drivers/staging/gdm72xx/hci.h b/drivers/staging/gdm72xx/hci.h
index 10a6bfa..b40d5c3 100644
--- a/drivers/staging/gdm72xx/hci.h
+++ b/drivers/staging/gdm72xx/hci.h
@@ -17,7 +17,7 @@
 #define HCI_HEADER_SIZE		4
 #define HCI_VALUE_OFFS		(HCI_HEADER_SIZE)
 #define HCI_MAX_PACKET		2048
-#define HCI_MAX_PARAM		(HCI_MAX_PACKET-HCI_HEADER_SIZE)
+#define HCI_MAX_PARAM		(HCI_MAX_PACKET - HCI_HEADER_SIZE)
 #define HCI_MAX_TLV		32
 
 /* CMD-EVT */
@@ -159,7 +159,6 @@
 #define T_TX_POWER			(0x6a   | (1 << 16))
 #define T_CUR_FREQ			(0x7f	| (4 << 16))
 
-
 /* WIMAX */
 #define T_MAX_SUBSCRIPTION		(0xa1	| (1 << 16))
 #define T_MAX_SF			(0xa2	| (1 << 16))
@@ -199,10 +198,10 @@
 #define T_DUPLEX_MODE			(0xdb	| (4 << 16))
 
 /* T_CAPABILITY */
-#define T_CAPABILITY_MULTI_CS		(1 << 0)
-#define T_CAPABILITY_WIMAX		(1 << 1)
-#define T_CAPABILITY_QOS		(1 << 2)
-#define T_CAPABILITY_AGGREGATION	(1 << 3)
+#define T_CAPABILITY_MULTI_CS		BIT(0)
+#define T_CAPABILITY_WIMAX		BIT(1)
+#define T_CAPABILITY_QOS		BIT(2)
+#define T_CAPABILITY_AGGREGATION	BIT(3)
 
 struct hci_s {
 	__be16	cmd_evt;
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index f3cdaa6..783770b 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -21,7 +21,7 @@
 #include "netlink_k.h"
 
 #if !defined(NLMSG_HDRLEN)
-#define NLMSG_HDRLEN	 ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
+#define NLMSG_HDRLEN	 ((int)NLMSG_ALIGN(sizeof(struct nlmsghdr)))
 #endif
 
 #define ND_MAX_GROUP			30
@@ -29,8 +29,8 @@
 #define ND_NLMSG_SPACE(len)		(nlmsg_total_size(len) + ND_IFINDEX_LEN)
 #define ND_NLMSG_DATA(nlh) \
 	((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
-#define ND_NLMSG_S_LEN(len)		(len+ND_IFINDEX_LEN)
-#define ND_NLMSG_R_LEN(nlh)		(nlh->nlmsg_len-ND_IFINDEX_LEN)
+#define ND_NLMSG_S_LEN(len)		(len + ND_IFINDEX_LEN)
+#define ND_NLMSG_R_LEN(nlh)		(nlh->nlmsg_len - ND_IFINDEX_LEN)
 #define ND_NLMSG_IFIDX(nlh)		nlmsg_data(nlh)
 #define ND_MAX_MSG_LEN			8096
 
@@ -143,7 +143,7 @@
 	NETLINK_CB(skb).portid = 0;
 	NETLINK_CB(skb).dst_group = 0;
 
-	ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
+	ret = netlink_broadcast(sock, skb, 0, group + 1, GFP_ATOMIC);
 
 	if (!ret)
 		return len;
@@ -151,6 +151,5 @@
 		pr_err("netlink_broadcast g=%d, t=%d, l=%d, r=%d\n",
 		       group, type, len, ret);
 	}
-	ret = 0;
-	return ret;
+	return 0;
 }
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
index ba94b5f..5e9b38f 100644
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ b/drivers/staging/gdm72xx/sdio_boot.c
@@ -96,7 +96,7 @@
 		buf[1] = (len >> 8) & 0xff;
 		buf[2] = (len >> 16) & 0xff;
 
-		memcpy(buf+TYPE_A_HEADER_SIZE, firm->data + pos, len);
+		memcpy(buf + TYPE_A_HEADER_SIZE, firm->data + pos, len);
 		ret = sdio_memcpy_toio(func, 0, buf, len + TYPE_A_HEADER_SIZE);
 		if (ret < 0) {
 			dev_err(&func->dev,
diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c
index 39ca34031..99a5c07 100644
--- a/drivers/staging/gdm72xx/usb_boot.c
+++ b/drivers/staging/gdm72xx/usb_boot.c
@@ -292,8 +292,8 @@
 		return -ENOMEM;
 	}
 
-	strcpy(buf+pad_size, type_string);
-	ret = gdm_wibro_send(usbdev, buf, strlen(type_string)+pad_size);
+	strcpy(buf + pad_size, type_string);
+	ret = gdm_wibro_send(usbdev, buf, strlen(type_string) + pad_size);
 	if (ret < 0)
 		goto out;
 
@@ -310,8 +310,8 @@
 		else
 			len = img_len; /* the last chunk of data */
 
-		memcpy(buf+pad_size, firm->data + pos, len);
-		ret = gdm_wibro_send(usbdev, buf, len+pad_size);
+		memcpy(buf + pad_size, firm->data + pos, len);
+		ret = gdm_wibro_send(usbdev, buf, len + pad_size);
 
 		if (ret < 0)
 			goto out;
@@ -319,7 +319,7 @@
 		img_len -= DOWNLOAD_CHUCK;
 		pos += DOWNLOAD_CHUCK;
 
-		ret = em_wait_ack(usbdev, ((len+pad_size) % 512 == 0));
+		ret = em_wait_ack(usbdev, ((len + pad_size) % 512 == 0));
 		if (ret < 0)
 			goto out;
 	}
@@ -357,7 +357,5 @@
 		return ret;
 	dev_info(&usbdev->dev, "GCT Emergency: Filesystem download success.\n");
 
-	ret = em_fw_reset(usbdev);
-
-	return ret;
+	return em_fw_reset(usbdev);
 }
diff --git a/drivers/staging/gdm72xx/usb_ids.h b/drivers/staging/gdm72xx/usb_ids.h
index 7afb9ba..84a6d68 100644
--- a/drivers/staging/gdm72xx/usb_ids.h
+++ b/drivers/staging/gdm72xx/usb_ids.h
@@ -32,10 +32,10 @@
 #define BL_PID_MASK		0xffc0
 
 #define USB_DEVICE_BOOTLOADER(vid, pid)	\
-	{USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)}
+	{USB_DEVICE((vid), ((pid) & BL_PID_MASK) | B_DOWNLOAD)}
 
 #define USB_DEVICE_BOOTLOADER_DRV(vid, pid)	\
-	{USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD|B_DIFF_DL_DRV)}
+	{USB_DEVICE((vid), ((pid) & BL_PID_MASK) | B_DOWNLOAD | B_DIFF_DL_DRV)}
 
 #define USB_DEVICE_CDC_DATA(vid, pid)	\
 	{USB_DEVICE_INTF((vid), (pid), USB_CLASS_CDC_DATA)}
@@ -44,40 +44,40 @@
 	USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID1),
 	USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID1),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x1),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x2),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x3),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x4),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x5),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x6),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x7),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x8),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x9),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xa),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xb),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xc),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xd),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xe),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xf),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x1),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x2),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x3),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x4),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x5),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x6),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x7),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x8),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0x9),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0xa),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0xb),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0xc),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0xd),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0xe),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1 + 0xf),
 
 	USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID2),
 	USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID2),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x1),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x2),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x3),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x4),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x5),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x6),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x7),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x8),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x9),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xa),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xb),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xc),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xd),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xe),
-	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xf),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x1),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x2),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x3),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x4),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x5),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x6),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x7),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x8),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0x9),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0xa),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0xb),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0xc),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0xd),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0xe),
+	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2 + 0xf),
 
 	{USB_DEVICE(GCT_VID, EMERGENCY_PID)},
 	{ }
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index b0927e4..364fdcd 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -63,7 +63,7 @@
 #define AUDIO_READ(data, addr)		(readl(data->reg_base + addr))
 #define AUDIO_WRITE(data, addr, x)	(writel(x, data->reg_base + addr))
 #define AUDIO_WRITE64(data, addr, addr2, x)	\
-	(gf_write_dma_addr((x), data->reg_base + addr, data->reg_base+addr2))
+	(gf_write_dma_addr((x), data->reg_base + addr, data->reg_base + addr2))
 
 /*
  *  temporary variable used between goldfish_audio_probe() and
@@ -280,12 +280,12 @@
 	platform_set_drvdata(pdev, data);
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (r == NULL) {
+	if (!r) {
 		dev_err(&pdev->dev, "platform_get_resource failed\n");
 		return -ENODEV;
 	}
 	data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
-	if (data->reg_base == NULL)
+	if (!data->reg_base)
 		return -ENOMEM;
 
 	data->irq = platform_get_irq(pdev, 0);
@@ -295,7 +295,7 @@
 	}
 	data->buffer_virt = dmam_alloc_coherent(&pdev->dev,
 				COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL);
-	if (data->buffer_virt == NULL) {
+	if (!data->buffer_virt) {
 		dev_err(&pdev->dev, "allocate buffer failed\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index 623353db5..76d60ee 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -27,6 +27,7 @@
 #include <linux/mutex.h>
 #include <linux/goldfish.h>
 #include <asm/div64.h>
+#include <linux/dma-mapping.h>
 
 #include "goldfish_nand_reg.h"
 
@@ -99,11 +100,11 @@
 {
 	loff_t ofs = instr->addr;
 	u32 len = instr->len;
-	u32 rem;
+	s32 rem;
 
 	if (ofs + len > mtd->size)
 		goto invalid_arg;
-	rem = do_div(ofs, mtd->writesize);
+	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	ofs *= (mtd->writesize + mtd->oobsize);
@@ -132,7 +133,7 @@
 static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
 				  struct mtd_oob_ops *ops)
 {
-	u32 rem;
+	s32 rem;
 
 	if (ofs + ops->len > mtd->size)
 		goto invalid_arg;
@@ -141,7 +142,7 @@
 	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
 		goto invalid_arg;
 
-	rem = do_div(ofs, mtd->writesize);
+	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	ofs *= (mtd->writesize + mtd->oobsize);
@@ -164,7 +165,7 @@
 static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
 				   struct mtd_oob_ops *ops)
 {
-	u32 rem;
+	s32 rem;
 
 	if (ofs + ops->len > mtd->size)
 		goto invalid_arg;
@@ -173,7 +174,7 @@
 	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
 		goto invalid_arg;
 
-	rem = do_div(ofs, mtd->writesize);
+	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	ofs *= (mtd->writesize + mtd->oobsize);
@@ -196,12 +197,12 @@
 static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
 			      size_t *retlen, u_char *buf)
 {
-	u32 rem;
+	s32 rem;
 
 	if (from + len > mtd->size)
 		goto invalid_arg;
 
-	rem = do_div(from, mtd->writesize);
+	from = div_s64_rem(from, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	from *= (mtd->writesize + mtd->oobsize);
@@ -218,12 +219,12 @@
 static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
 			       size_t *retlen, const u_char *buf)
 {
-	u32 rem;
+	s32 rem;
 
 	if (to + len > mtd->size)
 		goto invalid_arg;
 
-	rem = do_div(to, mtd->writesize);
+	to = div_s64_rem(to, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	to *= (mtd->writesize + mtd->oobsize);
@@ -239,12 +240,12 @@
 
 static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
-	u32 rem;
+	s32 rem;
 
 	if (ofs >= mtd->size)
 		goto invalid_arg;
 
-	rem = do_div(ofs, mtd->erasesize);
+	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	ofs *= mtd->erasesize / mtd->writesize;
@@ -260,12 +261,12 @@
 
 static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
-	u32 rem;
+	s32 rem;
 
 	if (ofs >= mtd->size)
 		goto invalid_arg;
 
-	rem = do_div(ofs, mtd->erasesize);
+	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
 	if (rem)
 		goto invalid_arg;
 	ofs *= mtd->erasesize / mtd->writesize;
@@ -284,17 +285,18 @@
 static int nand_setup_cmd_params(struct platform_device *pdev,
 				 struct goldfish_nand *nand)
 {
-	u64 paddr;
+	dma_addr_t dma_handle;
 	unsigned char __iomem  *base = nand->base;
 
-	nand->cmd_params = devm_kzalloc(&pdev->dev,
-					sizeof(struct cmd_params), GFP_KERNEL);
-	if (!nand->cmd_params)
-		return -1;
-
-	paddr = __pa(nand->cmd_params);
-	writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
-	writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
+	nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
+					       sizeof(struct cmd_params),
+					       &dma_handle, GFP_KERNEL);
+	if (!nand->cmd_params) {
+		dev_err(&pdev->dev, "allocate buffer failed\n");
+		return -ENOMEM;
+	}
+	writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
+	writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
 	return 0;
 }
 
@@ -319,7 +321,7 @@
 	mtd->oobavail = mtd->oobsize;
 	mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
 			(mtd->writesize + mtd->oobsize) * mtd->writesize;
-	do_div(mtd->size, mtd->writesize + mtd->oobsize);
+	mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
 	mtd->size *= mtd->writesize;
 	dev_dbg(&pdev->dev,
 		"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index a3a10f9..fe3a6bc 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -34,7 +34,7 @@
 #define DEVICE_NAME "device"
 #define CLASS_NAME  "fpgaboot"
 
-static uint8_t bits_magic[] = {
+static u8 bits_magic[] = {
 	0x0, 0x9, 0xf, 0xf0, 0xf, 0xf0,
 	0xf, 0xf0, 0xf, 0xf0, 0x0, 0x0, 0x1};
 
@@ -54,7 +54,7 @@
 static void readinfo_bitstream(char *bitdata, char *buf, int *offset)
 {
 	char tbuf[64];
-	int32_t len;
+	s32 len;
 
 	/* read section char */
 	read_bitstream(bitdata, tbuf, offset, 1);
@@ -291,7 +291,7 @@
 	int err;
 	struct fpgaimage	*fimage;
 
-	fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
+	fimage = kmalloc(sizeof(*fimage), GFP_KERNEL);
 	if (!fimage)
 		return -ENOMEM;
 
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
index 17e5c9c..7c7cd84 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
@@ -1,31 +1,3 @@
-
-What:		/sys/bus/iio/devices/device[n]/range
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Hardware dependent ADC Full Scale Range used for some ambient
-		light sensors in calculating lux.
-
-What:		/sys/bus/iio/devices/device[n]/range_available
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Hardware dependent supported vales for ADC Full Scale Range.
-
-What:		/sys/bus/iio/devices/device[n]/adc_resolution
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Hardware dependent ADC resolution of the ambient light sensor
-		used in calculating the lux.
-
-What:		/sys/bus/iio/devices/device[n]/adc_resolution_available
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Hardware dependent list of possible values supported for the
-		adc_resolution of the given sensor.
-
 What:		/sys/bus/iio/devices/device[n]/in_illuminance0[_input|_raw]
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 7939ae6..7a6fed3 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -567,7 +567,7 @@
 {
 	u8 val;
 	int ret;
-	u8 mask = (1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING)));
+	u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
 
 	ret = lis3l02dq_spi_read_reg_8(indio_dev,
 				       LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
@@ -622,7 +622,7 @@
 	u8 val, control;
 	u8 currentlyset;
 	bool changed = false;
-	u8 mask = (1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING)));
+	u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
 
 	mutex_lock(&indio_dev->mlock);
 	/* read current control */
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 02e930c..a8f533a 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -216,8 +216,7 @@
 	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
 	if (ret)
 		goto error_ret;
-	else
-		return st->rx[0];
+	return st->rx[0];
 error_ret:
 	return ret;
 }
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 1920dc60..d1cb9b9 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -99,8 +99,7 @@
 	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
 	if (ret)
 		goto error_ret;
-	else
-		num_available = st->rx[0];
+	num_available = st->rx[0];
 	/*
 	 * num_available is the total number of samples available
 	 * i.e. number of time points * number of channels.
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index b9519be..deff899 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -59,12 +59,12 @@
 	  temperature sensors and ADC.
 
 config AD7192
-	tristate "Analog Devices AD7190 AD7192 AD7195 ADC driver"
+	tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver"
 	depends on SPI
 	select AD_SIGMA_DELTA
 	help
 	  Say yes here to build support for Analog Devices AD7190,
-	  AD7192 or AD7195 SPI analog to digital converters (ADC).
+	  AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC).
 	  If unsure, say N (but it's safe to say "Y").
 
 	  To compile this driver as a module, choose M here: the
@@ -92,20 +92,6 @@
 	  activate only one via device tree selection.  Provides direct access
 	  via sysfs.
 
-config MXS_LRADC
-	tristate "Freescale i.MX23/i.MX28 LRADC"
-	depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM
-	depends on INPUT
-	select STMP_DEVICE
-	select IIO_BUFFER
-	select IIO_TRIGGERED_BUFFER
-	help
-	  Say yes here to build support for i.MX23/i.MX28 LRADC convertor
-	  built into these chips.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called mxs-lradc.
-
 config SPEAR_ADC
 	tristate "ST SPEAr ADC"
 	depends on PLAT_SPEAR || COMPILE_TEST
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 0c87ce3..3cdd83c 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -12,5 +12,4 @@
 obj-$(CONFIG_AD7192) += ad7192.o
 obj-$(CONFIG_AD7280) += ad7280a.o
 obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
-obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
 obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 9221103..f843f19 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -1,7 +1,7 @@
 /*
- * AD7190 AD7192 AD7195 SPI ADC driver
+ * AD7190 AD7192 AD7193 AD7195 SPI ADC driver
  *
- * Copyright 2011-2012 Analog Devices Inc.
+ * Copyright 2011-2015 Analog Devices Inc.
  *
  * Licensed under the GPL-2.
  */
@@ -92,26 +92,43 @@
 
 #define AD7192_CONF_CHOP	BIT(23) /* CHOP enable */
 #define AD7192_CONF_REFSEL	BIT(20) /* REFIN1/REFIN2 Reference Select */
-#define AD7192_CONF_CHAN(x)	(((1 << (x)) & 0xFF) << 8) /* Channel select */
-#define AD7192_CONF_CHAN_MASK	(0xFF << 8) /* Channel select mask */
+#define AD7192_CONF_CHAN(x)	((x) << 8) /* Channel select */
+#define AD7192_CONF_CHAN_MASK	(0x7FF << 8) /* Channel select mask */
 #define AD7192_CONF_BURN	BIT(7) /* Burnout current enable */
 #define AD7192_CONF_REFDET	BIT(6) /* Reference detect enable */
 #define AD7192_CONF_BUF		BIT(4) /* Buffered Mode Enable */
 #define AD7192_CONF_UNIPOLAR	BIT(3) /* Unipolar/Bipolar Enable */
 #define AD7192_CONF_GAIN(x)	((x) & 0x7) /* Gain Select */
 
-#define AD7192_CH_AIN1P_AIN2M	0 /* AIN1(+) - AIN2(-) */
-#define AD7192_CH_AIN3P_AIN4M	1 /* AIN3(+) - AIN4(-) */
-#define AD7192_CH_TEMP		2 /* Temp Sensor */
-#define AD7192_CH_AIN2P_AIN2M	3 /* AIN2(+) - AIN2(-) */
-#define AD7192_CH_AIN1		4 /* AIN1 - AINCOM */
-#define AD7192_CH_AIN2		5 /* AIN2 - AINCOM */
-#define AD7192_CH_AIN3		6 /* AIN3 - AINCOM */
-#define AD7192_CH_AIN4		7 /* AIN4 - AINCOM */
+#define AD7192_CH_AIN1P_AIN2M	BIT(0) /* AIN1(+) - AIN2(-) */
+#define AD7192_CH_AIN3P_AIN4M	BIT(1) /* AIN3(+) - AIN4(-) */
+#define AD7192_CH_TEMP		BIT(2) /* Temp Sensor */
+#define AD7192_CH_AIN2P_AIN2M	BIT(3) /* AIN2(+) - AIN2(-) */
+#define AD7192_CH_AIN1		BIT(4) /* AIN1 - AINCOM */
+#define AD7192_CH_AIN2		BIT(5) /* AIN2 - AINCOM */
+#define AD7192_CH_AIN3		BIT(6) /* AIN3 - AINCOM */
+#define AD7192_CH_AIN4		BIT(7) /* AIN4 - AINCOM */
+
+#define AD7193_CH_AIN1P_AIN2M	0x000  /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M	0x001  /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M	0x002  /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M	0x004  /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_TEMP		0x100 /* Temp sensor */
+#define AD7193_CH_AIN2P_AIN2M	0x200 /* AIN2(+) - AIN2(-) */
+#define AD7193_CH_AIN1		0x401 /* AIN1 - AINCOM */
+#define AD7193_CH_AIN2		0x402 /* AIN2 - AINCOM */
+#define AD7193_CH_AIN3		0x404 /* AIN3 - AINCOM */
+#define AD7193_CH_AIN4		0x408 /* AIN4 - AINCOM */
+#define AD7193_CH_AIN5		0x410 /* AIN5 - AINCOM */
+#define AD7193_CH_AIN6		0x420 /* AIN6 - AINCOM */
+#define AD7193_CH_AIN7		0x440 /* AIN7 - AINCOM */
+#define AD7193_CH_AIN8		0x480 /* AIN8 - AINCOM */
+#define AD7193_CH_AINCOM	0x600 /* AINCOM - AINCOM */
 
 /* ID Register Bit Designations (AD7192_REG_ID) */
 #define ID_AD7190		0x4
 #define ID_AD7192		0x0
+#define ID_AD7193		0x2
 #define ID_AD7195		0x6
 #define AD7192_ID_MASK		0x0F
 
@@ -236,7 +253,7 @@
 			st->mclk = pdata->ext_clk_hz;
 		else
 			st->mclk = AD7192_INT_FREQ_MHZ;
-			break;
+		break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -607,6 +624,24 @@
 	IIO_CHAN_SOFT_TIMESTAMP(8),
 };
 
+static const struct iio_chan_spec ad7193_channels[] = {
+	AD_SD_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M, 24, 32, 0),
+	AD_SD_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M, 24, 32, 0),
+	AD_SD_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M, 24, 32, 0),
+	AD_SD_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M, 24, 32, 0),
+	AD_SD_TEMP_CHANNEL(4, AD7193_CH_TEMP, 24, 32, 0),
+	AD_SD_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M, 24, 32, 0),
+	AD_SD_CHANNEL(6, 1, AD7193_CH_AIN1, 24, 32, 0),
+	AD_SD_CHANNEL(7, 2, AD7193_CH_AIN2, 24, 32, 0),
+	AD_SD_CHANNEL(8, 3, AD7193_CH_AIN3, 24, 32, 0),
+	AD_SD_CHANNEL(9, 4, AD7193_CH_AIN4, 24, 32, 0),
+	AD_SD_CHANNEL(10, 5, AD7193_CH_AIN5, 24, 32, 0),
+	AD_SD_CHANNEL(11, 6, AD7193_CH_AIN6, 24, 32, 0),
+	AD_SD_CHANNEL(12, 7, AD7193_CH_AIN7, 24, 32, 0),
+	AD_SD_CHANNEL(13, 8, AD7193_CH_AIN8, 24, 32, 0),
+	IIO_CHAN_SOFT_TIMESTAMP(14),
+};
+
 static int ad7192_probe(struct spi_device *spi)
 {
 	const struct ad7192_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -651,8 +686,18 @@
 	indio_dev->dev.parent = &spi->dev;
 	indio_dev->name = spi_get_device_id(spi)->name;
 	indio_dev->modes = INDIO_DIRECT_MODE;
-	indio_dev->channels = ad7192_channels;
-	indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
+
+	switch (st->devid) {
+	case ID_AD7193:
+		indio_dev->channels = ad7193_channels;
+		indio_dev->num_channels = ARRAY_SIZE(ad7193_channels);
+		break;
+	default:
+		indio_dev->channels = ad7192_channels;
+		indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
+		break;
+	}
+
 	if (st->devid == ID_AD7195)
 		indio_dev->info = &ad7195_info;
 	else
@@ -699,6 +744,7 @@
 static const struct spi_device_id ad7192_id[] = {
 	{"ad7190", ID_AD7190},
 	{"ad7192", ID_AD7192},
+	{"ad7193", ID_AD7193},
 	{"ad7195", ID_AD7195},
 	{}
 };
@@ -715,5 +761,5 @@
 module_spi_driver(ad7192_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
+MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7193, AD7195 ADC");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index f45ebed..62e5eca 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -214,8 +214,8 @@
 static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
 			unsigned addr, bool all, unsigned val)
 {
-	unsigned reg = (devaddr << 27 | addr << 21 |
-			(val & 0xFF) << 13 | all << 12);
+	unsigned reg = devaddr << 27 | addr << 21 |
+			(val & 0xFF) << 13 | all << 12;
 
 	reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
 	st->buf[0] = cpu_to_be32(reg);
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index ec89d05..cca9469 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -85,8 +85,6 @@
 	int (*read_block)(struct device *, int, void *);
 };
 
-void ad7606_suspend(struct iio_dev *indio_dev);
-void ad7606_resume(struct iio_dev *indio_dev);
 struct iio_dev *ad7606_probe(struct device *dev, int irq,
 			      void __iomem *base_address, unsigned id,
 			      const struct ad7606_bus_ops *bops);
@@ -101,4 +99,12 @@
 
 int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
 void ad7606_ring_cleanup(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops ad7606_pm_ops;
+#define AD7606_PM_OPS (&ad7606_pm_ops)
+#else
+#define AD7606_PM_OPS NULL
+#endif
+
 #endif /* IIO_ADC_AD7606_H_ */
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 2c9d8b7..fe6caee 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -250,7 +250,8 @@
 		},						\
 	}
 
-static const struct iio_chan_spec ad7606_8_channels[] = {
+static const struct iio_chan_spec ad7606_channels[] = {
+	IIO_CHAN_SOFT_TIMESTAMP(8),
 	AD7606_CHANNEL(0),
 	AD7606_CHANNEL(1),
 	AD7606_CHANNEL(2),
@@ -259,25 +260,6 @@
 	AD7606_CHANNEL(5),
 	AD7606_CHANNEL(6),
 	AD7606_CHANNEL(7),
-	IIO_CHAN_SOFT_TIMESTAMP(8),
-};
-
-static const struct iio_chan_spec ad7606_6_channels[] = {
-	AD7606_CHANNEL(0),
-	AD7606_CHANNEL(1),
-	AD7606_CHANNEL(2),
-	AD7606_CHANNEL(3),
-	AD7606_CHANNEL(4),
-	AD7606_CHANNEL(5),
-	IIO_CHAN_SOFT_TIMESTAMP(6),
-};
-
-static const struct iio_chan_spec ad7606_4_channels[] = {
-	AD7606_CHANNEL(0),
-	AD7606_CHANNEL(1),
-	AD7606_CHANNEL(2),
-	AD7606_CHANNEL(3),
-	IIO_CHAN_SOFT_TIMESTAMP(4),
 };
 
 static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
@@ -287,20 +269,20 @@
 	[ID_AD7606_8] = {
 		.name = "ad7606",
 		.int_vref_mv = 2500,
-		.channels = ad7606_8_channels,
-		.num_channels = 8,
+		.channels = ad7606_channels,
+		.num_channels = 9,
 	},
 	[ID_AD7606_6] = {
 		.name = "ad7606-6",
 		.int_vref_mv = 2500,
-		.channels = ad7606_6_channels,
-		.num_channels = 6,
+		.channels = ad7606_channels,
+		.num_channels = 7,
 	},
 	[ID_AD7606_4] = {
 		.name = "ad7606-4",
 		.int_vref_mv = 2500,
-		.channels = ad7606_4_channels,
-		.num_channels = 4,
+		.channels = ad7606_channels,
+		.num_channels = 5,
 	},
 };
 
@@ -578,8 +560,11 @@
 }
 EXPORT_SYMBOL_GPL(ad7606_remove);
 
-void ad7606_suspend(struct iio_dev *indio_dev)
+#ifdef CONFIG_PM_SLEEP
+
+static int ad7606_suspend(struct device *dev)
 {
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad7606_state *st = iio_priv(indio_dev);
 
 	if (gpio_is_valid(st->pdata->gpio_stby)) {
@@ -587,11 +572,13 @@
 			gpio_set_value(st->pdata->gpio_range, 1);
 		gpio_set_value(st->pdata->gpio_stby, 0);
 	}
-}
-EXPORT_SYMBOL_GPL(ad7606_suspend);
 
-void ad7606_resume(struct iio_dev *indio_dev)
+	return 0;
+}
+
+static int ad7606_resume(struct device *dev)
 {
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad7606_state *st = iio_priv(indio_dev);
 
 	if (gpio_is_valid(st->pdata->gpio_stby)) {
@@ -602,8 +589,14 @@
 		gpio_set_value(st->pdata->gpio_stby, 1);
 		ad7606_reset(st);
 	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(ad7606_resume);
+
+SIMPLE_DEV_PM_OPS(ad7606_pm_ops, ad7606_suspend, ad7606_resume);
+EXPORT_SYMBOL_GPL(ad7606_pm_ops);
+
+#endif
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index adc370e..84d2393 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -90,36 +90,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int ad7606_par_suspend(struct device *dev)
-{
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
-	ad7606_suspend(indio_dev);
-
-	return 0;
-}
-
-static int ad7606_par_resume(struct device *dev)
-{
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
-	ad7606_resume(indio_dev);
-
-	return 0;
-}
-
-static const struct dev_pm_ops ad7606_pm_ops = {
-	.suspend = ad7606_par_suspend,
-	.resume  = ad7606_par_resume,
-};
-
-#define AD7606_PAR_PM_OPS (&ad7606_pm_ops)
-
-#else
-#define AD7606_PAR_PM_OPS NULL
-#endif  /* CONFIG_PM */
-
 static const struct platform_device_id ad7606_driver_ids[] = {
 	{
 		.name		= "ad7606-8",
@@ -142,7 +112,7 @@
 	.id_table = ad7606_driver_ids,
 	.driver = {
 		.name	 = "ad7606",
-		.pm    = AD7606_PAR_PM_OPS,
+		.pm	 = AD7606_PM_OPS,
 	},
 };
 
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index cbb36317..d873a51 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -62,36 +62,6 @@
 	return ad7606_remove(indio_dev, spi->irq);
 }
 
-#ifdef CONFIG_PM
-static int ad7606_spi_suspend(struct device *dev)
-{
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
-	ad7606_suspend(indio_dev);
-
-	return 0;
-}
-
-static int ad7606_spi_resume(struct device *dev)
-{
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
-	ad7606_resume(indio_dev);
-
-	return 0;
-}
-
-static const struct dev_pm_ops ad7606_pm_ops = {
-	.suspend = ad7606_spi_suspend,
-	.resume  = ad7606_spi_resume,
-};
-
-#define AD7606_SPI_PM_OPS (&ad7606_pm_ops)
-
-#else
-#define AD7606_SPI_PM_OPS NULL
-#endif
-
 static const struct spi_device_id ad7606_id[] = {
 	{"ad7606-8", ID_AD7606_8},
 	{"ad7606-6", ID_AD7606_6},
@@ -103,7 +73,7 @@
 static struct spi_driver ad7606_driver = {
 	.driver = {
 		.name = "ad7606",
-		.pm    = AD7606_SPI_PM_OPS,
+		.pm = AD7606_PM_OPS,
 	},
 	.probe = ad7606_spi_probe,
 	.remove = ad7606_spi_remove,
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 2226051..ac3735c 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -296,14 +296,14 @@
 		dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
 		return -EINVAL;
 	} else if (chip->channel_id == 0) {
-		if (ret || value < AD7816_BOUND_VALUE_MIN ||
+		if (value < AD7816_BOUND_VALUE_MIN ||
 		    value > AD7816_BOUND_VALUE_MAX)
 			return -EINVAL;
 
 		data = (u8)(value - AD7816_BOUND_VALUE_MIN +
 			AD7816_BOUND_VALUE_BASE);
 	} else {
-		if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255)
+		if (value < AD7816_BOUND_VALUE_BASE || value > 255)
 			return -EINVAL;
 
 		data = (u8)value;
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index 712cae0..f2c0065 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -288,7 +288,7 @@
 	st->adc_base_spear3xx =
 		(struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx;
 
-	st->clk = clk_get(dev, NULL);
+	st->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(st->clk)) {
 		dev_err(dev, "failed getting clock\n");
 		goto errout1;
@@ -297,28 +297,28 @@
 	ret = clk_prepare_enable(st->clk);
 	if (ret) {
 		dev_err(dev, "failed enabling clock\n");
-		goto errout2;
+		goto errout1;
 	}
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
 		dev_err(dev, "failed getting interrupt resource\n");
 		ret = -EINVAL;
-		goto errout3;
+		goto errout2;
 	}
 
 	ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME,
 			       st);
 	if (ret < 0) {
 		dev_err(dev, "failed requesting interrupt\n");
-		goto errout3;
+		goto errout2;
 	}
 
 	if (of_property_read_u32(np, "sampling-frequency",
 				 &st->sampling_freq)) {
 		dev_err(dev, "sampling-frequency missing in DT\n");
 		ret = -EINVAL;
-		goto errout3;
+		goto errout2;
 	}
 
 	/*
@@ -348,16 +348,14 @@
 
 	ret = iio_device_register(indio_dev);
 	if (ret)
-		goto errout3;
+		goto errout2;
 
 	dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
 
 	return 0;
 
-errout3:
-	clk_disable_unprepare(st->clk);
 errout2:
-	clk_put(st->clk);
+	clk_disable_unprepare(st->clk);
 errout1:
 	iounmap(st->adc_base_spear6xx);
 	return ret;
@@ -370,7 +368,6 @@
 
 	iio_device_unregister(indio_dev);
 	clk_disable_unprepare(st->clk);
-	clk_put(st->clk);
 	iounmap(st->adc_base_spear6xx);
 
 	return 0;
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index e8d0ff2..f6b9a10 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -21,8 +21,8 @@
  */
 
 #define AD7150_STATUS              0
-#define AD7150_STATUS_OUT1         (1 << 3)
-#define AD7150_STATUS_OUT2         (1 << 5)
+#define AD7150_STATUS_OUT1         BIT(3)
+#define AD7150_STATUS_OUT2         BIT(5)
 #define AD7150_CH1_DATA_HIGH       1
 #define AD7150_CH2_DATA_HIGH       3
 #define AD7150_CH1_AVG_HIGH        5
@@ -36,7 +36,7 @@
 #define AD7150_CH2_TIMEOUT         13
 #define AD7150_CH2_SETUP           14
 #define AD7150_CFG                 15
-#define AD7150_CFG_FIX             (1 << 7)
+#define AD7150_CFG_FIX             BIT(7)
 #define AD7150_PD_TIMER            16
 #define AD7150_CH1_CAPDAC          17
 #define AD7150_CH2_CAPDAC          18
@@ -160,8 +160,9 @@
 
 /* lock should be held */
 static int ad7150_write_event_params(struct iio_dev *indio_dev,
-	 unsigned int chan, enum iio_event_type type,
-	 enum iio_event_direction dir)
+				     unsigned int chan,
+				     enum iio_event_type type,
+				     enum iio_event_direction dir)
 {
 	int ret;
 	u16 value;
@@ -209,8 +210,9 @@
 }
 
 static int ad7150_write_event_config(struct iio_dev *indio_dev,
-	const struct iio_chan_spec *chan, enum iio_event_type type,
-	enum iio_event_direction dir, int state)
+				     const struct iio_chan_spec *chan,
+				     enum iio_event_type type,
+				     enum iio_event_direction dir, int state)
 {
 	u8 thresh_type, cfg, adaptive;
 	int ret;
@@ -302,11 +304,11 @@
 }
 
 static int ad7150_write_event_value(struct iio_dev *indio_dev,
-				   const struct iio_chan_spec *chan,
-				   enum iio_event_type type,
-				   enum iio_event_direction dir,
-				   enum iio_event_info info,
-				   int val, int val2)
+				    const struct iio_chan_spec *chan,
+				    enum iio_event_type type,
+				    enum iio_event_direction dir,
+				    enum iio_event_info info,
+				    int val, int val2)
 {
 	int ret;
 	struct ad7150_chip_info *chip = iio_priv(indio_dev);
@@ -365,9 +367,9 @@
 }
 
 static ssize_t ad7150_store_timeout(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct ad7150_chip_info *chip = iio_priv(indio_dev);
@@ -580,7 +582,7 @@
  */
 
 static int ad7150_probe(struct i2c_client *client,
-		const struct i2c_device_id *id)
+			const struct i2c_device_id *id)
 {
 	int ret;
 	struct ad7150_chip_info *chip;
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 2c5d277..5771d4ee 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -529,8 +529,8 @@
 
 		val /= 338646;
 
-		chip->capdac[chan->channel][chan->differential] = (val > 0 ?
-			AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0);
+		chip->capdac[chan->channel][chan->differential] = val > 0 ?
+			AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0;
 
 		ret = i2c_smbus_write_byte_data(chip->client,
 			AD7746_REG_CAPDACA,
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 10c43dda..d1218d8 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -558,7 +558,7 @@
 }
 
 static const struct iio_info ad5933_info = {
-	.read_raw = &ad5933_read_raw,
+	.read_raw = ad5933_read_raw,
 	.attrs = &ad5933_attribute_group,
 	.driver_module = THIS_MODULE,
 };
@@ -616,9 +616,9 @@
 }
 
 static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
-	.preenable = &ad5933_ring_preenable,
-	.postenable = &ad5933_ring_postenable,
-	.postdisable = &ad5933_ring_postdisable,
+	.preenable = ad5933_ring_preenable,
+	.postenable = ad5933_ring_postenable,
+	.postdisable = ad5933_ring_postdisable,
 };
 
 static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index bbf7e35cb..03dbfb6 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -183,7 +183,7 @@
 
 	/* Set mode */
 	status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1,
-			mode << COMMMAND1_OPMODE_SHIFT);
+			      mode << COMMMAND1_OPMODE_SHIFT);
 	if (status) {
 		dev_err(chip->dev,
 			"Error in setting operating mode err %d\n", status);
@@ -241,7 +241,7 @@
 }
 
 static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
-		int *near_ir)
+				      int *near_ir)
 {
 	int status;
 	int prox_data = -1;
@@ -249,15 +249,15 @@
 
 	/* Do proximity sensing with required scheme */
 	status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
-			COMMANDII_SCHEME_MASK,
-			scheme << COMMANDII_SCHEME_SHIFT);
+				    COMMANDII_SCHEME_MASK,
+				    scheme << COMMANDII_SCHEME_SHIFT);
 	if (status) {
 		dev_err(chip->dev, "Error in setting operating mode\n");
 		return status;
 	}
 
 	prox_data = isl29018_read_sensor_input(chip,
-					COMMMAND1_OPMODE_PROX_ONCE);
+					       COMMMAND1_OPMODE_PROX_ONCE);
 	if (prox_data < 0)
 		return prox_data;
 
@@ -280,7 +280,7 @@
 }
 
 static ssize_t show_scale_available(struct device *dev,
-			struct device_attribute *attr, char *buf)
+				    struct device_attribute *attr, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -297,7 +297,7 @@
 }
 
 static ssize_t show_int_time_available(struct device *dev,
-			struct device_attribute *attr, char *buf)
+				       struct device_attribute *attr, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -314,18 +314,22 @@
 
 /* proximity scheme */
 static ssize_t show_prox_infrared_suppression(struct device *dev,
-			struct device_attribute *attr, char *buf)
+					      struct device_attribute *attr,
+					      char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct isl29018_chip *chip = iio_priv(indio_dev);
 
-	/* return the "proximity scheme" i.e. if the chip does on chip
-	infrared suppression (1 means perform on chip suppression) */
+	/*
+	 * return the "proximity scheme" i.e. if the chip does on chip
+	 * infrared suppression (1 means perform on chip suppression)
+	 */
 	return sprintf(buf, "%d\n", chip->prox_scheme);
 }
 
 static ssize_t store_prox_infrared_suppression(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -338,8 +342,10 @@
 		return -EINVAL;
 	}
 
-	/* get the  "proximity scheme" i.e. if the chip does on chip
-	infrared suppression (1 means perform on chip suppression) */
+	/*
+	 * get the  "proximity scheme" i.e. if the chip does on chip
+	 * infrared suppression (1 means perform on chip suppression)
+	 */
 	mutex_lock(&chip->lock);
 	chip->prox_scheme = val;
 	mutex_unlock(&chip->lock);
@@ -413,7 +419,8 @@
 			break;
 		case IIO_PROXIMITY:
 			ret = isl29018_read_proximity_ir(chip,
-					chip->prox_scheme, val);
+							 chip->prox_scheme,
+							 val);
 			break;
 		default:
 			break;
@@ -614,15 +621,15 @@
 static const struct iio_info isl29018_info = {
 	.attrs = &isl29018_group,
 	.driver_module = THIS_MODULE,
-	.read_raw = &isl29018_read_raw,
-	.write_raw = &isl29018_write_raw,
+	.read_raw = isl29018_read_raw,
+	.write_raw = isl29018_write_raw,
 };
 
 static const struct iio_info isl29023_info = {
 	.attrs = &isl29023_group,
 	.driver_module = THIS_MODULE,
-	.read_raw = &isl29018_read_raw,
-	.write_raw = &isl29018_write_raw,
+	.read_raw = isl29018_read_raw,
+	.write_raw = isl29018_write_raw,
 };
 
 static bool is_volatile_reg(struct device *dev, unsigned int reg)
@@ -699,13 +706,13 @@
 	if (!id)
 		return NULL;
 
-	*data = (int) id->driver_data;
+	*data = (int)id->driver_data;
 
 	return dev_name(dev);
 }
 
 static int isl29018_probe(struct i2c_client *client,
-			 const struct i2c_device_id *id)
+			  const struct i2c_device_id *id)
 {
 	struct isl29018_chip *chip;
 	struct iio_dev *indio_dev;
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 32ae112..6e2ba45 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -81,7 +81,7 @@
 };
 
 static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
-			unsigned int sampling)
+					unsigned int sampling)
 {
 	static unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0};
 	int sel;
@@ -103,7 +103,7 @@
 	if (enable)
 		val = CONFIGURE_PROX_EN;
 	ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
-			CONFIGURE_PROX_EN_MASK, val);
+				 CONFIGURE_PROX_EN_MASK, val);
 	if (ret < 0)
 		return ret;
 
@@ -122,24 +122,27 @@
 }
 
 static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
-	enum als_ir_mode mode)
+				    enum als_ir_mode mode)
 {
 	int ret = 0;
 
 	switch (mode) {
 	case MODE_ALS:
 		ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
-			CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_ALS);
+					 CONFIGURE_ALS_IR_MODE_MASK,
+					 CONFIGURE_ALS_IR_MODE_ALS);
 		if (ret < 0)
 			return ret;
 
 		ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
-			CONFIGURE_ALS_RANGE_MASK, CONFIGURE_ALS_RANGE_HIGH_LUX);
+					 CONFIGURE_ALS_RANGE_MASK,
+					 CONFIGURE_ALS_RANGE_HIGH_LUX);
 		break;
 
 	case MODE_IR:
 		ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
-			CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_IR);
+					 CONFIGURE_ALS_IR_MODE_MASK,
+					 CONFIGURE_ALS_IR_MODE_IR);
 		break;
 
 	case MODE_NONE:
@@ -152,7 +155,7 @@
 
 	/* Enable the ALS/IR */
 	ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
-			CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
+				 CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
 	if (ret < 0)
 		return ret;
 
@@ -193,7 +196,7 @@
 	ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
 	if (ret < 0) {
 		dev_err(chip->dev, "Error in reading register %d, error %d\n",
-				ISL29028_REG_PROX_DATA, ret);
+			ISL29028_REG_PROX_DATA, ret);
 		return ret;
 	}
 	*prox = data;
@@ -264,7 +267,8 @@
 
 /* Channel IO */
 static int isl29028_write_raw(struct iio_dev *indio_dev,
-	     struct iio_chan_spec const *chan, int val, int val2, long mask)
+			      struct iio_chan_spec const *chan,
+			      int val, int val2, long mask)
 {
 	struct isl29028_chip *chip = iio_priv(indio_dev);
 	int ret = -EINVAL;
@@ -323,7 +327,8 @@
 }
 
 static int isl29028_read_raw(struct iio_dev *indio_dev,
-	     struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+			     struct iio_chan_spec const *chan,
+			     int *val, int *val2, long mask)
 {
 	struct isl29028_chip *chip = iio_priv(indio_dev);
 	int ret = -EINVAL;
@@ -406,8 +411,8 @@
 static const struct iio_info isl29028_info = {
 	.attrs = &isl29108_group,
 	.driver_module = THIS_MODULE,
-	.read_raw = &isl29028_read_raw,
-	.write_raw = &isl29028_write_raw,
+	.read_raw = isl29028_read_raw,
+	.write_raw = isl29028_write_raw,
 };
 
 static int isl29028_chip_init(struct isl29028_chip *chip)
@@ -476,7 +481,7 @@
 };
 
 static int isl29028_probe(struct i2c_client *client,
-	const struct i2c_device_id *id)
+			  const struct i2c_device_id *id)
 {
 	struct isl29028_chip *chip;
 	struct iio_dev *indio_dev;
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 3100d96..05b4ad4 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -240,8 +240,10 @@
 		}
 	}
 
-	/* clear status, really interrupt status (interrupts are off), but
-	 * we use the bit anyway - don't forget 0x80 - this is a command*/
+	/*
+	 * clear status, really interrupt status (interrupts are off), but
+	 * we use the bit anyway - don't forget 0x80 - this is a command
+	 */
 	ret = i2c_smbus_write_byte(chip->client,
 				   (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
 				    TSL258X_CMD_ALS_INT_CLR));
@@ -265,13 +267,14 @@
 
 	if (!ch0) {
 		/* have no data, so return LAST VALUE */
-		ret = chip->als_cur_info.lux = 0;
+		ret = 0;
+		chip->als_cur_info.lux = 0;
 		goto out_unlock;
 	}
 	/* calculate ratio */
 	ratio = (ch1 << 15) / ch0;
 	/* convert to unscaled lux using the pointer to the table */
-	for (p = (struct taos_lux *) taos_device_lux;
+	for (p = (struct taos_lux *)taos_device_lux;
 	     p->ratio != 0 && p->ratio < ratio; p++)
 		;
 
@@ -290,7 +293,8 @@
 	/* note: lux is 31 bit max at this point */
 	if (ch1lux > ch0lux) {
 		dev_dbg(&chip->client->dev, "No Data - Return last value\n");
-		ret = chip->als_cur_info.lux = 0;
+		ret = 0;
+		chip->als_cur_info.lux = 0;
 		goto out_unlock;
 	}
 
@@ -378,7 +382,7 @@
 		dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
 		return lux_val;
 	}
-	gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target)
+	gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target)
 			* chip->taos_settings.als_gain_trim) / lux_val);
 
 	if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
@@ -387,9 +391,9 @@
 			gain_trim_val);
 		return -ENODATA;
 	}
-	chip->taos_settings.als_gain_trim = (int) gain_trim_val;
+	chip->taos_settings.als_gain_trim = (int)gain_trim_val;
 
-	return (int) gain_trim_val;
+	return (int)gain_trim_val;
 }
 
 /*
@@ -429,8 +433,10 @@
 	chip->als_saturation = als_count * 922; /* 90% of full scale */
 	chip->als_time_scale = (als_time + 25) / 50;
 
-	/* TSL258x Specific power-on / adc enable sequence
-	 * Power on the device 1st. */
+	/*
+	 * TSL258x Specific power-on / adc enable sequence
+	 * Power on the device 1st.
+	 */
 	utmp = TSL258X_CNTL_PWR_ON;
 	ret = i2c_smbus_write_byte_data(chip->client,
 					TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
@@ -439,8 +445,10 @@
 		return ret;
 	}
 
-	/* Use the following shadow copy for our delay before enabling ADC.
-	 * Write all the registers. */
+	/*
+	 * Use the following shadow copy for our delay before enabling ADC.
+	 * Write all the registers.
+	 */
 	for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
 		ret = i2c_smbus_write_byte_data(chip->client,
 						TSL258X_CMD_REG + i,
@@ -453,8 +461,10 @@
 	}
 
 	usleep_range(3000, 3500);
-	/* NOW enable the ADC
-	 * initialize the desired mode of operation */
+	/*
+	 * NOW enable the ADC
+	 * initialize the desired mode of operation
+	 */
 	utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
 	ret = i2c_smbus_write_byte_data(chip->client,
 					TSL258X_CMD_REG | TSL258X_CNTRL,
@@ -482,7 +492,7 @@
 /* Sysfs Interface Functions */
 
 static ssize_t taos_power_state_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+				     struct device_attribute *attr, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -491,7 +501,8 @@
 }
 
 static ssize_t taos_power_state_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+				      struct device_attribute *attr,
+				      const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	int value;
@@ -508,7 +519,7 @@
 }
 
 static ssize_t taos_gain_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+			      struct device_attribute *attr, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -533,7 +544,8 @@
 }
 
 static ssize_t taos_gain_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+			       struct device_attribute *attr,
+			       const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -564,13 +576,14 @@
 }
 
 static ssize_t taos_gain_available_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+					struct device_attribute *attr,
+					char *buf)
 {
 	return sprintf(buf, "%s\n", "1 8 16 111");
 }
 
 static ssize_t taos_als_time_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+				  struct device_attribute *attr, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -579,7 +592,8 @@
 }
 
 static ssize_t taos_als_time_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -600,14 +614,15 @@
 }
 
 static ssize_t taos_als_time_available_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+					    struct device_attribute *attr,
+					    char *buf)
 {
 	return sprintf(buf, "%s\n",
 		"50 100 150 200 250 300 350 400 450 500 550 600 650");
 }
 
 static ssize_t taos_als_trim_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+				  struct device_attribute *attr, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -616,7 +631,8 @@
 }
 
 static ssize_t taos_als_trim_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -632,7 +648,8 @@
 }
 
 static ssize_t taos_als_cal_target_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+					struct device_attribute *attr,
+					char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -641,7 +658,8 @@
 }
 
 static ssize_t taos_als_cal_target_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+					 struct device_attribute *attr,
+					 const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -657,7 +675,7 @@
 }
 
 static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
-	char *buf)
+			     char *buf)
 {
 	int ret;
 
@@ -669,7 +687,8 @@
 }
 
 static ssize_t taos_do_calibrate(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+				 struct device_attribute *attr,
+				 const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	int value;
@@ -684,7 +703,7 @@
 }
 
 static ssize_t taos_luxtable_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
+				  struct device_attribute *attr, char *buf)
 {
 	int i;
 	int offset = 0;
@@ -695,8 +714,10 @@
 				  taos_device_lux[i].ch0,
 				  taos_device_lux[i].ch1);
 		if (taos_device_lux[i].ratio == 0) {
-			/* We just printed the first "0" entry.
-			 * Now get rid of the extra "," and break. */
+			/*
+			 * We just printed the first "0" entry.
+			 * Now get rid of the extra "," and break.
+			 */
 			offset--;
 			break;
 		}
@@ -707,11 +728,12 @@
 }
 
 static ssize_t taos_luxtable_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t len)
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int value[ARRAY_SIZE(taos_device_lux)*3 + 1];
+	int value[ARRAY_SIZE(taos_device_lux) * 3 + 1];
 	int n;
 
 	get_options(buf, ARRAY_SIZE(value), value);
@@ -809,7 +831,7 @@
 	struct iio_dev *indio_dev;
 
 	if (!i2c_check_functionality(clientp->adapter,
-		I2C_FUNC_SMBUS_BYTE_DATA)) {
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
 		dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
 		return -EOPNOTSUPP;
 	}
@@ -846,7 +868,7 @@
 
 	if (!taos_tsl258x_device(buf)) {
 		dev_info(&clientp->dev,
-			"i2c device found but does not match expected id in taos_probe()\n");
+			 "i2c device found but does not match expected id in taos_probe()\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 5b1c165..2a62718 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -687,9 +687,9 @@
 
 	/* Set the gain based on tsl2x7x_settings struct */
 	chip->tsl2x7x_config[TSL2X7X_GAIN] =
-		(chip->tsl2x7x_settings.als_gain |
+		chip->tsl2x7x_settings.als_gain |
 			(TSL2X7X_mA100 | TSL2X7X_DIODE1)
-			| ((chip->tsl2x7x_settings.prox_gain) << 2));
+			| ((chip->tsl2x7x_settings.prox_gain) << 2);
 
 	/* set chip struct re scaling and saturation */
 	chip->als_saturation = als_count * 922; /* 90% of full scale */
@@ -983,7 +983,7 @@
 
 	result.fract /= 3;
 	chip->tsl2x7x_settings.als_time =
-			(TSL2X7X_MAX_TIMER_CNT - (u8)result.fract);
+			TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
 
 	dev_info(&chip->client->dev, "%s: als time = %d",
 		__func__, chip->tsl2x7x_settings.als_time);
diff --git a/drivers/staging/iio/magnetometer/hmc5843.h b/drivers/staging/iio/magnetometer/hmc5843.h
index 06f35d3..76a5d74 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.h
+++ b/drivers/staging/iio/magnetometer/hmc5843.h
@@ -7,8 +7,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * */
+ */
 
 #ifndef HMC5843_CORE_H
 #define HMC5843_CORE_H
@@ -38,7 +37,7 @@
  * @regmap:		hardware access register maps
  * @variant:		describe chip variants
  * @buffer:		3x 16-bit channels + padding + 64-bit timestamp
- **/
+ */
 struct hmc5843_data {
 	struct device *dev;
 	struct mutex lock;
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/staging/iio/magnetometer/hmc5843_core.c
index 394bc14..9ee9a42 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_core.c
+++ b/drivers/staging/iio/magnetometer/hmc5843_core.c
@@ -554,9 +554,9 @@
 
 static const struct iio_info hmc5843_info = {
 	.attrs = &hmc5843_group,
-	.read_raw = &hmc5843_read_raw,
-	.write_raw = &hmc5843_write_raw,
-	.write_raw_get_fmt = &hmc5843_write_raw_get_fmt,
+	.read_raw = hmc5843_read_raw,
+	.write_raw = hmc5843_write_raw,
+	.write_raw_get_fmt = hmc5843_write_raw_get_fmt,
 	.driver_module = THIS_MODULE,
 };
 
@@ -565,14 +565,14 @@
 int hmc5843_common_suspend(struct device *dev)
 {
 	return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
-			HMC5843_MODE_CONVERSION_CONTINUOUS);
+				HMC5843_MODE_SLEEP);
 }
 EXPORT_SYMBOL(hmc5843_common_suspend);
 
 int hmc5843_common_resume(struct device *dev)
 {
 	return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
-				HMC5843_MODE_SLEEP);
+		HMC5843_MODE_CONVERSION_CONTINUOUS);
 }
 EXPORT_SYMBOL(hmc5843_common_resume);
 
diff --git a/drivers/staging/iio/magnetometer/hmc5843_i2c.c b/drivers/staging/iio/magnetometer/hmc5843_i2c.c
index 3e06ceb..3de7f44 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/staging/iio/magnetometer/hmc5843_i2c.c
@@ -7,8 +7,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * */
+ */
 
 #include <linux/module.h>
 #include <linux/i2c.h>
diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/staging/iio/magnetometer/hmc5843_spi.c
index 8be1980..535f03a 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/staging/iio/magnetometer/hmc5843_spi.c
@@ -6,8 +6,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * */
+ */
 
 #include <linux/module.h>
 #include <linux/spi/spi.h>
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 595e711..82b2d88 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -31,7 +31,7 @@
 /* input clock on serial interface */
 #define AD2S1200_HZ	8192000
 /* clock period in nano second */
-#define AD2S1200_TSCLK	(1000000000/AD2S1200_HZ)
+#define AD2S1200_TSCLK	(1000000000 / AD2S1200_HZ)
 
 struct ad2s1200_state {
 	struct mutex lock;
@@ -42,10 +42,10 @@
 };
 
 static int ad2s1200_read_raw(struct iio_dev *indio_dev,
-			   struct iio_chan_spec const *chan,
-			   int *val,
-			   int *val2,
-			   long m)
+			     struct iio_chan_spec const *chan,
+			     int *val,
+			     int *val2,
+			     long m)
 {
 	int ret = 0;
 	s16 vel;
@@ -113,7 +113,7 @@
 					    DRV_NAME);
 		if (ret) {
 			dev_err(&spi->dev, "request gpio pin %d failed\n",
-							pins[pn]);
+				pins[pn]);
 			return ret;
 		}
 	}
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index d97aa28..6b99263 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -67,7 +67,7 @@
 /* default input clock on serial interface */
 #define AD2S1210_DEF_CLKIN	8192000
 /* clock period in nano second */
-#define AD2S1210_DEF_TCK	(1000000000/AD2S1210_DEF_CLKIN)
+#define AD2S1210_DEF_TCK	(1000000000 / AD2S1210_DEF_CLKIN)
 #define AD2S1210_DEF_EXCIT	10000
 
 enum ad2s1210_mode {
@@ -98,6 +98,7 @@
 	[MOD_VEL] = { 0, 1 },
 	[MOD_CONFIG] = { 1, 0 },
 };
+
 static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
 				     struct ad2s1210_state *st)
 {
@@ -123,7 +124,7 @@
 
 /* read value from one of the registers */
 static int ad2s1210_config_read(struct ad2s1210_state *st,
-		       unsigned char address)
+				unsigned char address)
 {
 	struct spi_transfer xfer = {
 		.len = 2,
@@ -176,9 +177,9 @@
 static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st)
 {
 	gpio_set_value(st->pdata->res[0],
-		       ad2s1210_res_pins[(st->resolution - 10)/2][0]);
+		       ad2s1210_res_pins[(st->resolution - 10) / 2][0]);
 	gpio_set_value(st->pdata->res[1],
-		       ad2s1210_res_pins[(st->resolution - 10)/2][1]);
+		       ad2s1210_res_pins[(st->resolution - 10) / 2][1]);
 }
 
 static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
@@ -282,8 +283,8 @@
 }
 
 static ssize_t ad2s1210_store_control(struct device *dev,
-			struct device_attribute *attr,
-			const char *buf, size_t len)
+				      struct device_attribute *attr,
+				      const char *buf, size_t len)
 {
 	struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
 	unsigned char udata;
@@ -318,9 +319,9 @@
 		data = ad2s1210_read_resolution_pin(st);
 		if (data != st->resolution)
 			dev_warn(dev, "ad2s1210: resolution settings not match\n");
-	} else
+	} else {
 		ad2s1210_set_resolution_pin(st);
-
+	}
 	ret = len;
 	st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS);
 
@@ -330,7 +331,8 @@
 }
 
 static ssize_t ad2s1210_show_resolution(struct device *dev,
-			struct device_attribute *attr, char *buf)
+					struct device_attribute *attr,
+					char *buf)
 {
 	struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
 
@@ -338,8 +340,8 @@
 }
 
 static ssize_t ad2s1210_store_resolution(struct device *dev,
-			struct device_attribute *attr,
-			const char *buf, size_t len)
+					 struct device_attribute *attr,
+					 const char *buf, size_t len)
 {
 	struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
 	unsigned char data;
@@ -379,8 +381,9 @@
 		data = ad2s1210_read_resolution_pin(st);
 		if (data != st->resolution)
 			dev_warn(dev, "ad2s1210: resolution settings not match\n");
-	} else
+	} else {
 		ad2s1210_set_resolution_pin(st);
+	}
 	ret = len;
 error_ret:
 	mutex_unlock(&st->lock);
@@ -389,7 +392,7 @@
 
 /* read the fault register since last sample */
 static ssize_t ad2s1210_show_fault(struct device *dev,
-			struct device_attribute *attr, char *buf)
+				   struct device_attribute *attr, char *buf)
 {
 	struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
 	int ret;
@@ -441,7 +444,8 @@
 }
 
 static ssize_t ad2s1210_store_reg(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
 {
 	struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
 	unsigned char data;
@@ -497,7 +501,7 @@
 
 	switch (chan->type) {
 	case IIO_ANGL:
-		pos = be16_to_cpup((__be16 *) st->rx);
+		pos = be16_to_cpup((__be16 *)st->rx);
 		if (st->hysteresis)
 			pos >>= 16 - st->resolution;
 		*val = pos;
@@ -505,7 +509,7 @@
 		break;
 	case IIO_ANGL_VEL:
 		negative = st->rx[0] & 0x80;
-		vel = be16_to_cpup((__be16 *) st->rx);
+		vel = be16_to_cpup((__be16 *)st->rx);
 		vel >>= 16 - st->resolution;
 		if (vel & 0x8000) {
 			negative = (0xffff >> st->resolution) << st->resolution;
@@ -560,7 +564,6 @@
 		       ad2s1210_show_reg, ad2s1210_store_reg,
 		       AD2S1210_REG_LOT_LOW_THRD);
 
-
 static const struct iio_chan_spec ad2s1210_channels[] = {
 	{
 		.type = IIO_ANGL,
@@ -672,7 +675,7 @@
 	struct ad2s1210_state *st;
 	int ret;
 
-	if (spi->dev.platform_data == NULL)
+	if (!spi->dev.platform_data)
 		return -EINVAL;
 
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 0d8a91e..5c598c8d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -51,8 +51,6 @@
 #define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
 			   ((hexnum) >> 8 & 0xf))
 
-#define LUSTRE_SRV_LNET_PID      LUSTRE_LNET_PID
-
 #include <linux/list.h>
 
 /* need both kernel and user-land acceptor */
@@ -77,7 +75,7 @@
 	int (*p_close)(unsigned long, void *);
 	int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
 	int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
-	int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void *);
+	int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
 };
 
 /*
@@ -90,7 +88,6 @@
  * Defined by platform
  */
 int unshare_fs_struct(void);
-sigset_t cfs_get_blocked_sigs(void);
 sigset_t cfs_block_allsigs(void);
 sigset_t cfs_block_sigs(unsigned long sigs);
 sigset_t cfs_block_sigsinv(unsigned long sigs);
@@ -115,7 +112,6 @@
 #include "libcfs_prim.h"
 #include "libcfs_time.h"
 #include "libcfs_string.h"
-#include "libcfs_kernelcomm.h"
 #include "libcfs_workitem.h"
 #include "libcfs_hash.h"
 #include "libcfs_fail.h"
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 1530b04..9e62c59 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index a1787bb..98430e7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -106,7 +106,7 @@
 #define S_LOV		0x00020000
 #define S_LQUOTA	0x00040000
 #define S_OSD		0x00080000
-/* unused */
+#define S_LFSCK		0x00100000
 /* unused */
 /* unused */
 #define S_LMV		0x00800000 /* b_new_cmd */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 485ab26..f788631 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -41,11 +41,16 @@
 #ifndef __LIBCFS_IOCTL_H__
 #define __LIBCFS_IOCTL_H__
 
-#define LIBCFS_IOCTL_VERSION 0x0001000a
+#define LIBCFS_IOCTL_VERSION	0x0001000a
+#define LIBCFS_IOCTL_VERSION2	0x0001000b
 
-struct libcfs_ioctl_data {
+struct libcfs_ioctl_hdr {
 	__u32 ioc_len;
 	__u32 ioc_version;
+};
+
+struct libcfs_ioctl_data {
+	struct libcfs_ioctl_hdr ioc_hdr;
 
 	__u64 ioc_nid;
 	__u64 ioc_u64[1];
@@ -61,20 +66,15 @@
 	char *ioc_inlbuf2;
 
 	__u32 ioc_plen1; /* buffers in userspace */
-	char *ioc_pbuf1;
+	void __user *ioc_pbuf1;
 	__u32 ioc_plen2; /* buffers in userspace */
-	char *ioc_pbuf2;
+	void __user *ioc_pbuf2;
 
 	char ioc_bulk[0];
 };
 
 #define ioc_priority ioc_u32[0]
 
-struct libcfs_ioctl_hdr {
-	__u32 ioc_len;
-	__u32 ioc_version;
-};
-
 struct libcfs_debug_ioctl_data {
 	struct libcfs_ioctl_hdr hdr;
 	unsigned int subs;
@@ -90,7 +90,7 @@
 
 struct libcfs_ioctl_handler {
 	struct list_head item;
-	int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
+	int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
 };
 
 #define DECLARE_IOCTL_HANDLER(ident, func)		      \
@@ -102,7 +102,6 @@
 /* FIXME check conflict with lustre_lib.h */
 #define LIBCFS_IOC_DEBUG_MASK	     _IOWR('f', 250, long)
 
-/* ioctls for manipulating snapshots 30- */
 #define IOC_LIBCFS_TYPE		   'e'
 #define IOC_LIBCFS_MIN_NR		 30
 /* libcfs ioctls */
@@ -113,17 +112,14 @@
 /* lnet ioctls */
 #define IOC_LIBCFS_GET_NI		  _IOWR('e', 50, long)
 #define IOC_LIBCFS_FAIL_NID		_IOWR('e', 51, long)
-#define IOC_LIBCFS_ADD_ROUTE	       _IOWR('e', 52, long)
-#define IOC_LIBCFS_DEL_ROUTE	       _IOWR('e', 53, long)
-#define IOC_LIBCFS_GET_ROUTE	       _IOWR('e', 54, long)
 #define IOC_LIBCFS_NOTIFY_ROUTER	   _IOWR('e', 55, long)
 #define IOC_LIBCFS_UNCONFIGURE	     _IOWR('e', 56, long)
-#define IOC_LIBCFS_PORTALS_COMPATIBILITY   _IOWR('e', 57, long)
+/*	#define IOC_LIBCFS_PORTALS_COMPATIBILITY   _IOWR('e', 57, long) */
 #define IOC_LIBCFS_LNET_DIST	       _IOWR('e', 58, long)
 #define IOC_LIBCFS_CONFIGURE	       _IOWR('e', 59, long)
 #define IOC_LIBCFS_TESTPROTOCOMPAT	 _IOWR('e', 60, long)
 #define IOC_LIBCFS_PING		    _IOWR('e', 61, long)
-#define IOC_LIBCFS_DEBUG_PEER	      _IOWR('e', 62, long)
+/*	#define IOC_LIBCFS_DEBUG_PEER	      _IOWR('e', 62, long) */
 #define IOC_LIBCFS_LNETST		  _IOWR('e', 63, long)
 /* lnd ioctls */
 #define IOC_LIBCFS_REGISTER_MYNID	  _IOWR('e', 70, long)
@@ -138,7 +134,25 @@
 #define IOC_LIBCFS_DEL_INTERFACE	   _IOWR('e', 79, long)
 #define IOC_LIBCFS_GET_INTERFACE	   _IOWR('e', 80, long)
 
-#define IOC_LIBCFS_MAX_NR			     80
+/*
+ * DLC Specific IOCTL numbers.
+ * In order to maintain backward compatibility with any possible external
+ * tools which might be accessing the IOCTL numbers, a new group of IOCTL
+ * numbers has been allocated.
+ */
+#define IOCTL_CONFIG_SIZE		struct lnet_ioctl_config_data
+#define IOC_LIBCFS_ADD_ROUTE		_IOWR(IOC_LIBCFS_TYPE, 81, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_DEL_ROUTE		_IOWR(IOC_LIBCFS_TYPE, 82, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_ROUTE		_IOWR(IOC_LIBCFS_TYPE, 83, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_ADD_NET		_IOWR(IOC_LIBCFS_TYPE, 84, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_DEL_NET		_IOWR(IOC_LIBCFS_TYPE, 85, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_NET		_IOWR(IOC_LIBCFS_TYPE, 86, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_CONFIG_RTR		_IOWR(IOC_LIBCFS_TYPE, 87, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_ADD_BUF		_IOWR(IOC_LIBCFS_TYPE, 88, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_BUF		_IOWR(IOC_LIBCFS_TYPE, 89, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_PEER_INFO	_IOWR(IOC_LIBCFS_TYPE, 90, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_LNET_STATS	_IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_MAX_NR		91
 
 static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
 {
@@ -149,9 +163,9 @@
 	return len;
 }
 
-static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
 {
-	if (data->ioc_len > (1<<30)) {
+	if (data->ioc_hdr.ioc_len > (1 << 30)) {
 		CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
 		return 1;
 	}
@@ -187,7 +201,7 @@
 		CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
 		return 1;
 	}
-	if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_len) {
+	if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
 		CERROR("LIBCFS ioctl: packlen != ioc_len\n");
 		return 1;
 	}
@@ -207,7 +221,9 @@
 
 int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
 int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
-int libcfs_ioctl_popdata(void *arg, void *buf, int size);
+int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
+			     __u32 *buf_len);
+int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
 
 #endif /* __LIBCFS_IOCTL_H__ */
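
The new libcfs_ioctl_getdata_len() prototype above only hands the kernel a __user pointer to the fixed struct libcfs_ioctl_hdr, so a handler has to pull that header across the user/kernel boundary itself before it can size the rest of the request. A minimal sketch of that pattern follows, reusing the same 1<<30 length cap as libcfs_ioctl_is_invalid(); it is an illustrative helper under those assumptions, not the actual libcfs implementation.

#include <linux/uaccess.h>	/* copy_from_user() */
#include <linux/errno.h>
#include "libcfs_ioctl.h"	/* struct libcfs_ioctl_hdr, LIBCFS_IOCTL_VERSION[2] */

/* Hypothetical example mirroring what libcfs_ioctl_getdata_len() is declared to do. */
static int example_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
				     __u32 *buf_len)
{
	struct libcfs_ioctl_hdr hdr;

	/* Copy only the fixed-size header; the bulk payload is read later. */
	if (copy_from_user(&hdr, arg, sizeof(hdr)))
		return -EFAULT;

	/* Accept both the old and the new (DLC-aware) header versions. */
	if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
	    hdr.ioc_version != LIBCFS_IOCTL_VERSION2)
		return -EINVAL;

	/* Reject absurd lengths before anyone tries to allocate a buffer. */
	if (hdr.ioc_len > (1 << 30))
		return -EINVAL;

	*buf_len = hdr.ioc_len;
	return 0;
}
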
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
deleted file mode 100644
index 41f3d81..0000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- *
- * libcfs/include/libcfs/libcfs_kernelcomm.h
- *
- * Kernel <-> userspace communication routines.
- * The definitions below are used in the kernel and userspace.
- *
- */
-
-#ifndef __LIBCFS_KERNELCOMM_H__
-#define __LIBCFS_KERNELCOMM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-/* KUC message header.
- * All current and future KUC messages should use this header.
- * To avoid having to include Lustre headers from libcfs, define this here.
- */
-struct kuc_hdr {
-	__u16 kuc_magic;
-	__u8  kuc_transport;  /* Each new Lustre feature should use a different
-				 transport */
-	__u8  kuc_flags;
-	__u16 kuc_msgtype;    /* Message type or opcode, transport-specific */
-	__u16 kuc_msglen;     /* Including header */
-} __aligned(sizeof(__u64));
-
-#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
-
-#define KUC_MAGIC  0x191C /*Lustre9etLinC */
-#define KUC_FL_BLOCK 0x01   /* Wait for send */
-
-/* kuc_msgtype values are defined in each transport */
-enum kuc_transport_type {
-	KUC_TRANSPORT_GENERIC   = 1,
-	KUC_TRANSPORT_HSM       = 2,
-	KUC_TRANSPORT_CHANGELOG = 3,
-};
-
-enum kuc_generic_message_type {
-	KUC_MSG_SHUTDOWN = 1,
-};
-
-/* prototype for callback function on kuc groups */
-typedef int (*libcfs_kkuc_cb_t)(__u32 data, void *cb_arg);
-
-/* KUC Broadcast Groups. This determines which userspace process hears which
- * messages.  Mutliple transports may be used within a group, or multiple
- * groups may use the same transport.  Broadcast
- * groups need not be used if e.g. a UID is specified instead;
- * use group 0 to signify unicast.
- */
-#define KUC_GRP_HSM	   0x02
-#define KUC_GRP_MAX	   KUC_GRP_HSM
-
-/* Kernel methods */
-int libcfs_kkuc_msg_put(struct file *fp, void *payload);
-int libcfs_kkuc_group_put(int group, void *payload);
-int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group,
-				 __u32 data);
-int libcfs_kkuc_group_rem(int uid, int group);
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
-				     void *cb_arg);
-
-#define LK_FLG_STOP 0x01
-
-/* kernelcomm control structure, passed from userspace to kernel */
-typedef struct lustre_kernelcomm {
-	__u32 lk_wfd;
-	__u32 lk_rfd;
-	__u32 lk_uid;
-	__u32 lk_group;
-	__u32 lk_data;
-	__u32 lk_flags;
-} __packed lustre_kernelcomm;
-
-/* Userspace methods */
-int libcfs_ukuc_start(lustre_kernelcomm *l, int groups);
-int libcfs_ukuc_stop(lustre_kernelcomm *l);
-int libcfs_ukuc_msg_get(lustre_kernelcomm *l, char *buf, int maxsize,
-			       int transport);
-
-#endif /* __LIBCFS_KERNELCOMM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index a80d993..dab4862 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -387,11 +387,6 @@
  * Support for temporary event tracing with minimal Heisenberg effect.
  * -------------------------------------------------------------------- */
 
-struct libcfs_device_userstate {
-	int	   ldu_memhog_pages;
-	struct page   *ldu_memhog_root_page;
-};
-
 #define MKSTR(ptr) ((ptr)) ? (ptr) : ""
 
 static inline int cfs_size_round4(int val)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index d8d2e7d..e02cde5 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -44,8 +44,6 @@
 #define __LIBCFS_STRING_H__
 
 /* libcfs_string.c */
-/* string comparison ignoring case */
-int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
 /* Convert a text string to a bitmask */
 int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
 		 int *oldmask, int minmask, int allmask);
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index aac5900..d94b266 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -118,9 +118,6 @@
 #define CDEBUG_STACK() (0L)
 #endif /* __x86_64__ */
 
-/* initial pid  */
-#define LUSTRE_LNET_PID	  12345
-
 #define __current_nesting_level() (0)
 
 /**
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index 520209f..c04979a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index 75285fd..cb0d6b4 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -48,7 +48,8 @@
 
 /** \defgroup lnet_init_fini Initialization and cleanup
  * The LNet must be properly initialized before any LNet calls can be made.
- * @{ */
+ * @{
+ */
 int LNetNIInit(lnet_pid_t requested_pid);
 int LNetNIFini(void);
 /** @} lnet_init_fini */
@@ -71,7 +72,8 @@
  * it's an entry in the portals table of a process.
  *
  * \see LNetMEAttach
- * @{ */
+ * @{
+ */
 int LNetGetId(unsigned int index, lnet_process_id_t *id);
 int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
 void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
@@ -89,7 +91,8 @@
  * incoming requests based on process ID or the match bits provided in the
  * request. MEs can be dynamically inserted into a match list by LNetMEAttach()
  * and LNetMEInsert(), and removed from its list by LNetMEUnlink().
- * @{ */
+ * @{
+ */
 int LNetMEAttach(unsigned int      portal,
 		 lnet_process_id_t match_id_in,
 		 __u64		   match_bits_in,
@@ -120,7 +123,8 @@
  * The LNet API provides two operations to create MDs: LNetMDAttach()
  * and LNetMDBind(); one operation to unlink and release the resources
  * associated with a MD: LNetMDUnlink().
- * @{ */
+ * @{
+ */
 int LNetMDAttach(lnet_handle_me_t  current_in,
 		 lnet_md_t	   md_in,
 		 lnet_unlink_t     unlink_in,
@@ -154,7 +158,8 @@
  * event from an EQ, and LNetEQWait() can be used to block a process until
  * an EQ has at least one event. LNetEQPoll() can be used to test or wait
  * on multiple EQs.
- * @{ */
+ * @{
+ */
 int LNetEQAlloc(unsigned int       count_in,
 		lnet_eq_handler_t  handler,
 		lnet_handle_eq_t  *handle_out);
@@ -172,7 +177,8 @@
  *
  * The LNet API provides two data movement operations: LNetPut()
  * and LNetGet().
- * @{ */
+ * @{
+ */
 int LNetPut(lnet_nid_t	      self,
 	    lnet_handle_md_t  md_in,
 	    lnet_ack_req_t    ack_req_in,
@@ -192,11 +198,12 @@
 
 /** \defgroup lnet_misc Miscellaneous operations.
  * Miscellaneous operations.
- * @{ */
-
+ * @{
+ */
 int LNetSetLazyPortal(int portal);
 int LNetClearLazyPortal(int portal);
 int LNetCtl(unsigned int cmd, void *arg);
+void LNetDebugPeer(lnet_process_id_t id);
 
 /** @} lnet_misc */
 
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
new file mode 100644
index 0000000..84a19e9
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -0,0 +1,122 @@
+/*
+ * LGPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library.
+ *
+ * LGPL HEADER END
+ *
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ */
+/*
+ * Author: Amir Shehata <amir.shehata@intel.com>
+ */
+
+#ifndef LNET_DLC_H
+#define LNET_DLC_H
+
+#include "../libcfs/libcfs_ioctl.h"
+#include "types.h"
+
+#define MAX_NUM_SHOW_ENTRIES	32
+#define LNET_MAX_STR_LEN	128
+#define LNET_MAX_SHOW_NUM_CPT	128
+#define LNET_UNDEFINED_HOPS	((__u32) -1)
+
+struct lnet_ioctl_net_config {
+	char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
+	__u32 ni_status;
+	__u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
+};
+
+#define LNET_TINY_BUF_IDX	0
+#define LNET_SMALL_BUF_IDX	1
+#define LNET_LARGE_BUF_IDX	2
+
+/* # different router buffer pools */
+#define LNET_NRBPOOLS		(LNET_LARGE_BUF_IDX + 1)
+
+struct lnet_ioctl_pool_cfg {
+	struct {
+		__u32 pl_npages;
+		__u32 pl_nbuffers;
+		__u32 pl_credits;
+		__u32 pl_mincredits;
+	} pl_pools[LNET_NRBPOOLS];
+	__u32 pl_routing;
+};
+
+struct lnet_ioctl_config_data {
+	struct libcfs_ioctl_hdr cfg_hdr;
+
+	__u32 cfg_net;
+	__u32 cfg_count;
+	__u64 cfg_nid;
+	__u32 cfg_ncpts;
+
+	union {
+		struct {
+			__u32 rtr_hop;
+			__u32 rtr_priority;
+			__u32 rtr_flags;
+		} cfg_route;
+		struct {
+			char net_intf[LNET_MAX_STR_LEN];
+			__s32 net_peer_timeout;
+			__s32 net_peer_tx_credits;
+			__s32 net_peer_rtr_credits;
+			__s32 net_max_tx_credits;
+			__u32 net_cksum_algo;
+			__u32 net_pad;
+		} cfg_net;
+		struct {
+			__u32 buf_enable;
+			__s32 buf_tiny;
+			__s32 buf_small;
+			__s32 buf_large;
+		} cfg_buffers;
+	} cfg_config_u;
+
+	char cfg_bulk[0];
+};
+
+struct lnet_ioctl_peer {
+	struct libcfs_ioctl_hdr pr_hdr;
+	__u32 pr_count;
+	__u32 pr_pad;
+	__u64 pr_nid;
+
+	union {
+		struct {
+			char cr_aliveness[LNET_MAX_STR_LEN];
+			__u32 cr_refcount;
+			__u32 cr_ni_peer_tx_credits;
+			__u32 cr_peer_tx_credits;
+			__u32 cr_peer_rtr_credits;
+			__u32 cr_peer_min_rtr_credits;
+			__u32 cr_peer_tx_qnob;
+			__u32 cr_ncpt;
+		} pr_peer_credits;
+	} pr_lnd_u;
+};
+
+struct lnet_ioctl_lnet_stats {
+	struct libcfs_ioctl_hdr st_hdr;
+	struct lnet_counters st_cntrs;
+};
+
+#endif /* LNET_DLC_H */
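
Every DLC request defined here begins with the shared libcfs_ioctl_hdr, so userspace fills in the header version and total length and then issues one of the IOC_LIBCFS_* numbers reserved above. A rough userspace sketch for querying one network interface follows; it assumes a userspace copy of these definitions, that /dev/lnet is the libcfs character device, and that cfg_count carries the interface index, none of which is spelled out in this patch. The same header-first pattern applies to the peer and statistics queries (struct lnet_ioctl_peer, struct lnet_ioctl_lnet_stats).

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "lib-dlc.h"	/* assumed userspace copy of the definitions above */

/* Hypothetical helper: ask LNet for the configuration of one network interface. */
static int example_get_net(int fd, int idx, struct lnet_ioctl_config_data *data)
{
	memset(data, 0, sizeof(*data));
	data->cfg_hdr.ioc_version = LIBCFS_IOCTL_VERSION2;	/* DLC-aware header */
	data->cfg_hdr.ioc_len = sizeof(*data);			/* total request size */
	data->cfg_count = idx;					/* assumed index field */

	return ioctl(fd, IOC_LIBCFS_GET_NET, data);
}

int main(void)
{
	struct lnet_ioctl_config_data data;
	int fd, rc;

	fd = open("/dev/lnet", O_RDWR);		/* assumed device node */
	if (fd < 0)
		return 1;

	rc = example_get_net(fd, 0, &data);
	/* On success, data.cfg_config_u.cfg_net holds the per-NI tunables. */
	close(fd);
	return rc ? 1 : 0;
}
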
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index b67a660..a5f1aec 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -39,6 +39,7 @@
 #include "api.h"
 #include "lnet.h"
 #include "lib-types.h"
+#include "lib-dlc.h"
 
 extern lnet_t	the_lnet;	/* THE network */
 
@@ -64,6 +65,19 @@
 /** exclusive lock */
 #define LNET_LOCK_EX		CFS_PERCPT_LOCK_EX
 
+static inline int lnet_is_route_alive(lnet_route_t *route)
+{
+	/* gateway is down */
+	if (!route->lr_gateway->lp_alive)
+		return 0;
+	/* no NI status, assume it's alive */
+	if ((route->lr_gateway->lp_ping_feats &
+	     LNET_PING_FEAT_NI_STATUS) == 0)
+		return 1;
+	/* has NI status, check # down NIs */
+	return route->lr_downis == 0;
+}
+
 static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh)
 {
 	return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
@@ -72,25 +86,26 @@
 
 static inline int lnet_md_exhausted(lnet_libmd_t *md)
 {
-	return (md->md_threshold == 0 ||
-		((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
+	return (!md->md_threshold ||
+		((md->md_options & LNET_MD_MAX_SIZE) &&
 		 md->md_offset + md->md_max_size > md->md_length));
 }
 
 static inline int lnet_md_unlinkable(lnet_libmd_t *md)
 {
-	/* Should unlink md when its refcount is 0 and either:
+	/*
+	 * Should unlink md when its refcount is 0 and either:
 	 *  - md has been flagged for deletion (by auto unlink or
 	 *    LNetM[DE]Unlink, in the latter case md may not be exhausted).
 	 *  - auto unlink is on and md is exhausted.
 	 */
-	if (md->md_refcount != 0)
+	if (md->md_refcount)
 		return 0;
 
-	if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
+	if (md->md_flags & LNET_MD_FLAG_ZOMBIE)
 		return 1;
 
-	return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
+	return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) &&
 		lnet_md_exhausted(md));
 }
 
@@ -102,8 +117,10 @@
 {
 	unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK;
 
-	/* LNET_CPT_NUMBER doesn't have to be power2, which means we can
-	 * get illegal cpt from it's invalid cookie */
+	/*
+	 * LNET_CPT_NUMBER doesn't have to be a power of 2, which means we
+	 * can get an illegal cpt from an invalid cookie
+	 */
 	return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER;
 }
 
@@ -183,18 +200,17 @@
 	unsigned int size;
 	unsigned int niov;
 
-	if ((umd->options & LNET_MD_KIOV) != 0) {
+	if (umd->options & LNET_MD_KIOV) {
 		niov = umd->length;
 		size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
 	} else {
-		niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
-		       umd->length : 1;
+		niov = umd->options & LNET_MD_IOVEC ? umd->length : 1;
 		size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
 	}
 
 	LIBCFS_ALLOC(md, size);
 
-	if (md != NULL) {
+	if (md) {
 		/* Set here in case of early free */
 		md->md_options = umd->options;
 		md->md_niov = niov;
@@ -209,7 +225,7 @@
 {
 	unsigned int size;
 
-	if ((md->md_options & LNET_MD_KIOV) != 0)
+	if (md->md_options & LNET_MD_KIOV)
 		size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
 	else
 		size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
@@ -264,7 +280,7 @@
 static inline void
 lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
 {
-	if (eq == NULL) {
+	if (!eq) {
 		LNetInvalidateHandle(handle);
 		return;
 	}
@@ -278,7 +294,7 @@
 	lnet_libhandle_t *lh;
 
 	lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_eq_t, eq_lh);
@@ -300,7 +316,7 @@
 	cpt = lnet_cpt_of_cookie(handle->cookie);
 	lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
 				handle->cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_libmd_t, md_lh);
@@ -319,7 +335,7 @@
 	cpt = lnet_cpt_of_cookie(wh->wh_object_cookie);
 	lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
 				wh->wh_object_cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_libmd_t, md_lh);
@@ -341,7 +357,7 @@
 	cpt = lnet_cpt_of_cookie(handle->cookie);
 	lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
 				handle->cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_me_t, me_lh);
@@ -361,14 +377,14 @@
 {
 	LASSERT(lp->lp_refcount > 0);
 	lp->lp_refcount--;
-	if (lp->lp_refcount == 0)
+	if (!lp->lp_refcount)
 		lnet_destroy_peer_locked(lp);
 }
 
 static inline int
 lnet_isrouter(lnet_peer_t *lp)
 {
-	return lp->lp_rtr_refcount != 0;
+	return lp->lp_rtr_refcount ? 1 : 0;
 }
 
 static inline void
@@ -406,6 +422,8 @@
 }
 
 void lnet_ni_free(lnet_ni_t *ni);
+lnet_ni_t *
+lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist);
 
 static inline int
 lnet_nid2peerhash(lnet_nid_t nid)
@@ -430,6 +448,8 @@
 lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
 lnet_ni_t *lnet_net2ni(__u32 net);
 
+extern int portal_rotor;
+
 int lnet_init(void);
 void lnet_fini(void);
 
@@ -443,11 +463,26 @@
 void lnet_destroy_routes(void);
 int lnet_get_route(int idx, __u32 *net, __u32 *hops,
 		   lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
+int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
+			int *peer_timeout, int *peer_tx_credits,
+			int *peer_rtr_cr, int *max_tx_credits,
+			struct lnet_ioctl_net_config *net_config);
+int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
+
 void lnet_router_debugfs_init(void);
 void lnet_router_debugfs_fini(void);
 int  lnet_rtrpools_alloc(int im_a_router);
-void lnet_rtrpools_free(void);
+void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages);
+int lnet_rtrpools_adjust(int tiny, int small, int large);
+int lnet_rtrpools_enable(void);
+void lnet_rtrpools_disable(void);
+void lnet_rtrpools_free(int keep_pools);
 lnet_remotenet_t *lnet_find_net_locked(__u32 net);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
+		    __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
+		    __s32 credits);
+int lnet_dyn_del_ni(__u32 net);
+int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
 
 int lnet_islocalnid(lnet_nid_t nid);
 int lnet_islocalnet(__u32 net);
@@ -466,6 +501,8 @@
 int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
 void lnet_return_tx_credits_locked(lnet_msg_t *msg);
 void lnet_return_rx_credits_locked(lnet_msg_t *msg);
+void lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp);
+void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt);
 
 /* portals functions */
 /* portals attributes */
@@ -660,27 +697,30 @@
 void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
 void lnet_swap_pinginfo(lnet_ping_info_t *info);
 
-int lnet_ping_target_init(void);
-void lnet_ping_target_fini(void);
-int lnet_ping(lnet_process_id_t id, int timeout_ms,
-	      lnet_process_id_t *ids, int n_ids);
-
 int lnet_parse_ip2nets(char **networksp, char *ip2nets);
 int lnet_parse_routes(char *route_str, int *im_a_router);
 int lnet_parse_networks(struct list_head *nilist, char *networks);
+int lnet_net_unique(__u32 net, struct list_head *nilist);
 
 int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
 lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
 				   lnet_nid_t nid);
-void lnet_peer_tables_cleanup(void);
+void lnet_peer_tables_cleanup(lnet_ni_t *ni);
 void lnet_peer_tables_destroy(void);
 int lnet_peer_tables_create(void);
 void lnet_debug_peer(lnet_nid_t nid);
+int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
+		       char aliveness[LNET_MAX_STR_LEN],
+		       __u32 *cpt_iter, __u32 *refcount,
+		       __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+		       __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
+		       __u32 *peer_tx_qnob);
 
 static inline void
 lnet_peer_set_alive(lnet_peer_t *lp)
 {
-	lp->lp_last_alive = lp->lp_last_query = jiffies;
+	lp->lp_last_query = jiffies;
+	lp->lp_last_alive = jiffies;
 	if (!lp->lp_alive)
 		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
 }
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 3bb9468..07b8db1 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -38,7 +38,6 @@
 #include <linux/kthread.h>
 #include <linux/uio.h>
 #include <linux/types.h>
-#include <net/sock.h>
 
 #include "types.h"
 
@@ -85,8 +84,7 @@
 	unsigned int	msg_receiving:1;	/* being received */
 	unsigned int	msg_txcredit:1;		/* taken an NI send credit */
 	unsigned int	msg_peertxcredit:1;	/* taken a peer send credit */
-	unsigned int	msg_rtrcredit:1;	/* taken a global
-						   router credit */
+	unsigned int	msg_rtrcredit:1;	/* taken a global router credit */
 	unsigned int	msg_peerrtrcredit:1;	/* taken a peer router credit */
 	unsigned int	msg_onactivelist:1;	/* on the activelist */
 
@@ -113,7 +111,7 @@
 } lnet_libhandle_t;
 
 #define lh_entry(ptr, type, member) \
-	((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
+	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
 
 typedef struct lnet_eq {
 	struct list_head	  eq_list;
@@ -190,7 +188,8 @@
 	void (*lnd_shutdown)(struct lnet_ni *ni);
 	int  (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
 
-	/* In data movement APIs below, payload buffers are described as a set
+	/*
+	 * In data movement APIs below, payload buffers are described as a set
 	 * of 'niov' fragments which are...
 	 * EITHER
 	 *    in virtual memory (struct iovec *iov != NULL)
@@ -201,30 +200,36 @@
 	 * fragments to start from
 	 */
 
-	/* Start sending a preformatted message.  'private' is NULL for PUT and
+	/*
+	 * Start sending a preformatted message.  'private' is NULL for PUT and
 	 * GET messages; otherwise this is a response to an incoming message
 	 * and 'private' is the 'private' passed to lnet_parse().  Return
 	 * non-zero for immediate failure, otherwise complete later with
-	 * lnet_finalize() */
+	 * lnet_finalize()
+	 */
 	int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg);
 
-	/* Start receiving 'mlen' bytes of payload data, skipping the following
+	/*
+	 * Start receiving 'mlen' bytes of payload data, skipping the following
 	 * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
 	 * lnet_parse().  Return non-zero for immediate failure, otherwise
 	 * complete later with lnet_finalize().  This also gives back a receive
-	 * credit if the LND does flow control. */
+	 * credit if the LND does flow control.
+	 */
 	int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
 			int delayed, unsigned int niov,
 			struct kvec *iov, lnet_kiov_t *kiov,
 			unsigned int offset, unsigned int mlen,
 			unsigned int rlen);
 
-	/* lnet_parse() has had to delay processing of this message
+	/*
+	 * lnet_parse() has had to delay processing of this message
 	 * (e.g. waiting for a forwarding buffer or send credits).  Give the
 	 * LND a chance to free urgently needed resources.  If called, return 0
 	 * for success and do NOT give back a receive credit; that has to wait
 	 * until lnd_recv() gets called.  On failure return < 0 and
-	 * release resources; lnd_recv() will not be called. */
+	 * release resources; lnd_recv() will not be called.
+	 */
 	int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
 			      lnet_msg_t *msg, void **new_privatep);
 
@@ -272,11 +277,14 @@
 
 #define LNET_PROTO_PING_MATCHBITS	0x8000000000000000LL
 
-/* NB: value of these features equal to LNET_PROTO_PING_VERSION_x
- * of old LNet, so there shouldn't be any compatibility issue */
+/*
+ * NB: value of these features equal to LNET_PROTO_PING_VERSION_x
+ * of old LNet, so there shouldn't be any compatibility issue
+ */
 #define LNET_PING_FEAT_INVAL		(0)		/* no feature */
 #define LNET_PING_FEAT_BASE		(1 << 0)	/* just a ping */
 #define LNET_PING_FEAT_NI_STATUS	(1 << 1)	/* return NI status */
+#define LNET_PING_FEAT_RTE_DISABLED	(1 << 2)	/* Routing is disabled */
 
 #define LNET_PING_FEAT_MASK		(LNET_PING_FEAT_BASE | \
 					 LNET_PING_FEAT_NI_STATUS)
@@ -343,13 +351,17 @@
 struct lnet_peer_table {
 	int			 pt_version;	/* /proc validity stamp */
 	int			 pt_number;	/* # peers extant */
+	/* # zombies to go to deathrow (and not there yet) */
+	int			 pt_zombies;
 	struct list_head	 pt_deathrow;	/* zombie peers */
 	struct list_head	*pt_hash;	/* NID->peer hash */
 };
 
-/* peer aliveness is enabled only on routers for peers in a network where the
- * lnet_ni_t::ni_peertimeout has been set to a positive value */
-#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
+/*
+ * peer aliveness is enabled only on routers for peers in a network where the
+ * lnet_ni_t::ni_peertimeout has been set to a positive value
+ */
+#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
 					 (lp)->lp_ni->ni_peertimeout > 0)
 
 typedef struct {
@@ -384,7 +396,10 @@
 	struct list_head	rbp_msgs;	/* messages blocking
 						   for a buffer */
 	int			rbp_npages;	/* # pages in each buffer */
-	int			rbp_nbuffers;	/* # buffers */
+	/* requested number of buffers */
+	int			rbp_req_nbuffers;
+	/* # buffers actually allocated */
+	int			rbp_nbuffers;
 	int			rbp_credits;	/* # free buffers /
 						     blocked messages */
 	int			rbp_mincredits;	/* low water mark */
@@ -398,7 +413,12 @@
 
 #define LNET_PEER_HASHSIZE	503	/* prime! */
 
-#define LNET_NRBPOOLS		3	/* # different router buffer pools */
+#define LNET_TINY_BUF_IDX	0
+#define LNET_SMALL_BUF_IDX	1
+#define LNET_LARGE_BUF_IDX	2
+
+/* # different router buffer pools */
+#define LNET_NRBPOOLS		(LNET_LARGE_BUF_IDX + 1)
 
 enum {
 	/* Didn't match anything */
@@ -433,12 +453,16 @@
 #define LNET_MT_HASH_BITS		8
 #define LNET_MT_HASH_SIZE		(1 << LNET_MT_HASH_BITS)
 #define LNET_MT_HASH_MASK		(LNET_MT_HASH_SIZE - 1)
-/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
- * the last entry is reserved for MEs with ignore-bits */
+/*
+ * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
+ * the last entry is reserved for MEs with ignore-bits
+ */
 #define LNET_MT_HASH_IGNORE		LNET_MT_HASH_SIZE
-/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
+/*
+ * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
  * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
- * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] */
+ * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE]
+ */
 #define LNET_MT_BITS_U64		6	/* 2^6 bits */
 #define LNET_MT_EXHAUSTED_BITS		(LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
 #define LNET_MT_EXHAUSTED_BMAP		((1 << LNET_MT_EXHAUSTED_BITS) + 1)
@@ -448,8 +472,10 @@
 	/* reserved for upcoming patches, CPU partition ID */
 	unsigned int		 mt_cpt;
 	unsigned int		 mt_portal;	/* portal index */
-	/* match table is set as "enabled" if there's non-exhausted MD
-	 * attached on mt_mhash, it's only valid for wildcard portal */
+	/*
+	 * match table is set as "enabled" if there's non-exhausted MD
+	 * attached on mt_mhash, it's only valid for wildcard portal
+	 */
 	unsigned int		 mt_enabled;
 	/* bitmap to flag whether MEs on mt_hash are exhausted or not */
 	__u64			 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
@@ -553,8 +579,6 @@
 	/* dying LND instances */
 	struct list_head		  ln_nis_zombie;
 	lnet_ni_t			 *ln_loni;	/* the loopback NI */
-	/* NI to wait for events in */
-	lnet_ni_t			 *ln_eq_waitni;
 
 	/* remote networks with routes to them */
 	struct list_head		 *ln_remote_nets_hash;
@@ -584,8 +608,6 @@
 
 	struct mutex			  ln_api_mutex;
 	struct mutex			  ln_lnd_mutex;
-	int				  ln_init;	/* lnet_init()
-							   called? */
 	/* Have I called LNetNIInit myself? */
 	int				  ln_niinit_self;
 	/* LNetNIInit/LNetNIFini counter */
@@ -600,12 +622,24 @@
 	/* registered LNDs */
 	struct list_head		  ln_lnds;
 
-	/* space for network names */
-	char				 *ln_network_tokens;
-	int				  ln_network_tokens_nob;
 	/* test protocol compatibility flags */
 	int				  ln_testprotocompat;
 
+	/*
+	 * 0 - load the NIs from the mod params
+	 * 1 - do not load the NIs from the mod params
+	 * Reverse logic to ensure that other calls to LNetNIInit
+	 * need no change
+	 */
+	bool				  ln_nis_from_mod_params;
+
+	/*
+	 * waitq for the router checker.  As long as there are no routes in
+	 * the list, the router checker will sleep on this queue.  When
+	 * routes are added, the thread will wake up.
+	 */
+	wait_queue_head_t		  ln_rc_waitq;
+
 } lnet_t;
 
 #endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
index bdd69b2..4b64f62 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetctl.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
@@ -10,10 +10,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * header for lnet ioctl
  */
 #ifndef _LNETCTL_H_
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index fd1e0fd..4170445 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -245,20 +245,20 @@
 	int		 lstio_ses_force;	/* IN: force create ? */
 	/** IN: session features */
 	unsigned	 lstio_ses_feats;
-	lst_sid_t	*lstio_ses_idp;		/* OUT: session id */
+	lst_sid_t __user *lstio_ses_idp;	/* OUT: session id */
 	int		 lstio_ses_nmlen;	/* IN: name length */
-	char		*lstio_ses_namep;	/* IN: session name */
+	char __user	 *lstio_ses_namep;	/* IN: session name */
 } lstio_session_new_args_t;
 
 /* query current session */
 typedef struct {
-	lst_sid_t		*lstio_ses_idp;		/* OUT: session id */
-	int			*lstio_ses_keyp;	/* OUT: local key */
+	lst_sid_t __user	*lstio_ses_idp;		/* OUT: session id */
+	int __user		*lstio_ses_keyp;	/* OUT: local key */
 	/** OUT: session features */
-	unsigned		*lstio_ses_featp;
-	lstcon_ndlist_ent_t	*lstio_ses_ndinfo;	/* OUT: */
+	unsigned __user		*lstio_ses_featp;
+	lstcon_ndlist_ent_t __user *lstio_ses_ndinfo;	/* OUT: */
 	int			 lstio_ses_nmlen;	/* IN: name length */
-	char			*lstio_ses_namep;	/* OUT: session name */
+	char __user		*lstio_ses_namep;	/* OUT: session name */
 } lstio_session_info_args_t;
 
 /* delete a session */
@@ -283,26 +283,26 @@
 	int			 lstio_dbg_timeout;	/* IN: timeout of
 							       debug */
 	int			 lstio_dbg_nmlen;	/* IN: len of name */
-	char			*lstio_dbg_namep;	/* IN: name of
+	char __user		*lstio_dbg_namep;	/* IN: name of
 							       group|batch */
 	int			 lstio_dbg_count;	/* IN: # of test nodes
 							       to debug */
-	lnet_process_id_t	*lstio_dbg_idsp;	/* IN: id of test
+	lnet_process_id_t __user *lstio_dbg_idsp;	/* IN: id of test
 							       nodes */
-	struct list_head	*lstio_dbg_resultp;	/* OUT: list head of
+	struct list_head __user	*lstio_dbg_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_debug_args_t;
 
 typedef struct {
-	int	 lstio_grp_key;		/* IN: session key */
-	int	 lstio_grp_nmlen;	/* IN: name length */
-	char	*lstio_grp_namep;	/* IN: group name */
+	int		 lstio_grp_key;		/* IN: session key */
+	int		 lstio_grp_nmlen;	/* IN: name length */
+	char __user	*lstio_grp_namep;	/* IN: group name */
 } lstio_group_add_args_t;
 
 typedef struct {
-	int	 lstio_grp_key;		/* IN: session key */
-	int	 lstio_grp_nmlen;	/* IN: name length */
-	char	*lstio_grp_namep;	/* IN: group name */
+	int		 lstio_grp_key;		/* IN: session key */
+	int		 lstio_grp_nmlen;	/* IN: name length */
+	char __user	*lstio_grp_namep;	/* IN: group name */
 } lstio_group_del_args_t;
 
 #define LST_GROUP_CLEAN		1	/* remove inactive nodes in the group */
@@ -315,22 +315,22 @@
 	int			 lstio_grp_opc;		/* IN: OPC */
 	int			 lstio_grp_args;	/* IN: arguments */
 	int			 lstio_grp_nmlen;	/* IN: name length */
-	char			*lstio_grp_namep;	/* IN: group name */
+	char __user		*lstio_grp_namep;	/* IN: group name */
 	int			 lstio_grp_count;	/* IN: # of nodes id */
-	lnet_process_id_t	*lstio_grp_idsp;	/* IN: array of nodes */
-	struct list_head	*lstio_grp_resultp;	/* OUT: list head of
+	lnet_process_id_t __user *lstio_grp_idsp;	/* IN: array of nodes */
+	struct list_head __user	*lstio_grp_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_group_update_args_t;
 
 typedef struct {
 	int			 lstio_grp_key;		/* IN: session key */
 	int			 lstio_grp_nmlen;	/* IN: name length */
-	char			*lstio_grp_namep;	/* IN: group name */
+	char __user		*lstio_grp_namep;	/* IN: group name */
 	int			 lstio_grp_count;	/* IN: # of nodes */
 	/** OUT: session features */
-	unsigned		*lstio_grp_featp;
-	lnet_process_id_t	*lstio_grp_idsp;	/* IN: nodes */
-	struct list_head	*lstio_grp_resultp;	/* OUT: list head of
+	unsigned __user		*lstio_grp_featp;
+	lnet_process_id_t __user *lstio_grp_idsp;	/* IN: nodes */
+	struct list_head __user	*lstio_grp_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_group_nodes_args_t;
 
@@ -338,18 +338,18 @@
 	int	 lstio_grp_key;		/* IN: session key */
 	int	 lstio_grp_idx;		/* IN: group idx */
 	int	 lstio_grp_nmlen;	/* IN: name len */
-	char	*lstio_grp_namep;	/* OUT: name */
+	char __user *lstio_grp_namep;	/* OUT: name */
 } lstio_group_list_args_t;
 
 typedef struct {
 	int			 lstio_grp_key;		/* IN: session key */
 	int			 lstio_grp_nmlen;	/* IN: name len */
-	char			*lstio_grp_namep;	/* IN: name */
-	lstcon_ndlist_ent_t	*lstio_grp_entp;	/* OUT: description of
+	char __user		*lstio_grp_namep;	/* IN: name */
+	lstcon_ndlist_ent_t __user *lstio_grp_entp;	/* OUT: description of
 								group */
-	int			*lstio_grp_idxp;	/* IN/OUT: node index */
-	int			*lstio_grp_ndentp;	/* IN/OUT: # of nodent */
-	lstcon_node_ent_t	*lstio_grp_dentsp;	/* OUT: nodent array */
+	int __user		*lstio_grp_idxp;	/* IN/OUT: node index */
+	int __user		*lstio_grp_ndentp;	/* IN/OUT: # of nodent */
+	lstcon_node_ent_t __user *lstio_grp_dentsp;	/* OUT: nodent array */
 } lstio_group_info_args_t;
 
 #define LST_DEFAULT_BATCH	"batch"			/* default batch name */
@@ -357,13 +357,13 @@
 typedef struct {
 	int	 lstio_bat_key;		/* IN: session key */
 	int	 lstio_bat_nmlen;	/* IN: name length */
-	char	*lstio_bat_namep;	/* IN: batch name */
+	char __user *lstio_bat_namep;	/* IN: batch name */
 } lstio_batch_add_args_t;
 
 typedef struct {
 	int	 lstio_bat_key;		/* IN: session key */
 	int	 lstio_bat_nmlen;	/* IN: name length */
-	char	*lstio_bat_namep;	/* IN: batch name */
+	char __user *lstio_bat_namep;	/* IN: batch name */
 } lstio_batch_del_args_t;
 
 typedef struct {
@@ -371,8 +371,8 @@
 	int			 lstio_bat_timeout;	/* IN: timeout for
 							       the batch */
 	int			 lstio_bat_nmlen;	/* IN: name length */
-	char			*lstio_bat_namep;	/* IN: batch name */
-	struct list_head	*lstio_bat_resultp;	/* OUT: list head of
+	char __user		*lstio_bat_namep;	/* IN: batch name */
+	struct list_head __user	*lstio_bat_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_batch_run_args_t;
 
@@ -381,8 +381,8 @@
 	int			 lstio_bat_force;	/* IN: abort unfinished
 							       test RPC */
 	int			 lstio_bat_nmlen;	/* IN: name length */
-	char			*lstio_bat_namep;	/* IN: batch name */
-	struct list_head	*lstio_bat_resultp;	/* OUT: list head of
+	char __user		*lstio_bat_namep;	/* IN: batch name */
+	struct list_head __user	*lstio_bat_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_batch_stop_args_t;
 
@@ -394,8 +394,8 @@
 	int			 lstio_bat_timeout;	/* IN: timeout for
 							       waiting */
 	int			 lstio_bat_nmlen;	/* IN: name length */
-	char			*lstio_bat_namep;	/* IN: batch name */
-	struct list_head	*lstio_bat_resultp;	/* OUT: list head of
+	char __user		*lstio_bat_namep;	/* IN: batch name */
+	struct list_head __user	*lstio_bat_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_batch_query_args_t;
 
@@ -403,21 +403,21 @@
 	int	 lstio_bat_key;		/* IN: session key */
 	int	 lstio_bat_idx;		/* IN: index */
 	int	 lstio_bat_nmlen;	/* IN: name length */
-	char	*lstio_bat_namep;	/* IN: batch name */
+	char __user *lstio_bat_namep;	/* IN: batch name */
 } lstio_batch_list_args_t;
 
 typedef struct {
 	int			 lstio_bat_key;		/* IN: session key */
 	int			 lstio_bat_nmlen;	/* IN: name length */
-	char			*lstio_bat_namep;	/* IN: name */
+	char __user		*lstio_bat_namep;	/* IN: name */
 	int			 lstio_bat_server;	/* IN: query server
 							       or not */
 	int			 lstio_bat_testidx;	/* IN: test index */
-	lstcon_test_batch_ent_t	*lstio_bat_entp;	/* OUT: batch ent */
+	lstcon_test_batch_ent_t __user *lstio_bat_entp;	/* OUT: batch ent */
 
-	int			*lstio_bat_idxp;	/* IN/OUT: index of node */
-	int			*lstio_bat_ndentp;	/* IN/OUT: # of nodent */
-	lstcon_node_ent_t	*lstio_bat_dentsp;	/* array of nodent */
+	int __user		*lstio_bat_idxp;	/* IN/OUT: index of node */
+	int __user		*lstio_bat_ndentp;	/* IN/OUT: # of nodent */
+	lstcon_node_ent_t __user *lstio_bat_dentsp;	/* array of nodent */
 } lstio_batch_info_args_t;
 
 /* add stat in session */
@@ -427,10 +427,10 @@
 							       stat request */
 	int			 lstio_sta_nmlen;	/* IN: group name
 							       length */
-	char			*lstio_sta_namep;	/* IN: group name */
+	char __user		*lstio_sta_namep;	/* IN: group name */
 	int			 lstio_sta_count;	/* IN: # of pid */
-	lnet_process_id_t	*lstio_sta_idsp;	/* IN: pid */
-	struct list_head	*lstio_sta_resultp;	/* OUT: list head of
+	lnet_process_id_t __user *lstio_sta_idsp;	/* IN: pid */
+	struct list_head __user	*lstio_sta_resultp;	/* OUT: list head of
 								result buffer */
 } lstio_stat_args_t;
 
@@ -445,7 +445,7 @@
 typedef struct {
 	int		  lstio_tes_key;	/* IN: session key */
 	int		  lstio_tes_bat_nmlen;	/* IN: batch name len */
-	char		 *lstio_tes_bat_name;	/* IN: batch name */
+	char __user	 *lstio_tes_bat_name;	/* IN: batch name */
 	int		  lstio_tes_type;	/* IN: test type */
 	int		  lstio_tes_oneside;	/* IN: one sided test */
 	int		  lstio_tes_loop;	/* IN: loop count */
@@ -457,20 +457,20 @@
 						       destination groups */
 	int		  lstio_tes_sgrp_nmlen;	/* IN: source group
 						       name length */
-	char		 *lstio_tes_sgrp_name;	/* IN: group name */
+	char __user	 *lstio_tes_sgrp_name;	/* IN: group name */
 	int		  lstio_tes_dgrp_nmlen;	/* IN: destination group
 						       name length */
-	char		 *lstio_tes_dgrp_name;	/* IN: group name */
+	char __user	 *lstio_tes_dgrp_name;	/* IN: group name */
 
 	int		  lstio_tes_param_len;	/* IN: param buffer len */
-	void		 *lstio_tes_param;	/* IN: parameter for specified
+	void __user	 *lstio_tes_param;	/* IN: parameter for specified
 						       test:
 						       lstio_bulk_param_t,
 						       lstio_ping_param_t,
 						       ... more */
-	int		 *lstio_tes_retp;	/* OUT: private returned
+	int __user	 *lstio_tes_retp;	/* OUT: private returned
 							value */
-	struct list_head *lstio_tes_resultp;	/* OUT: list head of
+	struct list_head __user *lstio_tes_resultp;/* OUT: list head of
 							result buffer */
 } lstio_test_args_t;
 
diff --git a/drivers/staging/lustre/include/linux/lnet/nidstr.h b/drivers/staging/lustre/include/linux/lnet/nidstr.h
index 4fc9ddc..937fcc9 100644
--- a/drivers/staging/lustre/include/linux/lnet/nidstr.h
+++ b/drivers/staging/lustre/include/linux/lnet/nidstr.h
@@ -34,8 +34,10 @@
  *  Lustre Network Driver types.
  */
 enum {
-	/* Only add to these values (i.e. don't ever change or redefine them):
-	 * network addresses depend on them... */
+	/*
+	 * Only add to these values (i.e. don't ever change or redefine them):
+	 * network addresses depend on them...
+	 */
 	QSWLND		= 1,
 	SOCKLND		= 2,
 	GMLND		= 3,
@@ -67,6 +69,7 @@
 	return libcfs_lnd2str_r(lnd, libcfs_next_nidstring(),
 				LNET_NIDSTR_SIZE);
 }
+
 int libcfs_str2lnd(const char *str);
 char *libcfs_net2str_r(__u32 net, char *buf, size_t buf_size);
 static inline char *libcfs_net2str(__u32 net)
@@ -74,12 +77,14 @@
 	return libcfs_net2str_r(net, libcfs_next_nidstring(),
 				LNET_NIDSTR_SIZE);
 }
+
 char *libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size);
 static inline char *libcfs_nid2str(lnet_nid_t nid)
 {
 	return libcfs_nid2str_r(nid, libcfs_next_nidstring(),
 				LNET_NIDSTR_SIZE);
 }
+
 __u32 libcfs_str2net(const char *str);
 lnet_nid_t libcfs_str2nid(const char *str);
 int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
index 599c9f6..bc32403 100644
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -85,14 +85,17 @@
 {
 	msg->ksm_csum = 0;
 	msg->ksm_type = type;
-	msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1]  = 0;
+	msg->ksm_zc_cookies[0] = 0;
+	msg->ksm_zc_cookies[1] = 0;
 }
 
 #define KSOCK_MSG_NOOP	0xC0	/* ksm_u empty */
 #define KSOCK_MSG_LNET	0xC1	/* lnet msg */
 
-/* We need to know this number to parse hello msg from ksocklnd in
- * other LND (usocklnd, for example) */
+/*
+ * We need to know this number to parse hello msg from ksocklnd in
+ * other LND (usocklnd, for example)
+ */
 #define KSOCK_PROTO_V2	2
 #define KSOCK_PROTO_V3	3
 
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 1163018..81d01f1 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -36,10 +36,12 @@
 #include <linux/types.h>
 
 /** \addtogroup lnet
- * @{ */
+ * @{
+ */
 
 /** \addtogroup lnet_addr
- * @{ */
+ * @{
+ */
 
 /** Portal reserved for LNet's own use.
  * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
@@ -116,10 +118,12 @@
 	lnet_pid_t	pid;
 } WIRE_ATTR lnet_process_id_packed_t;
 
-/* The wire handle's interface cookie only matches one network interface in
+/*
+ * The wire handle's interface cookie only matches one network interface in
  * one epoch (i.e. new cookie when the interface restarts or the node
  * reboots).  The object cookie only matches one object on that interface
- * during that object's lifetime (i.e. no cookie re-use). */
+ * during that object's lifetime (i.e. no cookie re-use).
+ */
 typedef struct {
 	__u64	wh_interface_cookie;
 	__u64	wh_object_cookie;
@@ -133,10 +137,12 @@
 	LNET_MSG_HELLO,
 } lnet_msg_type_t;
 
-/* The variant fields of the portals message header are aligned on an 8
+/*
+ * The variant fields of the portals message header are aligned on an 8
  * byte boundary in the message header.  Note that all types used in these
  * wire structs MUST be fixed size and the smaller types are placed at the
- * end. */
+ * end.
+ */
 typedef struct lnet_ack {
 	lnet_handle_wire_t	dst_wmd;
 	__u64			match_bits;
@@ -185,7 +191,8 @@
 	} msg;
 } WIRE_ATTR lnet_hdr_t;
 
-/* A HELLO message contains a magic number and protocol version
+/*
+ * A HELLO message contains a magic number and protocol version
  * code in the header's dest_nid, the peer's NID in the src_nid, and
  * LNET_MSG_HELLO in the type field.  All other common fields are zero
  * (including payload_size; i.e. no payload).
@@ -208,8 +215,10 @@
 #define LNET_PROTO_PING_MAGIC		0x70696E67 /* 'ping' */
 
 /* Placeholder for a future "unified" protocol across all LNDs */
-/* Current LNDs that receive a request with this magic will respond with a
- * "stub" reply using their current protocol */
+/*
+ * Current LNDs that receive a request with this magic will respond with a
+ * "stub" reply using their current protocol
+ */
 #define LNET_PROTO_MAGIC		0x45726963 /* ! */
 
 #define LNET_PROTO_TCP_VERSION_MAJOR	1
@@ -258,7 +267,7 @@
 
 #define LNET_MAX_INTERFACES    16
 
-/*
+/**
  * Objects maintained by the LNet are accessed through handles. Handle types
  * have names of the form lnet_handle_xx_t, where xx is one of the two letter
  * object type codes ('eq' for event queue, 'md' for memory descriptor, and
@@ -318,7 +327,8 @@
 /** @} lnet_addr */
 
 /** \addtogroup lnet_me
- * @{ */
+ * @{
+ */
 
 /**
  * Specifies whether the match entry or memory descriptor should be unlinked
@@ -348,7 +358,8 @@
 /** @} lnet_me */
 
 /** \addtogroup lnet_md
- * @{ */
+ * @{
+ */
 
 /**
  * Defines the visible parts of a memory descriptor. Values of this type
@@ -450,9 +461,11 @@
 	lnet_handle_eq_t eq_handle;
 } lnet_md_t;
 
-/* Max Transfer Unit (minimum supported everywhere).
+/*
+ * Max Transfer Unit (minimum supported everywhere).
  * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local. */
+ * these limits are system wide and not interface-local.
+ */
 #define LNET_MTU_BITS	20
 #define LNET_MTU	(1 << LNET_MTU_BITS)
 
@@ -506,7 +519,8 @@
 /** @} lnet_md */
 
 /** \addtogroup lnet_eq
- * @{ */
+ * @{
+ */
 
 /**
  * Six types of events can be logged in an event queue.
@@ -640,7 +654,8 @@
 /** @} lnet_eq */
 
 /** \addtogroup lnet_data
- * @{ */
+ * @{
+ */
 
 /**
  * Specify whether an acknowledgment should be sent by target when the PUT
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index cb74ae7..98570b3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -63,7 +63,7 @@
 		sum = ((sum << 1) | (sum >> 31)) + *c++;
 
 	/* ensure I don't return 0 (== no checksum) */
-	return (sum == 0) ? 1 : sum;
+	return !sum ? 1 : sum;
 }
 
 static char *kiblnd_msgtype2str(int type)
@@ -145,7 +145,7 @@
 	int i;
 
 	LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
-		 msg->ibm_type == IBLND_MSG_PUT_ACK);
+		msg->ibm_type == IBLND_MSG_PUT_ACK);
 
 	rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
 			      &msg->ibm_u.get.ibgm_rd :
@@ -189,8 +189,10 @@
 {
 	kib_net_t *net = ni->ni_data;
 
-	/* CAVEAT EMPTOR! all message fields not set here should have been
-	 * initialised previously. */
+	/*
+	 * CAVEAT EMPTOR! all message fields not set here should have been
+	 * initialised previously.
+	 */
 	msg->ibm_magic    = IBLND_MSG_MAGIC;
 	msg->ibm_version  = version;
 	/*   ibm_type */
@@ -249,11 +251,13 @@
 		return -EPROTO;
 	}
 
-	/* checksum must be computed with ibm_cksum zero and BEFORE anything
-	 * gets flipped */
+	/*
+	 * checksum must be computed with ibm_cksum zero and BEFORE anything
+	 * gets flipped
+	 */
 	msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
 	msg->ibm_cksum = 0;
-	if (msg_cksum != 0 &&
+	if (msg_cksum &&
 	    msg_cksum != kiblnd_cksum(msg, msg_nob)) {
 		CERROR("Bad checksum\n");
 		return -EPROTO;
@@ -326,17 +330,15 @@
 	int cpt = lnet_cpt_of_nid(nid);
 	unsigned long flags;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 	LASSERT(nid != LNET_NID_ANY);
 
 	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
-	if (peer == NULL) {
+	if (!peer) {
 		CERROR("Cannot allocate peer\n");
 		return -ENOMEM;
 	}
 
-	memset(peer, 0, sizeof(*peer));	 /* zero flags etc */
-
 	peer->ibp_ni = ni;
 	peer->ibp_nid = nid;
 	peer->ibp_error = 0;
@@ -350,7 +352,7 @@
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	/* always called with a ref on ni, which prevents ni being shutdown */
-	LASSERT(net->ibn_shutdown == 0);
+	LASSERT(!net->ibn_shutdown);
 
 	/* npeers only grows with the global lock held */
 	atomic_inc(&net->ibn_npeers);
@@ -365,33 +367,36 @@
 {
 	kib_net_t *net = peer->ibp_ni->ni_data;
 
-	LASSERT(net != NULL);
-	LASSERT(atomic_read(&peer->ibp_refcount) == 0);
+	LASSERT(net);
+	LASSERT(!atomic_read(&peer->ibp_refcount));
 	LASSERT(!kiblnd_peer_active(peer));
-	LASSERT(peer->ibp_connecting == 0);
-	LASSERT(peer->ibp_accepting == 0);
+	LASSERT(!peer->ibp_connecting);
+	LASSERT(!peer->ibp_accepting);
 	LASSERT(list_empty(&peer->ibp_conns));
 	LASSERT(list_empty(&peer->ibp_tx_queue));
 
 	LIBCFS_FREE(peer, sizeof(*peer));
 
-	/* NB a peer's connections keep a reference on their peer until
+	/*
+	 * NB a peer's connections keep a reference on their peer until
 	 * they are destroyed, so we can be assured that _all_ state to do
 	 * with this peer has been cleaned up when its refcount drops to
-	 * zero. */
+	 * zero.
+	 */
 	atomic_dec(&net->ibn_npeers);
 }
 
 kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
 {
-	/* the caller is responsible for accounting the additional reference
-	 * that this creates */
+	/*
+	 * the caller is responsible for accounting the additional reference
+	 * that this creates
+	 */
 	struct list_head *peer_list = kiblnd_nid2peerlist(nid);
 	struct list_head *tmp;
 	kib_peer_t *peer;
 
 	list_for_each(tmp, peer_list) {
-
 		peer = list_entry(tmp, kib_peer_t, ibp_list);
 
 		LASSERT(peer->ibp_connecting > 0 || /* creating conns */
@@ -431,13 +436,11 @@
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
-
 		list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
 			LASSERT(peer->ibp_connecting > 0 ||
-				 peer->ibp_accepting > 0 ||
-				 !list_empty(&peer->ibp_conns));
+				peer->ibp_accepting > 0 ||
+				!list_empty(&peer->ibp_conns));
 
 			if (peer->ibp_ni != ni)
 				continue;
@@ -474,8 +477,10 @@
 		}
 		/* NB closing peer's last conn unlinked it. */
 	}
-	/* NB peer now unlinked; might even be freed if the peer table had the
-	 * last ref on it. */
+	/*
+	 * NB peer now unlinked; might even be freed if the peer table had the
+	 * last ref on it.
+	 */
 }
 
 static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
@@ -493,7 +498,8 @@
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	if (nid != LNET_NID_ANY) {
-		lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+		lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+		hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
 	} else {
 		lo = 0;
 		hi = kiblnd_data.kib_peer_hash_size - 1;
@@ -503,8 +509,8 @@
 		list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
 			LASSERT(peer->ibp_connecting > 0 ||
-				 peer->ibp_accepting > 0 ||
-				 !list_empty(&peer->ibp_conns));
+				peer->ibp_accepting > 0 ||
+				!list_empty(&peer->ibp_conns));
 
 			if (peer->ibp_ni != ni)
 				continue;
@@ -516,7 +522,7 @@
 				LASSERT(list_empty(&peer->ibp_conns));
 
 				list_splice_init(&peer->ibp_tx_queue,
-						     &zombies);
+						 &zombies);
 			}
 
 			kiblnd_del_peer_locked(peer);
@@ -544,11 +550,10 @@
 
 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
 		list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
 			LASSERT(peer->ibp_connecting > 0 ||
-				 peer->ibp_accepting > 0 ||
-				 !list_empty(&peer->ibp_conns));
+				peer->ibp_accepting > 0 ||
+				!list_empty(&peer->ibp_conns));
 
 			if (peer->ibp_ni != ni)
 				continue;
@@ -558,7 +563,7 @@
 					continue;
 
 				conn = list_entry(ctmp, kib_conn_t,
-						      ibc_list);
+						  ibc_list);
 				kiblnd_conn_addref(conn);
 				read_unlock_irqrestore(
 					&kiblnd_data.kib_global_lock,
@@ -597,12 +602,12 @@
 	int mtu;
 
 	/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
-	if (cmid->route.path_rec == NULL)
+	if (!cmid->route.path_rec)
 		return;
 
 	mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
 	LASSERT(mtu >= 0);
-	if (mtu != 0)
+	if (mtu)
 		cmid->route.path_rec->mtu = mtu;
 }
 
@@ -619,13 +624,13 @@
 		return 0;
 
 	mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
-	if (mask == NULL)
+	if (!mask)
 		return 0;
 
 	/* hash NID to CPU id in this partition... */
 	off = do_div(nid, cpumask_weight(mask));
 	for_each_cpu(i, mask) {
-		if (off-- == 0)
+		if (!off--)
 			return i % vectors;
 	}
 
@@ -634,15 +639,17 @@
 }
 
 kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
-				int state, int version)
+			       int state, int version)
 {
-	/* CAVEAT EMPTOR:
+	/*
+	 * CAVEAT EMPTOR:
 	 * If the new conn is created successfully it takes over the caller's
 	 * ref on 'peer'.  It also "owns" 'cmid' and destroys it when it itself
 	 * is destroyed.  On failure, the caller's ref on 'peer' remains and
 	 * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
 	 * to destroy 'cmid' here since I'm called from the CM which still has
-	 * its ref on 'cmid'). */
+	 * its ref on 'cmid').
+	 */
 	rwlock_t *glock = &kiblnd_data.kib_global_lock;
 	kib_net_t *net = peer->ibp_ni->ni_data;
 	kib_dev_t *dev;
@@ -656,7 +663,7 @@
 	int rc;
 	int i;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 	LASSERT(!in_interrupt());
 
 	dev = net->ibn_dev;
@@ -668,14 +675,14 @@
 
 	LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
 			 sizeof(*init_qp_attr));
-	if (init_qp_attr == NULL) {
+	if (!init_qp_attr) {
 		CERROR("Can't allocate qp_attr for %s\n",
 		       libcfs_nid2str(peer->ibp_nid));
 		goto failed_0;
 	}
 
 	LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
-	if (conn == NULL) {
+	if (!conn) {
 		CERROR("Can't allocate connection for %s\n",
 		       libcfs_nid2str(peer->ibp_nid));
 		goto failed_1;
@@ -697,7 +704,7 @@
 
 	LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
 			 sizeof(*conn->ibc_connvars));
-	if (conn->ibc_connvars == NULL) {
+	if (!conn->ibc_connvars) {
 		CERROR("Can't allocate in-progress connection state\n");
 		goto failed_2;
 	}
@@ -732,14 +739,14 @@
 
 	LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
 			 IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
-	if (conn->ibc_rxs == NULL) {
+	if (!conn->ibc_rxs) {
 		CERROR("Cannot allocate RX buffers\n");
 		goto failed_2;
 	}
 
 	rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
 				IBLND_RX_MSG_PAGES(version));
-	if (rc != 0)
+	if (rc)
 		goto failed_2;
 
 	kiblnd_map_rx_descs(conn);
@@ -758,7 +765,7 @@
 	conn->ibc_cq = cq;
 
 	rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't request completion notificiation: %d\n", rc);
 		goto failed_2;
 	}
@@ -777,7 +784,7 @@
 	conn->ibc_sched = sched;
 
 	rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
 		       rc, init_qp_attr->cap.max_send_wr,
 		       init_qp_attr->cap.max_recv_wr);
@@ -794,21 +801,25 @@
 	for (i = 0; i < IBLND_RX_MSGS(version); i++) {
 		rc = kiblnd_post_rx(&conn->ibc_rxs[i],
 				    IBLND_POSTRX_NO_CREDIT);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't post rxmsg: %d\n", rc);
 
 			/* Make posted receives complete */
 			kiblnd_abort_receives(conn);
 
-			/* correct # of posted buffers
-			 * NB locking needed now I'm racing with completion */
+			/*
+			 * correct # of posted buffers
+			 * NB locking needed now I'm racing with completion
+			 */
 			spin_lock_irqsave(&sched->ibs_lock, flags);
 			conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
 			spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-			/* cmid will be destroyed by CM(ofed) after cm_callback
+			/*
+			 * cmid will be destroyed by CM(ofed) after cm_callback
 			 * returned, so we can't refer it anymore
-			 * (by kiblnd_connd()->kiblnd_destroy_conn) */
+			 * (by kiblnd_connd()->kiblnd_destroy_conn)
+			 */
 			rdma_destroy_qp(conn->ibc_cmid);
 			conn->ibc_cmid = NULL;
 
@@ -822,7 +833,7 @@
 
 	/* Init successful! */
 	LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
-		 state == IBLND_CONN_PASSIVE_WAIT);
+		state == IBLND_CONN_PASSIVE_WAIT);
 	conn->ibc_state = state;
 
 	/* 1 more conn */
@@ -844,15 +855,15 @@
 	int rc;
 
 	LASSERT(!in_interrupt());
-	LASSERT(atomic_read(&conn->ibc_refcount) == 0);
+	LASSERT(!atomic_read(&conn->ibc_refcount));
 	LASSERT(list_empty(&conn->ibc_early_rxs));
 	LASSERT(list_empty(&conn->ibc_tx_noops));
 	LASSERT(list_empty(&conn->ibc_tx_queue));
 	LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
 	LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
 	LASSERT(list_empty(&conn->ibc_active_txs));
-	LASSERT(conn->ibc_noops_posted == 0);
-	LASSERT(conn->ibc_nsends_posted == 0);
+	LASSERT(!conn->ibc_noops_posted);
+	LASSERT(!conn->ibc_nsends_posted);
 
 	switch (conn->ibc_state) {
 	default:
@@ -861,7 +872,7 @@
 
 	case IBLND_CONN_DISCONNECTED:
 		/* connvars should have been freed already */
-		LASSERT(conn->ibc_connvars == NULL);
+		LASSERT(!conn->ibc_connvars);
 		break;
 
 	case IBLND_CONN_INIT:
@@ -869,28 +880,28 @@
 	}
 
 	/* conn->ibc_cmid might be destroyed by CM already */
-	if (cmid != NULL && cmid->qp != NULL)
+	if (cmid && cmid->qp)
 		rdma_destroy_qp(cmid);
 
-	if (conn->ibc_cq != NULL) {
+	if (conn->ibc_cq) {
 		rc = ib_destroy_cq(conn->ibc_cq);
-		if (rc != 0)
+		if (rc)
 			CWARN("Error destroying CQ: %d\n", rc);
 	}
 
-	if (conn->ibc_rx_pages != NULL)
+	if (conn->ibc_rx_pages)
 		kiblnd_unmap_rx_descs(conn);
 
-	if (conn->ibc_rxs != NULL) {
+	if (conn->ibc_rxs) {
 		LIBCFS_FREE(conn->ibc_rxs,
 			    IBLND_RX_MSGS(conn->ibc_version)
 			      * sizeof(kib_rx_t));
 	}
 
-	if (conn->ibc_connvars != NULL)
+	if (conn->ibc_connvars)
 		LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
 
-	if (conn->ibc_hdev != NULL)
+	if (conn->ibc_hdev)
 		kiblnd_hdev_decref(conn->ibc_hdev);
 
 	/* See CAVEAT EMPTOR above in kiblnd_create_conn */
@@ -927,7 +938,7 @@
 }
 
 int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
-				     int version, __u64 incarnation)
+				    int version, __u64 incarnation)
 {
 	kib_conn_t *conn;
 	struct list_head *ctmp;
@@ -967,20 +978,20 @@
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-	if (nid != LNET_NID_ANY)
-		lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
-	else {
+	if (nid != LNET_NID_ANY) {
+		lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+		hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+	} else {
 		lo = 0;
 		hi = kiblnd_data.kib_peer_hash_size - 1;
 	}
 
 	for (i = lo; i <= hi; i++) {
 		list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-
 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
 			LASSERT(peer->ibp_connecting > 0 ||
-				 peer->ibp_accepting > 0 ||
-				 !list_empty(&peer->ibp_conns));
+				peer->ibp_accepting > 0 ||
+				!list_empty(&peer->ibp_conns));
 
 			if (peer->ibp_ni != ni)
 				continue;
@@ -998,7 +1009,7 @@
 	if (nid == LNET_NID_ANY)
 		return 0;
 
-	return (count == 0) ? -ENOENT : 0;
+	return !count ? -ENOENT : 0;
 }
 
 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
@@ -1027,14 +1038,14 @@
 
 		rc = 0;
 		conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
-		if (conn == NULL) {
+		if (!conn) {
 			rc = -ENOENT;
 			break;
 		}
 
-		LASSERT(conn->ibc_cmid != NULL);
+		LASSERT(conn->ibc_cmid);
 		data->ioc_nid = conn->ibc_peer->ibp_nid;
-		if (conn->ibc_cmid->route.path_rec == NULL)
+		if (!conn->ibc_cmid->route.path_rec)
 			data->ioc_u32[0] = 0; /* iWarp has no path MTU */
 		else
 			data->ioc_u32[0] =
@@ -1065,7 +1076,7 @@
 	read_lock_irqsave(glock, flags);
 
 	peer = kiblnd_find_peer_locked(nid);
-	if (peer != NULL) {
+	if (peer) {
 		LASSERT(peer->ibp_connecting > 0 || /* creating conns */
 			 peer->ibp_accepting > 0 ||
 			 !list_empty(&peer->ibp_conns));  /* active conn */
@@ -1074,12 +1085,14 @@
 
 	read_unlock_irqrestore(glock, flags);
 
-	if (last_alive != 0)
+	if (last_alive)
 		*when = last_alive;
 
-	/* peer is not persistent in hash, trigger peer creation
-	 * and connection establishment with a NULL tx */
-	if (peer == NULL)
+	/*
+	 * peer is not persistent in hash, trigger peer creation
+	 * and connection establishment with a NULL tx
+	 */
+	if (!peer)
 		kiblnd_launch_tx(ni, NULL, nid);
 
 	CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
@@ -1093,7 +1106,7 @@
 	int i;
 
 	for (i = 0; i < npages; i++) {
-		if (p->ibp_pages[i] != NULL)
+		if (p->ibp_pages[i])
 			__free_page(p->ibp_pages[i]);
 	}
 
@@ -1107,7 +1120,7 @@
 
 	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
 			 offsetof(kib_pages_t, ibp_pages[npages]));
-	if (p == NULL) {
+	if (!p) {
 		CERROR("Can't allocate descriptor for %d pages\n", npages);
 		return -ENOMEM;
 	}
@@ -1119,7 +1132,7 @@
 		p->ibp_pages[i] = alloc_pages_node(
 				    cfs_cpt_spread_node(lnet_cpt_table(), cpt),
 				    GFP_NOFS, 0);
-		if (p->ibp_pages[i] == NULL) {
+		if (!p->ibp_pages[i]) {
 			CERROR("Can't allocate page %d of %d\n", i, npages);
 			kiblnd_free_pages(p);
 			return -ENOMEM;
@@ -1135,8 +1148,8 @@
 	kib_rx_t *rx;
 	int i;
 
-	LASSERT(conn->ibc_rxs != NULL);
-	LASSERT(conn->ibc_hdev != NULL);
+	LASSERT(conn->ibc_rxs);
+	LASSERT(conn->ibc_hdev);
 
 	for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
 		rx = &conn->ibc_rxs[i];
@@ -1174,7 +1187,7 @@
 						       IBLND_MSG_SIZE,
 						       DMA_FROM_DEVICE);
 		LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
-						   rx->rx_msgaddr));
+						  rx->rx_msgaddr));
 		KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
 
 		CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
@@ -1198,9 +1211,9 @@
 	kib_tx_t *tx;
 	int i;
 
-	LASSERT(tpo->tpo_pool.po_allocated == 0);
+	LASSERT(!tpo->tpo_pool.po_allocated);
 
-	if (hdev == NULL)
+	if (!hdev)
 		return;
 
 	for (i = 0; i < tpo->tpo_pool.po_size; i++) {
@@ -1224,7 +1237,7 @@
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 	while (dev->ibd_failover) {
 		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-		if (i++ % 50 == 0)
+		if (!(i++ % 50))
 			CDEBUG(D_NET, "%s: Wait for failover\n",
 			       dev->ibd_ifname);
 		schedule_timeout(cfs_time_seconds(1) / 100);
@@ -1252,7 +1265,7 @@
 	int ipage;
 	int i;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 
 	dev = net->ibn_dev;
 
@@ -1260,7 +1273,7 @@
 	CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
 
 	/* No fancy arithmetic when we do the buffer calculations */
-	CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
+	CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE));
 
 	tpo->tpo_hdev = kiblnd_current_hdev(dev);
 
@@ -1275,7 +1288,7 @@
 			tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
 			IBLND_MSG_SIZE, DMA_TO_DEVICE);
 		LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
-						   tx->tx_msgaddr));
+						  tx->tx_msgaddr));
 		KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
 
 		list_add(&tx->tx_list, &pool->po_free_list);
@@ -1295,7 +1308,7 @@
 {
 	__u64 index;
 
-	LASSERT(hdev->ibh_mrs[0] != NULL);
+	LASSERT(hdev->ibh_mrs[0]);
 
 	if (hdev->ibh_nmrs == 1)
 		return hdev->ibh_mrs[0];
@@ -1315,7 +1328,7 @@
 	struct ib_mr *mr;
 	int i;
 
-	LASSERT(hdev->ibh_mrs[0] != NULL);
+	LASSERT(hdev->ibh_mrs[0]);
 
 	if (*kiblnd_tunables.kib_map_on_demand > 0 &&
 	    *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
@@ -1329,10 +1342,10 @@
 		mr = kiblnd_find_dma_mr(hdev,
 					rd->rd_frags[i].rf_addr,
 					rd->rd_frags[i].rf_nob);
-		if (prev_mr == NULL)
+		if (!prev_mr)
 			prev_mr = mr;
 
-		if (mr == NULL || prev_mr != mr) {
+		if (!mr || prev_mr != mr) {
 			/* Can't be covered by a single MR */
 			mr = NULL;
 			break;
@@ -1344,15 +1357,15 @@
 
 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
 {
-	LASSERT(pool->fpo_map_count == 0);
+	LASSERT(!pool->fpo_map_count);
 
-	if (pool->fpo_fmr_pool != NULL)
+	if (pool->fpo_fmr_pool)
 		ib_destroy_fmr_pool(pool->fpo_fmr_pool);
 
-	if (pool->fpo_hdev != NULL)
+	if (pool->fpo_hdev)
 		kiblnd_hdev_decref(pool->fpo_hdev);
 
-	LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+	LIBCFS_FREE(pool, sizeof(*pool));
 }
 
 static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
@@ -1387,7 +1400,7 @@
 	kib_dev_t *dev = fps->fps_net->ibn_dev;
 	kib_fmr_pool_t *fpo;
 	struct ib_fmr_pool_param param = {
-		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+		.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
 		.page_shift        = PAGE_SHIFT,
 		.access            = (IB_ACCESS_LOCAL_WRITE |
 				      IB_ACCESS_REMOTE_WRITE),
@@ -1399,7 +1412,7 @@
 	int rc;
 
 	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
-	if (fpo == NULL)
+	if (!fpo)
 		return -ENOMEM;
 
 	fpo->fpo_hdev = kiblnd_current_hdev(dev);
@@ -1410,7 +1423,7 @@
 		CERROR("Failed to create FMR pool: %d\n", rc);
 
 		kiblnd_hdev_decref(fpo->fpo_hdev);
-		LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
+		LIBCFS_FREE(fpo, sizeof(*fpo));
 		return rc;
 	}
 
@@ -1424,7 +1437,7 @@
 static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
 				    struct list_head *zombies)
 {
-	if (fps->fps_net == NULL) /* initialized? */
+	if (!fps->fps_net) /* initialized? */
 		return;
 
 	spin_lock(&fps->fps_lock);
@@ -1434,7 +1447,7 @@
 						 kib_fmr_pool_t, fpo_list);
 		fpo->fpo_failed = 1;
 		list_del(&fpo->fpo_list);
-		if (fpo->fpo_map_count == 0)
+		if (!fpo->fpo_map_count)
 			list_add(&fpo->fpo_list, zombies);
 		else
 			list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
@@ -1445,7 +1458,7 @@
 
 static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
 {
-	if (fps->fps_net != NULL) { /* initialized? */
+	if (fps->fps_net) { /* initialized? */
 		kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
 		kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
 	}
@@ -1458,7 +1471,7 @@
 	kib_fmr_pool_t *fpo;
 	int rc;
 
-	memset(fps, 0, sizeof(kib_fmr_poolset_t));
+	memset(fps, 0, sizeof(*fps));
 
 	fps->fps_net = net;
 	fps->fps_cpt = cpt;
@@ -1469,7 +1482,7 @@
 	INIT_LIST_HEAD(&fps->fps_failed_pool_list);
 
 	rc = kiblnd_create_fmr_pool(fps, &fpo);
-	if (rc == 0)
+	if (!rc)
 		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
 
 	return rc;
@@ -1477,7 +1490,7 @@
 
 static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
 {
-	if (fpo->fpo_map_count != 0) /* still in use */
+	if (fpo->fpo_map_count) /* still in use */
 		return 0;
 	if (fpo->fpo_failed)
 		return 1;
@@ -1494,11 +1507,11 @@
 	int rc;
 
 	rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
-	if (status != 0) {
+	if (status) {
 		rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
-		LASSERT(rc == 0);
+		LASSERT(!rc);
 	}
 
 	fmr->fmr_pool = NULL;
@@ -1563,11 +1576,9 @@
 
 	if (fps->fps_increasing) {
 		spin_unlock(&fps->fps_lock);
-		CDEBUG(D_NET,
-			"Another thread is allocating new FMR pool, waiting for her to complete\n");
+		CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
 		schedule();
 		goto again;
-
 	}
 
 	if (time_before(cfs_time_current(), fps->fps_next_retry)) {
@@ -1583,7 +1594,7 @@
 	rc = kiblnd_create_fmr_pool(fps, &fpo);
 	spin_lock(&fps->fps_lock);
 	fps->fps_increasing = 0;
-	if (rc == 0) {
+	if (!rc) {
 		fps->fps_version++;
 		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
 	} else {
@@ -1597,7 +1608,7 @@
 static void kiblnd_fini_pool(kib_pool_t *pool)
 {
 	LASSERT(list_empty(&pool->po_free_list));
-	LASSERT(pool->po_allocated == 0);
+	LASSERT(!pool->po_allocated);
 
 	CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
 }
@@ -1606,7 +1617,7 @@
 {
 	CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
 
-	memset(pool, 0, sizeof(kib_pool_t));
+	memset(pool, 0, sizeof(*pool));
 	INIT_LIST_HEAD(&pool->po_free_list);
 	pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
 	pool->po_owner    = ps;
@@ -1621,14 +1632,14 @@
 		pool = list_entry(head->next, kib_pool_t, po_list);
 		list_del(&pool->po_list);
 
-		LASSERT(pool->po_owner != NULL);
+		LASSERT(pool->po_owner);
 		pool->po_owner->ps_pool_destroy(pool);
 	}
 }
 
 static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
 {
-	if (ps->ps_net == NULL) /* initialized? */
+	if (!ps->ps_net) /* initialized? */
 		return;
 
 	spin_lock(&ps->ps_lock);
@@ -1637,7 +1648,7 @@
 					    kib_pool_t, po_list);
 		po->po_failed = 1;
 		list_del(&po->po_list);
-		if (po->po_allocated == 0)
+		if (!po->po_allocated)
 			list_add(&po->po_list, zombies);
 		else
 			list_add(&po->po_list, &ps->ps_failed_pool_list);
@@ -1647,7 +1658,7 @@
 
 static void kiblnd_fini_poolset(kib_poolset_t *ps)
 {
-	if (ps->ps_net != NULL) { /* initialized? */
+	if (ps->ps_net) { /* initialized? */
 		kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
 		kiblnd_destroy_pool_list(&ps->ps_pool_list);
 	}
@@ -1663,7 +1674,7 @@
 	kib_pool_t *pool;
 	int rc;
 
-	memset(ps, 0, sizeof(kib_poolset_t));
+	memset(ps, 0, sizeof(*ps));
 
 	ps->ps_cpt          = cpt;
 	ps->ps_net          = net;
@@ -1680,7 +1691,7 @@
 	INIT_LIST_HEAD(&ps->ps_failed_pool_list);
 
 	rc = ps->ps_pool_create(ps, size, &pool);
-	if (rc == 0)
+	if (!rc)
 		list_add(&pool->po_list, &ps->ps_pool_list);
 	else
 		CERROR("Failed to create the first pool for %s\n", ps->ps_name);
@@ -1690,7 +1701,7 @@
 
 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
 {
-	if (pool->po_allocated != 0) /* still in use */
+	if (pool->po_allocated) /* still in use */
 		return 0;
 	if (pool->po_failed)
 		return 1;
@@ -1706,7 +1717,7 @@
 
 	spin_lock(&ps->ps_lock);
 
-	if (ps->ps_node_fini != NULL)
+	if (ps->ps_node_fini)
 		ps->ps_node_fini(pool, node);
 
 	LASSERT(pool->po_allocated > 0);
@@ -1744,7 +1755,7 @@
 		node = pool->po_free_list.next;
 		list_del(node);
 
-		if (ps->ps_node_init != NULL) {
+		if (ps->ps_node_init) {
 			/* still hold the lock */
 			ps->ps_node_init(pool, node);
 		}
@@ -1777,7 +1788,7 @@
 
 	spin_lock(&ps->ps_lock);
 	ps->ps_increasing = 0;
-	if (rc == 0) {
+	if (!rc) {
 		list_add_tail(&pool->po_list, &ps->ps_pool_list);
 	} else {
 		ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
@@ -1794,37 +1805,37 @@
 	kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
 	int i;
 
-	LASSERT(pool->po_allocated == 0);
+	LASSERT(!pool->po_allocated);
 
-	if (tpo->tpo_tx_pages != NULL) {
+	if (tpo->tpo_tx_pages) {
 		kiblnd_unmap_tx_pool(tpo);
 		kiblnd_free_pages(tpo->tpo_tx_pages);
 	}
 
-	if (tpo->tpo_tx_descs == NULL)
+	if (!tpo->tpo_tx_descs)
 		goto out;
 
 	for (i = 0; i < pool->po_size; i++) {
 		kib_tx_t *tx = &tpo->tpo_tx_descs[i];
 
 		list_del(&tx->tx_list);
-		if (tx->tx_pages != NULL)
+		if (tx->tx_pages)
 			LIBCFS_FREE(tx->tx_pages,
 				    LNET_MAX_IOV *
 				    sizeof(*tx->tx_pages));
-		if (tx->tx_frags != NULL)
+		if (tx->tx_frags)
 			LIBCFS_FREE(tx->tx_frags,
 				    IBLND_MAX_RDMA_FRAGS *
 					    sizeof(*tx->tx_frags));
-		if (tx->tx_wrq != NULL)
+		if (tx->tx_wrq)
 			LIBCFS_FREE(tx->tx_wrq,
 				    (1 + IBLND_MAX_RDMA_FRAGS) *
 				    sizeof(*tx->tx_wrq));
-		if (tx->tx_sge != NULL)
+		if (tx->tx_sge)
 			LIBCFS_FREE(tx->tx_sge,
 				    (1 + IBLND_MAX_RDMA_FRAGS) *
 				    sizeof(*tx->tx_sge));
-		if (tx->tx_rd != NULL)
+		if (tx->tx_rd)
 			LIBCFS_FREE(tx->tx_rd,
 				    offsetof(kib_rdma_desc_t,
 					     rd_frags[IBLND_MAX_RDMA_FRAGS]));
@@ -1834,7 +1845,7 @@
 		    pool->po_size * sizeof(kib_tx_t));
 out:
 	kiblnd_fini_pool(pool);
-	LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+	LIBCFS_FREE(tpo, sizeof(*tpo));
 }
 
 static int kiblnd_tx_pool_size(int ncpts)
@@ -1853,7 +1864,7 @@
 	kib_tx_pool_t *tpo;
 
 	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
-	if (tpo == NULL) {
+	if (!tpo) {
 		CERROR("Failed to allocate TX pool\n");
 		return -ENOMEM;
 	}
@@ -1864,15 +1875,15 @@
 	tpo->tpo_tx_pages = NULL;
 
 	npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
-	if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
+	if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
 		CERROR("Can't allocate tx pages: %d\n", npg);
-		LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+		LIBCFS_FREE(tpo, sizeof(*tpo));
 		return -ENOMEM;
 	}
 
 	LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
 			 size * sizeof(kib_tx_t));
-	if (tpo->tpo_tx_descs == NULL) {
+	if (!tpo->tpo_tx_descs) {
 		CERROR("Can't allocate %d tx descriptors\n", size);
 		ps->ps_pool_destroy(pool);
 		return -ENOMEM;
@@ -1884,17 +1895,17 @@
 		kib_tx_t *tx = &tpo->tpo_tx_descs[i];
 
 		tx->tx_pool = tpo;
-		if (ps->ps_net->ibn_fmr_ps != NULL) {
+		if (ps->ps_net->ibn_fmr_ps) {
 			LIBCFS_CPT_ALLOC(tx->tx_pages,
 					 lnet_cpt_table(), ps->ps_cpt,
 					 LNET_MAX_IOV * sizeof(*tx->tx_pages));
-			if (tx->tx_pages == NULL)
+			if (!tx->tx_pages)
 				break;
 		}
 
 		LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
 				 IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
-		if (tx->tx_frags == NULL)
+		if (!tx->tx_frags)
 			break;
 
 		sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
@@ -1902,19 +1913,19 @@
 		LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
 				 (1 + IBLND_MAX_RDMA_FRAGS) *
 				 sizeof(*tx->tx_wrq));
-		if (tx->tx_wrq == NULL)
+		if (!tx->tx_wrq)
 			break;
 
 		LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
 				 (1 + IBLND_MAX_RDMA_FRAGS) *
 				 sizeof(*tx->tx_sge));
-		if (tx->tx_sge == NULL)
+		if (!tx->tx_sge)
 			break;
 
 		LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
 				 offsetof(kib_rdma_desc_t,
 					  rd_frags[IBLND_MAX_RDMA_FRAGS]));
-		if (tx->tx_rd == NULL)
+		if (!tx->tx_rd)
 			break;
 	}
 
@@ -1945,23 +1956,23 @@
 		kib_tx_poolset_t *tps;
 		kib_fmr_poolset_t *fps;
 
-		if (net->ibn_tx_ps != NULL) {
+		if (net->ibn_tx_ps) {
 			tps = net->ibn_tx_ps[i];
 			kiblnd_fini_poolset(&tps->tps_poolset);
 		}
 
-		if (net->ibn_fmr_ps != NULL) {
+		if (net->ibn_fmr_ps) {
 			fps = net->ibn_fmr_ps[i];
 			kiblnd_fini_fmr_poolset(fps);
 		}
 	}
 
-	if (net->ibn_tx_ps != NULL) {
+	if (net->ibn_tx_ps) {
 		cfs_percpt_free(net->ibn_tx_ps);
 		net->ibn_tx_ps = NULL;
 	}
 
-	if (net->ibn_fmr_ps != NULL) {
+	if (net->ibn_fmr_ps) {
 		cfs_percpt_free(net->ibn_fmr_ps);
 		net->ibn_fmr_ps = NULL;
 	}
@@ -1975,7 +1986,7 @@
 	int i;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-	if (*kiblnd_tunables.kib_map_on_demand == 0 &&
+	if (!*kiblnd_tunables.kib_map_on_demand &&
 	    net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
 		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 		goto create_tx_pool;
@@ -1996,7 +2007,7 @@
 	 * TX pool must be created later than FMR, see LU-2268
 	 * for details
 	 */
-	LASSERT(net->ibn_tx_ps == NULL);
+	LASSERT(!net->ibn_tx_ps);
 
 	/*
 	 * premapping can fail if ibd_nmr > 1, so we always create
@@ -2005,21 +2016,21 @@
 
 	net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
 					   sizeof(kib_fmr_poolset_t));
-	if (net->ibn_fmr_ps == NULL) {
+	if (!net->ibn_fmr_ps) {
 		CERROR("Failed to allocate FMR pool array\n");
 		rc = -ENOMEM;
 		goto failed;
 	}
 
 	for (i = 0; i < ncpts; i++) {
-		cpt = (cpts == NULL) ? i : cpts[i];
+		cpt = !cpts ? i : cpts[i];
 		rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
 					     kiblnd_fmr_pool_size(ncpts),
 					     kiblnd_fmr_flush_trigger(ncpts));
-		if (rc == -ENOSYS && i == 0) /* no FMR */
+		if (rc == -ENOSYS && !i) /* no FMR */
 			break;
 
-		if (rc != 0) { /* a real error */
+		if (rc) { /* a real error */
 			CERROR("Can't initialize FMR pool for CPT %d: %d\n",
 			       cpt, rc);
 			goto failed;
@@ -2040,21 +2051,21 @@
  create_tx_pool:
 	net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
 					  sizeof(kib_tx_poolset_t));
-	if (net->ibn_tx_ps == NULL) {
+	if (!net->ibn_tx_ps) {
 		CERROR("Failed to allocate tx pool array\n");
 		rc = -ENOMEM;
 		goto failed;
 	}
 
 	for (i = 0; i < ncpts; i++) {
-		cpt = (cpts == NULL) ? i : cpts[i];
+		cpt = !cpts ? i : cpts[i];
 		rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
 					 cpt, net, "TX",
 					 kiblnd_tx_pool_size(ncpts),
 					 kiblnd_create_tx_pool,
 					 kiblnd_destroy_tx_pool,
 					 kiblnd_tx_init, NULL);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't initialize TX pool for CPT %d: %d\n",
 			       cpt, rc);
 			goto failed;
@@ -2064,14 +2075,16 @@
 	return 0;
  failed:
 	kiblnd_net_fini_pools(net);
-	LASSERT(rc != 0);
+	LASSERT(rc);
 	return rc;
 }
 
 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
 {
-	/* It's safe to assume a HCA can handle a page size
-	 * matching that of the native system */
+	/*
+	 * It's safe to assume a HCA can handle a page size
+	 * matching that of the native system
+	 */
 	hdev->ibh_page_shift = PAGE_SHIFT;
 	hdev->ibh_page_size  = 1 << PAGE_SHIFT;
 	hdev->ibh_page_mask  = ~((__u64)hdev->ibh_page_size - 1);
@@ -2097,11 +2110,11 @@
 {
 	int i;
 
-	if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
+	if (!hdev->ibh_nmrs || !hdev->ibh_mrs)
 		return;
 
 	for (i = 0; i < hdev->ibh_nmrs; i++) {
-		if (hdev->ibh_mrs[i] == NULL)
+		if (!hdev->ibh_mrs[i])
 			break;
 
 		ib_dereg_mr(hdev->ibh_mrs[i]);
@@ -2116,10 +2129,10 @@
 {
 	kiblnd_hdev_cleanup_mrs(hdev);
 
-	if (hdev->ibh_pd != NULL)
+	if (hdev->ibh_pd)
 		ib_dealloc_pd(hdev->ibh_pd);
 
-	if (hdev->ibh_cmid != NULL)
+	if (hdev->ibh_cmid)
 		rdma_destroy_id(hdev->ibh_cmid);
 
 	LIBCFS_FREE(hdev, sizeof(*hdev));
@@ -2132,11 +2145,11 @@
 	int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 
 	rc = kiblnd_hdev_get_attr(hdev);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
-	if (hdev->ibh_mrs == NULL) {
+	if (!hdev->ibh_mrs) {
 		CERROR("Failed to allocate MRs table\n");
 		return -ENOMEM;
 	}
@@ -2170,12 +2183,13 @@
 	struct sockaddr_in dstaddr;
 	int rc;
 
-	if (dev->ibd_hdev == NULL || /* initializing */
-	    dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
+	if (!dev->ibd_hdev || /* initializing */
+	    !dev->ibd_hdev->ibh_cmid || /* listener is dead */
 	    *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
 		return 1;
 
-	/* XXX: it's UGLY, but I don't have better way to find
+	/*
+	 * XXX: it's UGLY, but I don't have better way to find
 	 * ib-bonding HCA failover because:
 	 *
 	 * a. no reliable CM event for HCA failover...
@@ -2184,7 +2198,8 @@
 	 * We have only two choices at this point:
 	 *
 	 * a. rdma_bind_addr(), it will conflict with listener cmid
-	 * b. rdma_resolve_addr() to zero addr */
+	 * b. rdma_resolve_addr() to zero addr
+	 */
 	cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
 				     IB_QPT_RC);
 	if (IS_ERR(cmid)) {
@@ -2201,7 +2216,7 @@
 	dstaddr.sin_family = AF_INET;
 	rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
 			       (struct sockaddr *)&dstaddr, 1);
-	if (rc != 0 || cmid->device == NULL) {
+	if (rc || !cmid->device) {
 		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
 		       dev->ibd_ifname, &dev->ibd_ifip,
 		       cmid->device, rc);
@@ -2230,24 +2245,27 @@
 	int i;
 
 	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
-		 dev->ibd_can_failover ||
-		 dev->ibd_hdev == NULL);
+		dev->ibd_can_failover || !dev->ibd_hdev);
 
 	rc = kiblnd_dev_need_failover(dev);
 	if (rc <= 0)
 		goto out;
 
-	if (dev->ibd_hdev != NULL &&
-	    dev->ibd_hdev->ibh_cmid != NULL) {
-		/* XXX it's not good to close old listener at here,
+	if (dev->ibd_hdev &&
+	    dev->ibd_hdev->ibh_cmid) {
+		/*
+		 * XXX it's not good to close old listener at here,
 		 * because we can fail to create new listener.
 		 * But we have to close it now, otherwise rdma_bind_addr
-		 * will return EADDRINUSE... How crap! */
+		 * will return EADDRINUSE... How crap!
+		 */
 		write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 		cmid = dev->ibd_hdev->ibh_cmid;
-		/* make next schedule of kiblnd_dev_need_failover()
-		 * return 1 for me */
+		/*
+		 * make next schedule of kiblnd_dev_need_failover()
+		 * return 1 for me
+		 */
 		dev->ibd_hdev->ibh_cmid  = NULL;
 		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
@@ -2269,7 +2287,7 @@
 
 	/* Bind to failover device or port */
 	rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
-	if (rc != 0 || cmid->device == NULL) {
+	if (rc || !cmid->device) {
 		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
 		       dev->ibd_ifname, &dev->ibd_ifip,
 		       cmid->device, rc);
@@ -2278,7 +2296,7 @@
 	}
 
 	LIBCFS_ALLOC(hdev, sizeof(*hdev));
-	if (hdev == NULL) {
+	if (!hdev) {
 		CERROR("Failed to allocate kib_hca_dev\n");
 		rdma_destroy_id(cmid);
 		rc = -ENOMEM;
@@ -2300,13 +2318,13 @@
 	hdev->ibh_pd = pd;
 
 	rc = rdma_listen(cmid, 0);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't start new listener: %d\n", rc);
 		goto out;
 	}
 
 	rc = kiblnd_hdev_setup_mrs(hdev);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't setup device: %d\n", rc);
 		goto out;
 	}
@@ -2334,10 +2352,10 @@
 		kiblnd_destroy_pool_list(&zombie_ppo);
 	if (!list_empty(&zombie_fpo))
 		kiblnd_destroy_fmr_pool_list(&zombie_fpo);
-	if (hdev != NULL)
+	if (hdev)
 		kiblnd_hdev_decref(hdev);
 
-	if (rc != 0)
+	if (rc)
 		dev->ibd_failed_failover++;
 	else
 		dev->ibd_failed_failover = 0;
@@ -2347,13 +2365,13 @@
 
 void kiblnd_destroy_dev(kib_dev_t *dev)
 {
-	LASSERT(dev->ibd_nnets == 0);
+	LASSERT(!dev->ibd_nnets);
 	LASSERT(list_empty(&dev->ibd_nets));
 
 	list_del(&dev->ibd_fail_list);
 	list_del(&dev->ibd_list);
 
-	if (dev->ibd_hdev != NULL)
+	if (dev->ibd_hdev)
 		kiblnd_hdev_decref(dev->ibd_hdev);
 
 	LIBCFS_FREE(dev, sizeof(*dev));
@@ -2369,7 +2387,7 @@
 	int rc;
 
 	rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't query IPoIB interface %s: %d\n",
 		       ifname, rc);
 		return NULL;
@@ -2381,11 +2399,11 @@
 	}
 
 	LIBCFS_ALLOC(dev, sizeof(*dev));
-	if (dev == NULL)
+	if (!dev)
 		return NULL;
 
 	netdev = dev_get_by_name(&init_net, ifname);
-	if (netdev == NULL) {
+	if (!netdev) {
 		dev->ibd_can_failover = 0;
 	} else {
 		dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
@@ -2400,14 +2418,13 @@
 
 	/* initialize the device */
 	rc = kiblnd_dev_failover(dev);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't initialize device: %d\n", rc);
 		LIBCFS_FREE(dev, sizeof(*dev));
 		return NULL;
 	}
 
-	list_add_tail(&dev->ibd_list,
-			  &kiblnd_data.kib_devs);
+	list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
 	return dev;
 }
 
@@ -2424,7 +2441,7 @@
 
 	case IBLND_INIT_ALL:
 	case IBLND_INIT_DATA:
-		LASSERT(kiblnd_data.kib_peers != NULL);
+		LASSERT(kiblnd_data.kib_peers);
 		for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
 			LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
 		LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
@@ -2433,9 +2450,11 @@
 		/* flag threads to terminate; wake and wait for them to die */
 		kiblnd_data.kib_shutdown = 1;
 
-		/* NB: we really want to stop scheduler threads net by net
+		/*
+		 * NB: we really want to stop scheduler threads net by net
 		 * instead of the whole module, this should be improved
-		 * with dynamic configuration LNet */
+		 * with dynamic configuration LNet
+		 */
 		cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
 			wake_up_all(&sched->ibs_waitq);
 
@@ -2443,7 +2462,7 @@
 		wake_up_all(&kiblnd_data.kib_failover_waitq);
 
 		i = 2;
-		while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+		while (atomic_read(&kiblnd_data.kib_nthreads)) {
 			i++;
 			/* power of 2 ? */
 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
@@ -2459,13 +2478,13 @@
 		break;
 	}
 
-	if (kiblnd_data.kib_peers != NULL) {
+	if (kiblnd_data.kib_peers) {
 		LIBCFS_FREE(kiblnd_data.kib_peers,
 			    sizeof(struct list_head) *
 			    kiblnd_data.kib_peer_hash_size);
 	}
 
-	if (kiblnd_data.kib_scheds != NULL)
+	if (kiblnd_data.kib_scheds)
 		cfs_percpt_free(kiblnd_data.kib_scheds);
 
 	kiblnd_data.kib_init = IBLND_INIT_NOTHING;
@@ -2481,7 +2500,7 @@
 
 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
 
-	if (net == NULL)
+	if (!net)
 		goto out;
 
 	write_lock_irqsave(g_lock, flags);
@@ -2498,7 +2517,7 @@
 
 		/* Wait for all peer state to clean up */
 		i = 2;
-		while (atomic_read(&net->ibn_npeers) != 0) {
+		while (atomic_read(&net->ibn_npeers)) {
 			i++;
 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
 			       "%s: waiting for %d peers to disconnect\n",
@@ -2519,10 +2538,9 @@
 		/* fall through */
 
 	case IBLND_INIT_NOTHING:
-		LASSERT(atomic_read(&net->ibn_nconns) == 0);
+		LASSERT(!atomic_read(&net->ibn_nconns));
 
-		if (net->ibn_dev != NULL &&
-		    net->ibn_dev->ibd_nnets == 0)
+		if (net->ibn_dev && !net->ibn_dev->ibd_nnets)
 			kiblnd_destroy_dev(net->ibn_dev);
 
 		break;
@@ -2558,7 +2576,7 @@
 	kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
 	LIBCFS_ALLOC(kiblnd_data.kib_peers,
 		     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
-	if (kiblnd_data.kib_peers == NULL)
+	if (!kiblnd_data.kib_peers)
 		goto failed;
 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
 		INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
@@ -2571,7 +2589,7 @@
 
 	kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
 						  sizeof(*sched));
-	if (kiblnd_data.kib_scheds == NULL)
+	if (!kiblnd_data.kib_scheds)
 		goto failed;
 
 	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
@@ -2585,8 +2603,10 @@
 		if (*kiblnd_tunables.kib_nscheds > 0) {
 			nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
 		} else {
-			/* max to half of CPUs, another half is reserved for
-			 * upper layer modules */
+			/*
+			 * max to half of CPUs, another half is reserved for
+			 * upper layer modules
+			 */
 			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
 		}
 
@@ -2601,16 +2621,16 @@
 	/*****************************************************/
 
 	rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't spawn o2iblnd connd: %d\n", rc);
 		goto failed;
 	}
 
-	if (*kiblnd_tunables.kib_dev_failover != 0)
+	if (*kiblnd_tunables.kib_dev_failover)
 		rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
 					 "kiblnd_failover");
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
 		goto failed;
 	}
@@ -2632,7 +2652,7 @@
 	int nthrs;
 	int i;
 
-	if (sched->ibs_nthreads == 0) {
+	if (!sched->ibs_nthreads) {
 		if (*kiblnd_tunables.kib_nscheds > 0) {
 			nthrs = sched->ibs_nthreads_max;
 		} else {
@@ -2655,7 +2675,7 @@
 		snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
 			 KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
 		rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
-		if (rc == 0)
+		if (!rc)
 			continue;
 
 		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
@@ -2677,14 +2697,14 @@
 	for (i = 0; i < ncpts; i++) {
 		struct kib_sched_info *sched;
 
-		cpt = (cpts == NULL) ? i : cpts[i];
+		cpt = !cpts ? i : cpts[i];
 		sched = kiblnd_data.kib_scheds[cpt];
 
 		if (!newdev && sched->ibs_nthreads > 0)
 			continue;
 
 		rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Failed to start scheduler threads for %s\n",
 			       dev->ibd_ifname);
 			return rc;
@@ -2702,24 +2722,24 @@
 
 	colon = strchr(ifname, ':');
 	list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
-		if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+		if (!strcmp(&dev->ibd_ifname[0], ifname))
 			return dev;
 
-		if (alias != NULL)
+		if (alias)
 			continue;
 
 		colon2 = strchr(dev->ibd_ifname, ':');
-		if (colon != NULL)
+		if (colon)
 			*colon = 0;
-		if (colon2 != NULL)
+		if (colon2)
 			*colon2 = 0;
 
-		if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+		if (!strcmp(&dev->ibd_ifname[0], ifname))
 			alias = dev;
 
-		if (colon != NULL)
+		if (colon)
 			*colon = ':';
-		if (colon2 != NULL)
+		if (colon2)
 			*colon2 = ':';
 	}
 	return alias;
@@ -2739,13 +2759,13 @@
 
 	if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
 		rc = kiblnd_base_startup();
-		if (rc != 0)
+		if (rc)
 			return rc;
 	}
 
 	LIBCFS_ALLOC(net, sizeof(*net));
 	ni->ni_data = net;
-	if (net == NULL)
+	if (!net)
 		goto net_failed;
 
 	ktime_get_real_ts64(&tv);
@@ -2757,11 +2777,11 @@
 	ni->ni_peertxcredits  = *kiblnd_tunables.kib_peertxcredits;
 	ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
 
-	if (ni->ni_interfaces[0] != NULL) {
+	if (ni->ni_interfaces[0]) {
 		/* Use the IPoIB interface specified in 'networks=' */
 
 		CLASSERT(LNET_MAX_INTERFACES > 1);
-		if (ni->ni_interfaces[1] != NULL) {
+		if (ni->ni_interfaces[1]) {
 			CERROR("Multiple interfaces not supported\n");
 			goto failed;
 		}
@@ -2778,12 +2798,12 @@
 
 	ibdev = kiblnd_dev_search(ifname);
 
-	newdev = ibdev == NULL;
+	newdev = !ibdev;
 	/* hmm...create kib_dev even for alias */
-	if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
+	if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname))
 		ibdev = kiblnd_create_dev(ifname);
 
-	if (ibdev == NULL)
+	if (!ibdev)
 		goto failed;
 
 	net->ibn_dev = ibdev;
@@ -2791,11 +2811,11 @@
 
 	rc = kiblnd_dev_start_threads(ibdev, newdev,
 				      ni->ni_cpts, ni->ni_ncpts);
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Failed to initialize NI pools: %d\n", rc);
 		goto failed;
 	}
@@ -2810,7 +2830,7 @@
 	return 0;
 
 failed:
-	if (net->ibn_dev == NULL && ibdev != NULL)
+	if (!net->ibn_dev && ibdev)
 		kiblnd_destroy_dev(ibdev);
 
 net_failed:
@@ -2831,14 +2851,14 @@
 
 	CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
 	CLASSERT(offsetof(kib_msg_t,
-		ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
-		<= IBLND_MSG_SIZE);
+			  ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+			  <= IBLND_MSG_SIZE);
 	CLASSERT(offsetof(kib_msg_t,
-		ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
-		<= IBLND_MSG_SIZE);
+			  ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+			  <= IBLND_MSG_SIZE);
 
 	rc = kiblnd_tunables_init();
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	lnet_register_lnd(&the_o2iblnd);
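
A side note on the kiblnd_cksum() hunk near the top of this file's diff: the checksum is a plain rotate-left-by-one-and-add over the message bytes, with a final value of 0 remapped to 1 so that 0 can keep meaning "no checksum" on the wire. A minimal standalone sketch of the same technique (the demo_* names and the main() harness are illustrative, not part of the Lustre sources):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Rotate-and-add checksum, never returning 0 (0 == "no checksum"). */
static uint32_t demo_cksum(const void *buf, size_t nob)
{
	const unsigned char *c = buf;
	uint32_t sum = 0;

	while (nob-- > 0)
		sum = ((sum << 1) | (sum >> 31)) + *c++;

	/* map 0 to 1 so a zero result can't be mistaken for "no checksum" */
	return !sum ? 1 : sum;
}

int main(void)
{
	const char msg[] = "o2iblnd";

	printf("0x%08x\n", demo_cksum(msg, strlen(msg)));
	return 0;
}

The final `!sum ? 1 : sum` is the same boolean-style test the rest of the diff converts the file to.
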
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 025faa9..2abb574 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -146,9 +146,9 @@
 #define IBLND_OOB_CAPABLE(v)       ((v) != IBLND_MSG_VERSION_1)
 #define IBLND_OOB_MSGS(v)	   (IBLND_OOB_CAPABLE(v) ? 2 : 0)
 
-#define IBLND_MSG_SIZE	      (4<<10)		 /* max size of queued messages (inc hdr) */
+#define IBLND_MSG_SIZE		(4 << 10)	 /* max size of queued messages (inc hdr) */
 #define IBLND_MAX_RDMA_FRAGS	 LNET_MAX_IOV	   /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand ? \
 				    *kiblnd_tunables.kib_map_on_demand :      \
 				     IBLND_MAX_RDMA_FRAGS)  /* max # of fragments configured by user */
 #define IBLND_RDMA_FRAGS(v)	((v) == IBLND_MSG_VERSION_1 ? \
@@ -611,7 +611,7 @@
 	if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
 		return 0;
 
-	if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+	if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
 		return 0;
 
 	if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
@@ -691,7 +691,7 @@
 {
 	return (*kiblnd_tunables.kib_keepalive > 0) &&
 		cfs_time_after(jiffies, conn->ibc_last_send +
-			       *kiblnd_tunables.kib_keepalive*HZ);
+			       *kiblnd_tunables.kib_keepalive * HZ);
 }
 
 static inline int
@@ -710,16 +710,16 @@
 
 		/* No tx to piggyback NOOP onto or no credit to send a tx */
 		return (list_empty(&conn->ibc_tx_queue) ||
-			conn->ibc_credits == 0);
+			!conn->ibc_credits);
 	}
 
 	if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
 	    !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
-	    conn->ibc_credits == 0)		    /* no credit */
+	    !conn->ibc_credits)		    /* no credit */
 		return 0;
 
 	if (conn->ibc_credits == 1 &&      /* last credit reserved for */
-	    conn->ibc_outstanding_credits == 0) /* giving back credits */
+	    !conn->ibc_outstanding_credits) /* giving back credits */
 		return 0;
 
 	/* No tx to piggyback NOOP onto or no credit to send a tx */
@@ -765,8 +765,8 @@
 {
 	unsigned long lptr = (unsigned long)ptr;
 
-	LASSERT((lptr & IBLND_WID_MASK) == 0);
-	LASSERT((type & ~IBLND_WID_MASK) == 0);
+	LASSERT(!(lptr & IBLND_WID_MASK));
+	LASSERT(!(type & ~IBLND_WID_MASK));
 	return (__u64)(lptr | type);
 }
 
@@ -948,33 +948,33 @@
 kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
 void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
 int  kiblnd_close_stale_conns_locked(kib_peer_t *peer,
-				      int version, __u64 incarnation);
+				     int version, __u64 incarnation);
 int  kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
 
 void kiblnd_connreq_done(kib_conn_t *conn, int status);
 kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
-				int state, int version);
+			       int state, int version);
 void kiblnd_destroy_conn(kib_conn_t *conn);
 void kiblnd_close_conn(kib_conn_t *conn, int error);
 void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
 
 int  kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
-		       int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+		      int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
 
 void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
 void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
 void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
 void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
 void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
-			 int status);
-void kiblnd_check_sends (kib_conn_t *conn);
+			int status);
+void kiblnd_check_sends(kib_conn_t *conn);
 
 void kiblnd_qp_event(struct ib_event *event, void *arg);
 void kiblnd_cq_event(struct ib_event *event, void *arg);
 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
 
 void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
-		      int credits, lnet_nid_t dstnid, __u64 dststamp);
+		     int credits, lnet_nid_t dstnid, __u64 dststamp);
 int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
 int  kiblnd_post_rx(kib_rx_t *rx, int credit);
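
One of the o2iblnd.h hunks above only adds spacing around `*kiblnd_tunables.kib_keepalive * HZ`, but the expression itself is the usual jiffies keepalive test: remember the jiffy of the last message posted and send a NOOP once the configured number of seconds' worth of jiffies has elapsed, using a wrap-safe comparison. A small standalone sketch under those assumptions (the demo_* names are illustrative; demo_time_after() mirrors the semantics of the kernel's time_after() rather than including the kernel header):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel facilities; not the real definitions. */
#define HZ 1000UL
typedef unsigned long jiffies_t;

/* Wrap-safe "a is after b", same idea as the kernel's time_after(a, b). */
static bool demo_time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

struct demo_conn {
	jiffies_t last_send;		/* jiffy of the last message posted */
};

/* True once more than 'keepalive' seconds have passed since the last send. */
static bool demo_need_keepalive(const struct demo_conn *conn,
				jiffies_t now, unsigned int keepalive)
{
	return keepalive > 0 &&
	       demo_time_after(now, conn->last_send + keepalive * HZ);
}

int main(void)
{
	struct demo_conn conn = { .last_send = 0 };

	printf("%d\n", demo_need_keepalive(&conn, 30 * HZ + 1, 30));
	return 0;
}
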
 
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index c7b9ccb..0608431 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -50,12 +50,12 @@
 	int rc;
 	int i;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 	LASSERT(!in_interrupt());
 	LASSERT(!tx->tx_queued);	       /* mustn't be queued for sending */
-	LASSERT(tx->tx_sending == 0);	  /* mustn't be awaiting sent callback */
+	LASSERT(!tx->tx_sending);	  /* mustn't be awaiting sent callback */
 	LASSERT(!tx->tx_waiting);	      /* mustn't be awaiting peer response */
-	LASSERT(tx->tx_pool != NULL);
+	LASSERT(tx->tx_pool);
 
 	kiblnd_unmap_tx(ni, tx);
 
@@ -64,7 +64,7 @@
 	lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
 	rc = tx->tx_status;
 
-	if (tx->tx_conn != NULL) {
+	if (tx->tx_conn) {
 		LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni);
 
 		kiblnd_conn_decref(tx->tx_conn);
@@ -78,7 +78,7 @@
 
 	/* delay finalize until my descs have been freed */
 	for (i = 0; i < 2; i++) {
-		if (lntmsg[i] == NULL)
+		if (!lntmsg[i])
 			continue;
 
 		lnet_finalize(ni, lntmsg[i], rc);
@@ -111,19 +111,19 @@
 
 	tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
 	node = kiblnd_pool_alloc_node(&tps->tps_poolset);
-	if (node == NULL)
+	if (!node)
 		return NULL;
 	tx = container_of(node, kib_tx_t, tx_list);
 
-	LASSERT(tx->tx_nwrq == 0);
+	LASSERT(!tx->tx_nwrq);
 	LASSERT(!tx->tx_queued);
-	LASSERT(tx->tx_sending == 0);
+	LASSERT(!tx->tx_sending);
 	LASSERT(!tx->tx_waiting);
-	LASSERT(tx->tx_status == 0);
-	LASSERT(tx->tx_conn == NULL);
-	LASSERT(tx->tx_lntmsg[0] == NULL);
-	LASSERT(tx->tx_lntmsg[1] == NULL);
-	LASSERT(tx->tx_nfrags == 0);
+	LASSERT(!tx->tx_status);
+	LASSERT(!tx->tx_conn);
+	LASSERT(!tx->tx_lntmsg[0]);
+	LASSERT(!tx->tx_lntmsg[1]);
+	LASSERT(!tx->tx_nfrags);
 
 	return tx;
 }
@@ -152,14 +152,14 @@
 	struct ib_mr *mr;
 	int rc;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 	LASSERT(!in_interrupt());
 	LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
 		credit == IBLND_POSTRX_PEER_CREDIT ||
 		credit == IBLND_POSTRX_RSRVD_CREDIT);
 
 	mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
-	LASSERT(mr != NULL);
+	LASSERT(mr);
 
 	rx->rx_sge.lkey   = mr->lkey;
 	rx->rx_sge.addr   = rx->rx_msgaddr;
@@ -185,7 +185,7 @@
 	 */
 	kiblnd_conn_addref(conn);
 	rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
-	if (unlikely(rc != 0)) {
+	if (unlikely(rc)) {
 		CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
 		       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
 		rx->rx_nob = 0;
@@ -194,7 +194,7 @@
 	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
 		goto out;
 
-	if (unlikely(rc != 0)) {
+	if (unlikely(rc)) {
 		kiblnd_close_conn(conn, rc);
 		kiblnd_drop_rx(rx);	     /* No more posts for this rx */
 		goto out;
@@ -225,7 +225,7 @@
 		kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
 
 		LASSERT(!tx->tx_queued);
-		LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
+		LASSERT(tx->tx_sending || tx->tx_waiting);
 
 		if (tx->tx_cookie != cookie)
 			continue;
@@ -251,7 +251,7 @@
 	spin_lock(&conn->ibc_lock);
 
 	tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
-	if (tx == NULL) {
+	if (!tx) {
 		spin_unlock(&conn->ibc_lock);
 
 		CWARN("Unmatched completion type %x cookie %#llx from %s\n",
@@ -260,7 +260,7 @@
 		return;
 	}
 
-	if (tx->tx_status == 0) {	       /* success so far */
+	if (!tx->tx_status) {	       /* success so far */
 		if (status < 0) /* failed? */
 			tx->tx_status = status;
 		else if (txtype == IBLND_MSG_GET_REQ)
@@ -269,7 +269,7 @@
 
 	tx->tx_waiting = 0;
 
-	idle = !tx->tx_queued && (tx->tx_sending == 0);
+	idle = !tx->tx_queued && !tx->tx_sending;
 	if (idle)
 		list_del(&tx->tx_list);
 
@@ -285,7 +285,7 @@
 	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
 	kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
 
-	if (tx == NULL) {
+	if (!tx) {
 		CERROR("Can't get tx for completion %x for %s\n",
 		       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
 		return;
@@ -316,7 +316,7 @@
 	       msg->ibm_type, credits,
 	       libcfs_nid2str(conn->ibc_peer->ibp_nid));
 
-	if (credits != 0) {
+	if (credits) {
 		/* Have I received credits that will let me send? */
 		spin_lock(&conn->ibc_lock);
 
@@ -360,7 +360,7 @@
 			break;
 		}
 
-		if (credits != 0) /* credit already posted */
+		if (credits) /* credit already posted */
 			post_credit = IBLND_POSTRX_NO_CREDIT;
 		else	      /* a keepalive NOOP */
 			post_credit = IBLND_POSTRX_PEER_CREDIT;
@@ -396,12 +396,12 @@
 
 		spin_lock(&conn->ibc_lock);
 		tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
-					msg->ibm_u.putack.ibpam_src_cookie);
-		if (tx != NULL)
+						   msg->ibm_u.putack.ibpam_src_cookie);
+		if (tx)
 			list_del(&tx->tx_list);
 		spin_unlock(&conn->ibc_lock);
 
-		if (tx == NULL) {
+		if (!tx) {
 			CERROR("Unmatched PUT_ACK from %s\n",
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
 			rc = -EPROTO;
@@ -409,10 +409,11 @@
 		}
 
 		LASSERT(tx->tx_waiting);
-		/* CAVEAT EMPTOR: I could be racing with tx_complete, but...
+		/*
+		 * CAVEAT EMPTOR: I could be racing with tx_complete, but...
 		 * (a) I can overwrite tx_msg since my peer has received it!
-		 * (b) tx_waiting set tells tx_complete() it's not done. */
-
+		 * (b) tx_waiting set tells tx_complete() it's not done.
+		 */
 		tx->tx_nwrq = 0;		/* overwrite PUT_REQ */
 
 		rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
@@ -469,7 +470,7 @@
 	int rc;
 	int err = -EIO;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 	LASSERT(rx->rx_nob < 0);	       /* was posted */
 	rx->rx_nob = 0;			 /* isn't now */
 
@@ -486,9 +487,9 @@
 	rx->rx_nob = nob;
 
 	rc = kiblnd_unpack_msg(msg, rx->rx_nob);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d unpacking rx from %s\n",
-			rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+		       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
 		goto failed;
 	}
 
@@ -497,7 +498,7 @@
 	    msg->ibm_srcstamp != conn->ibc_incarnation ||
 	    msg->ibm_dststamp != net->ibn_incarnation) {
 		CERROR("Stale rx from %s\n",
-			libcfs_nid2str(conn->ibc_peer->ibp_nid));
+		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
 		err = -ESTALE;
 		goto failed;
 	}
@@ -537,7 +538,7 @@
 
 	if (is_vmalloc_addr((void *)vaddr)) {
 		page = vmalloc_to_page((void *)vaddr);
-		LASSERT(page != NULL);
+		LASSERT(page);
 		return page;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -549,7 +550,7 @@
 	}
 #endif
 	page = virt_to_page(vaddr);
-	LASSERT(page != NULL);
+	LASSERT(page);
 	return page;
 }
 
@@ -565,8 +566,8 @@
 	int rc;
 	int i;
 
-	LASSERT(tx->tx_pool != NULL);
-	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+	LASSERT(tx->tx_pool);
+	LASSERT(tx->tx_pool->tpo_pool.po_owner);
 
 	hdev = tx->tx_pool->tpo_hdev;
 
@@ -582,13 +583,15 @@
 
 	fps = net->ibn_fmr_ps[cpt];
 	rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't map %d pages: %d\n", npages, rc);
 		return rc;
 	}
 
-	/* If rd is not tx_rd, it's going to get sent to a peer, who will need
-	 * the rkey */
+	/*
+	 * If rd is not tx_rd, it's going to get sent to a peer, who will need
+	 * the rkey
+	 */
 	rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
 					 tx->fmr.fmr_pfmr->fmr->lkey;
 	rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
@@ -602,14 +605,14 @@
 {
 	kib_net_t *net = ni->ni_data;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 
 	if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
 		kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
 		tx->fmr.fmr_pfmr = NULL;
 	}
 
-	if (tx->tx_nfrags != 0) {
+	if (tx->tx_nfrags) {
 		kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
 				    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
 		tx->tx_nfrags = 0;
@@ -625,8 +628,10 @@
 	__u32 nob;
 	int i;
 
-	/* If rd is not tx_rd, it's going to get sent to a peer and I'm the
-	 * RDMA sink */
+	/*
+	 * If rd is not tx_rd, it's going to get sent to a peer and I'm the
+	 * RDMA sink
+	 */
 	tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	tx->tx_nfrags = nfrags;
 
@@ -643,13 +648,13 @@
 
 	/* looking for pre-mapping MR */
 	mr = kiblnd_find_rd_dma_mr(hdev, rd);
-	if (mr != NULL) {
+	if (mr) {
 		/* found pre-mapping MR */
 		rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
 		return 0;
 	}
 
-	if (net->ibn_fmr_ps != NULL)
+	if (net->ibn_fmr_ps)
 		return kiblnd_fmr_map_tx(net, tx, rd, nob);
 
 	return -EINVAL;
@@ -668,7 +673,7 @@
 
 	LASSERT(nob > 0);
 	LASSERT(niov > 0);
-	LASSERT(net != NULL);
+	LASSERT(net);
 
 	while (offset >= iov->iov_len) {
 		offset -= iov->iov_len;
@@ -684,7 +689,7 @@
 		vaddr = ((unsigned long)iov->iov_base) + offset;
 		page_offset = vaddr & (PAGE_SIZE - 1);
 		page = kiblnd_kvaddr_to_page(vaddr);
-		if (page == NULL) {
+		if (!page) {
 			CERROR("Can't find page\n");
 			return -EFAULT;
 		}
@@ -710,7 +715,7 @@
 
 static int
 kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-		      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+		     int nkiov, lnet_kiov_t *kiov, int offset, int nob)
 {
 	kib_net_t *net = ni->ni_data;
 	struct scatterlist *sg;
@@ -720,7 +725,7 @@
 
 	LASSERT(nob > 0);
 	LASSERT(nkiov > 0);
-	LASSERT(net != NULL);
+	LASSERT(net);
 
 	while (offset >= kiov->kiov_len) {
 		offset -= kiov->kiov_len;
@@ -750,8 +755,7 @@
 
 static int
 kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
-	__releases(conn->ibc_lock)
-	__acquires(conn->ibc_lock)
+	__must_hold(&conn->ibc_lock)
 {
 	kib_msg_t *msg = tx->tx_msg;
 	kib_peer_t *peer = conn->ibc_peer;
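
The hunk above replaces the paired __releases()/__acquires() annotations on kiblnd_post_tx_locked() with a single __must_hold(&conn->ibc_lock), i.e. sparse is told that the caller holds ibc_lock on entry and still holds it on return. A standalone sketch of how such an annotation reads (the empty stub macros and the pthread mutex are stand-ins so the sketch builds outside the kernel; the real definitions live in the kernel's compiler headers):

#include <pthread.h>
#include <stdio.h>

/* Stubs: outside a sparse build these annotations expand to nothing. */
#define __acquires(x)
#define __releases(x)
#define __must_hold(x)

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_counter;

/* Must be entered (and left) with demo_lock held. */
static void demo_bump_locked(void)
	__must_hold(&demo_lock)
{
	demo_counter++;
}

static void demo_bump(void)
{
	pthread_mutex_lock(&demo_lock);   /* __acquires(&demo_lock), in kernel terms */
	demo_bump_locked();
	pthread_mutex_unlock(&demo_lock); /* __releases(&demo_lock) */
}

int main(void)
{
	demo_bump();
	printf("%d\n", demo_counter);
	return 0;
}
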
@@ -765,7 +769,7 @@
 	LASSERT(tx->tx_nwrq > 0);
 	LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
 
-	LASSERT(credit == 0 || credit == 1);
+	LASSERT(!credit || credit == 1);
 	LASSERT(conn->ibc_outstanding_credits >= 0);
 	LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
 	LASSERT(conn->ibc_credits >= 0);
@@ -778,13 +782,13 @@
 		return -EAGAIN;
 	}
 
-	if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
+	if (credit && !conn->ibc_credits) {   /* no credits */
 		CDEBUG(D_NET, "%s: no credits\n",
 		       libcfs_nid2str(peer->ibp_nid));
 		return -EAGAIN;
 	}
 
-	if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
+	if (credit && !IBLND_OOB_CAPABLE(ver) &&
 	    conn->ibc_credits == 1 &&   /* last credit reserved */
 	    msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
 		CDEBUG(D_NET, "%s: not using last credit\n",
@@ -800,9 +804,11 @@
 	    (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
 	     (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
 	      conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
-		/* OK to drop when posted enough NOOPs, since
+		/*
+		 * OK to drop when posted enough NOOPs, since
 		 * kiblnd_check_sends will queue NOOP again when
-		 * posted NOOPs complete */
+		 * posted NOOPs complete
+		 */
 		spin_unlock(&conn->ibc_lock);
 		kiblnd_tx_done(peer->ibp_ni, tx);
 		spin_lock(&conn->ibc_lock);
@@ -821,12 +827,14 @@
 	if (msg->ibm_type == IBLND_MSG_NOOP)
 		conn->ibc_noops_posted++;
 
-	/* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
+	/*
+	 * CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
 	 * PUT.  If so, it was first queued here as a PUT_REQ, sent and
 	 * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
 	 * and then re-queued here.  It's (just) possible that
 	 * tx_sending is non-zero if we've not done the tx_complete()
-	 * from the first send; hence the ++ rather than = below. */
+	 * from the first send; hence the ++ rather than = below.
+	 */
 	tx->tx_sending++;
 	list_add(&tx->tx_list, &conn->ibc_active_txs);
 
@@ -843,11 +851,13 @@
 
 	conn->ibc_last_send = jiffies;
 
-	if (rc == 0)
+	if (!rc)
 		return 0;
 
-	/* NB credits are transferred in the actual
-	 * message, which can only be the last work item */
+	/*
+	 * NB credits are transferred in the actual
+	 * message, which can only be the last work item
+	 */
 	conn->ibc_credits += credit;
 	conn->ibc_outstanding_credits += msg->ibm_credits;
 	conn->ibc_nsends_posted--;
@@ -858,7 +868,7 @@
 	tx->tx_waiting = 0;
 	tx->tx_sending--;
 
-	done = (tx->tx_sending == 0);
+	done = !tx->tx_sending;
 	if (done)
 		list_del(&tx->tx_list);
 
@@ -899,13 +909,13 @@
 
 	LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
 	LASSERT(!IBLND_OOB_CAPABLE(ver) ||
-		 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
+		conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
 	LASSERT(conn->ibc_reserved_credits >= 0);
 
 	while (conn->ibc_reserved_credits > 0 &&
 	       !list_empty(&conn->ibc_tx_queue_rsrvd)) {
 		tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
-				    kib_tx_t, tx_list);
+				kib_tx_t, tx_list);
 		list_del(&tx->tx_list);
 		list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
 		conn->ibc_reserved_credits--;
@@ -915,11 +925,11 @@
 		spin_unlock(&conn->ibc_lock);
 
 		tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
-		if (tx != NULL)
+		if (tx)
 			kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
 
 		spin_lock(&conn->ibc_lock);
-		if (tx != NULL)
+		if (tx)
 			kiblnd_queue_tx_locked(tx, conn);
 	}
 
@@ -931,7 +941,7 @@
 		if (!list_empty(&conn->ibc_tx_queue_nocred)) {
 			credit = 0;
 			tx = list_entry(conn->ibc_tx_queue_nocred.next,
-					    kib_tx_t, tx_list);
+					kib_tx_t, tx_list);
 		} else if (!list_empty(&conn->ibc_tx_noops)) {
 			LASSERT(!IBLND_OOB_CAPABLE(ver));
 			credit = 1;
@@ -940,11 +950,12 @@
 		} else if (!list_empty(&conn->ibc_tx_queue)) {
 			credit = 1;
 			tx = list_entry(conn->ibc_tx_queue.next,
-					    kib_tx_t, tx_list);
-		} else
+					kib_tx_t, tx_list);
+		} else {
 			break;
+		}
 
-		if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
+		if (kiblnd_post_tx_locked(conn, tx, credit))
 			break;
 	}
 
@@ -976,9 +987,10 @@
 
 	spin_lock(&conn->ibc_lock);
 
-	/* I could be racing with rdma completion.  Whoever makes 'tx' idle
-	 * gets to free it, which also drops its ref on 'conn'. */
-
+	/*
+	 * I could be racing with rdma completion.  Whoever makes 'tx' idle
+	 * gets to free it, which also drops its ref on 'conn'.
+	 */
 	tx->tx_sending--;
 	conn->ibc_nsends_posted--;
 	if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
@@ -989,7 +1001,7 @@
 		tx->tx_status = -EIO;
 	}
 
-	idle = (tx->tx_sending == 0) &&	 /* This is the final callback */
+	idle = !tx->tx_sending &&	 /* This is the final callback */
 	       !tx->tx_waiting &&	       /* Not waiting for peer */
 	       !tx->tx_queued;		  /* Not re-queued (PUT_DONE) */
 	if (idle)
@@ -1023,7 +1035,7 @@
 	kiblnd_init_msg(tx->tx_msg, type, body_nob);
 
 	mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
-	LASSERT(mr != NULL);
+	LASSERT(mr);
 
 	sge->lkey   = mr->lkey;
 	sge->addr   = tx->tx_msgaddr;
@@ -1043,23 +1055,21 @@
 
 int
 kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
-		  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+		 int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
 {
 	kib_msg_t *ibmsg = tx->tx_msg;
 	kib_rdma_desc_t *srcrd = tx->tx_rd;
 	struct ib_sge *sge = &tx->tx_sge[0];
 	struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
 	int rc  = resid;
-	int srcidx;
-	int dstidx;
+	int srcidx = 0;
+	int dstidx = 0;
 	int wrknob;
 
 	LASSERT(!in_interrupt());
-	LASSERT(tx->tx_nwrq == 0);
+	LASSERT(!tx->tx_nwrq);
 	LASSERT(type == IBLND_MSG_GET_DONE ||
-		 type == IBLND_MSG_PUT_DONE);
-
-	srcidx = dstidx = 0;
+		type == IBLND_MSG_PUT_DONE);
 
 	while (resid > 0) {
 		if (srcidx >= srcrd->rd_nfrags) {
@@ -1139,7 +1149,7 @@
 	tx->tx_queued = 1;
 	tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
 
-	if (tx->tx_conn == NULL) {
+	if (!tx->tx_conn) {
 		kiblnd_conn_addref(conn);
 		tx->tx_conn = conn;
 		LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
@@ -1200,19 +1210,19 @@
 
 	/* allow the port to be reused */
 	rc = rdma_set_reuseaddr(cmid, 1);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Unable to set reuse on cmid: %d\n", rc);
 		return rc;
 	}
 
 	/* look for a free privileged port */
-	for (port = PROT_SOCK-1; port > 0; port--) {
+	for (port = PROT_SOCK - 1; port > 0; port--) {
 		srcaddr->sin_port = htons(port);
 		rc = rdma_resolve_addr(cmid,
 				       (struct sockaddr *)srcaddr,
 				       (struct sockaddr *)dstaddr,
 				       timeout_ms);
-		if (rc == 0) {
+		if (!rc) {
 			CDEBUG(D_NET, "bound to port %hu\n", port);
 			return 0;
 		} else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
@@ -1237,7 +1247,7 @@
 	struct sockaddr_in dstaddr;
 	int rc;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 	LASSERT(peer->ibp_connecting > 0);
 
 	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
@@ -1271,14 +1281,14 @@
 				       (struct sockaddr *)&dstaddr,
 				       *kiblnd_tunables.kib_timeout * 1000);
 	}
-	if (rc != 0) {
+	if (rc) {
 		/* Can't initiate address resolution:  */
 		CERROR("Can't resolve addr for %s: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), rc);
 		goto failed2;
 	}
 
-	LASSERT(cmid->device != NULL);
+	LASSERT(cmid->device);
 	CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
 	       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
 	       &dev->ibd_ifip, cmid->device->name);
@@ -1302,25 +1312,28 @@
 	unsigned long flags;
 	int rc;
 
-	/* If I get here, I've committed to send, so I complete the tx with
-	 * failure on any problems */
+	/*
+	 * If I get here, I've committed to send, so I complete the tx with
+	 * failure on any problems
+	 */
+	LASSERT(!tx || !tx->tx_conn);    /* only set when assigned a conn */
+	LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */
 
-	LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
-	LASSERT(tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */
-
-	/* First time, just use a read lock since I expect to find my peer
-	 * connected */
+	/*
+	 * First time, just use a read lock since I expect to find my peer
+	 * connected
+	 */
 	read_lock_irqsave(g_lock, flags);
 
 	peer = kiblnd_find_peer_locked(nid);
-	if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+	if (peer && !list_empty(&peer->ibp_conns)) {
 		/* Found a peer with an established connection */
 		conn = kiblnd_get_conn_locked(peer);
 		kiblnd_conn_addref(conn); /* 1 ref for me... */
 
 		read_unlock_irqrestore(g_lock, flags);
 
-		if (tx != NULL)
+		if (tx)
 			kiblnd_queue_tx(tx, conn);
 		kiblnd_conn_decref(conn); /* ...to here */
 		return;
@@ -1331,14 +1344,14 @@
 	write_lock(g_lock);
 
 	peer = kiblnd_find_peer_locked(nid);
-	if (peer != NULL) {
+	if (peer) {
 		if (list_empty(&peer->ibp_conns)) {
 			/* found a peer, but it's still connecting... */
-			LASSERT(peer->ibp_connecting != 0 ||
-				 peer->ibp_accepting != 0);
-			if (tx != NULL)
+			LASSERT(peer->ibp_connecting ||
+				peer->ibp_accepting);
+			if (tx)
 				list_add_tail(&tx->tx_list,
-						  &peer->ibp_tx_queue);
+					      &peer->ibp_tx_queue);
 			write_unlock_irqrestore(g_lock, flags);
 		} else {
 			conn = kiblnd_get_conn_locked(peer);
@@ -1346,7 +1359,7 @@
 
 			write_unlock_irqrestore(g_lock, flags);
 
-			if (tx != NULL)
+			if (tx)
 				kiblnd_queue_tx(tx, conn);
 			kiblnd_conn_decref(conn); /* ...to here */
 		}
@@ -1357,9 +1370,9 @@
 
 	/* Allocate a peer ready to add to the peer table and retry */
 	rc = kiblnd_create_peer(ni, &peer, nid);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
-		if (tx != NULL) {
+		if (tx) {
 			tx->tx_status = -EHOSTUNREACH;
 			tx->tx_waiting = 0;
 			kiblnd_tx_done(ni, tx);
@@ -1370,14 +1383,14 @@
 	write_lock_irqsave(g_lock, flags);
 
 	peer2 = kiblnd_find_peer_locked(nid);
-	if (peer2 != NULL) {
+	if (peer2) {
 		if (list_empty(&peer2->ibp_conns)) {
 			/* found a peer, but it's still connecting... */
-			LASSERT(peer2->ibp_connecting != 0 ||
-				 peer2->ibp_accepting != 0);
-			if (tx != NULL)
+			LASSERT(peer2->ibp_connecting ||
+				peer2->ibp_accepting);
+			if (tx)
 				list_add_tail(&tx->tx_list,
-						  &peer2->ibp_tx_queue);
+					      &peer2->ibp_tx_queue);
 			write_unlock_irqrestore(g_lock, flags);
 		} else {
 			conn = kiblnd_get_conn_locked(peer2);
@@ -1385,7 +1398,7 @@
 
 			write_unlock_irqrestore(g_lock, flags);
 
-			if (tx != NULL)
+			if (tx)
 				kiblnd_queue_tx(tx, conn);
 			kiblnd_conn_decref(conn); /* ...to here */
 		}
@@ -1395,13 +1408,13 @@
 	}
 
 	/* Brand new peer */
-	LASSERT(peer->ibp_connecting == 0);
+	LASSERT(!peer->ibp_connecting);
 	peer->ibp_connecting = 1;
 
 	/* always called with a ref on ni, which prevents ni being shutdown */
-	LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+	LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
 
-	if (tx != NULL)
+	if (tx)
 		list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
 
 	kiblnd_peer_addref(peer);
@@ -1437,13 +1450,13 @@
 	CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
 	       payload_nob, payload_niov, libcfs_id2str(target));
 
-	LASSERT(payload_nob == 0 || payload_niov > 0);
+	LASSERT(!payload_nob || payload_niov > 0);
 	LASSERT(payload_niov <= LNET_MAX_IOV);
 
 	/* Thread context */
 	LASSERT(!in_interrupt());
 	/* payload is either all vaddrs or all pages */
-	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
+	LASSERT(!(payload_kiov && payload_iov));
 
 	switch (type) {
 	default:
@@ -1451,7 +1464,7 @@
 		return -EIO;
 
 	case LNET_MSG_ACK:
-		LASSERT(payload_nob == 0);
+		LASSERT(!payload_nob);
 		break;
 
 	case LNET_MSG_GET:
@@ -1464,7 +1477,7 @@
 			break;		  /* send IMMEDIATE */
 
 		tx = kiblnd_get_idle_tx(ni, target.nid);
-		if (tx == NULL) {
+		if (!tx) {
 			CERROR("Can't allocate txd for GET to %s\n",
 			       libcfs_nid2str(target.nid));
 			return -ENOMEM;
@@ -1472,7 +1485,7 @@
 
 		ibmsg = tx->tx_msg;
 		rd = &ibmsg->ibm_u.get.ibgm_rd;
-		if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+		if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV))
 			rc = kiblnd_setup_rd_iov(ni, tx, rd,
 						 lntmsg->msg_md->md_niov,
 						 lntmsg->msg_md->md_iov.iov,
@@ -1482,7 +1495,7 @@
 						  lntmsg->msg_md->md_niov,
 						  lntmsg->msg_md->md_iov.kiov,
 						  0, lntmsg->msg_md->md_length);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't setup GET sink for %s: %d\n",
 			       libcfs_nid2str(target.nid), rc);
 			kiblnd_tx_done(ni, tx);
@@ -1496,7 +1509,7 @@
 		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
 
 		tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
-		if (tx->tx_lntmsg[1] == NULL) {
+		if (!tx->tx_lntmsg[1]) {
 			CERROR("Can't create reply for GET -> %s\n",
 			       libcfs_nid2str(target.nid));
 			kiblnd_tx_done(ni, tx);
@@ -1516,14 +1529,14 @@
 			break;		  /* send IMMEDIATE */
 
 		tx = kiblnd_get_idle_tx(ni, target.nid);
-		if (tx == NULL) {
+		if (!tx) {
 			CERROR("Can't allocate %s txd for %s\n",
 			       type == LNET_MSG_PUT ? "PUT" : "REPLY",
 			       libcfs_nid2str(target.nid));
 			return -ENOMEM;
 		}
 
-		if (payload_kiov == NULL)
+		if (!payload_kiov)
 			rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
 						 payload_niov, payload_iov,
 						 payload_offset, payload_nob);
@@ -1531,7 +1544,7 @@
 			rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
 						  payload_niov, payload_kiov,
 						  payload_offset, payload_nob);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't setup PUT src for %s: %d\n",
 			       libcfs_nid2str(target.nid), rc);
 			kiblnd_tx_done(ni, tx);
@@ -1555,16 +1568,16 @@
 		 <= IBLND_MSG_SIZE);
 
 	tx = kiblnd_get_idle_tx(ni, target.nid);
-	if (tx == NULL) {
+	if (!tx) {
 		CERROR("Can't send %d to %s: tx descs exhausted\n",
-			type, libcfs_nid2str(target.nid));
+		       type, libcfs_nid2str(target.nid));
 		return -ENOMEM;
 	}
 
 	ibmsg = tx->tx_msg;
 	ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
 
-	if (payload_kiov != NULL)
+	if (payload_kiov)
 		lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
 				    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
 				    payload_niov, payload_kiov,
@@ -1596,22 +1609,22 @@
 	int rc;
 
 	tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
-	if (tx == NULL) {
+	if (!tx) {
 		CERROR("Can't get tx for REPLY to %s\n",
 		       libcfs_nid2str(target.nid));
 		goto failed_0;
 	}
 
-	if (nob == 0)
+	if (!nob)
 		rc = 0;
-	else if (kiov == NULL)
+	else if (!kiov)
 		rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
 					 niov, iov, offset, nob);
 	else
 		rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
 					  niov, kiov, offset, nob);
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't setup GET src for %s: %d\n",
 		       libcfs_nid2str(target.nid), rc);
 		goto failed_1;
@@ -1627,12 +1640,11 @@
 		goto failed_1;
 	}
 
-	if (nob == 0) {
+	if (!nob) {
 		/* No RDMA: local completion may happen now! */
 		lnet_finalize(ni, lntmsg, 0);
 	} else {
-		/* RDMA: lnet_finalize(lntmsg) when it
-		 * completes */
+		/* RDMA: lnet_finalize(lntmsg) when it completes */
 		tx->tx_lntmsg[0] = lntmsg;
 	}
 
@@ -1647,8 +1659,8 @@
 
 int
 kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
-	     unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
-	     unsigned int offset, unsigned int mlen, unsigned int rlen)
+	    unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+	    unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
 	kib_rx_t *rx = private;
 	kib_msg_t *rxmsg = rx->rx_msg;
@@ -1661,7 +1673,7 @@
 	LASSERT(mlen <= rlen);
 	LASSERT(!in_interrupt());
 	/* Either all pages or all vaddrs */
-	LASSERT(!(kiov != NULL && iov != NULL));
+	LASSERT(!(kiov && iov));
 
 	switch (rxmsg->ibm_type) {
 	default:
@@ -1671,13 +1683,13 @@
 		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
 		if (nob > rx->rx_nob) {
 			CERROR("Immediate message from %s too big: %d(%d)\n",
-				libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
-				nob, rx->rx_nob);
+			       libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
+			       nob, rx->rx_nob);
 			rc = -EPROTO;
 			break;
 		}
 
-		if (kiov != NULL)
+		if (kiov)
 			lnet_copy_flat2kiov(niov, kiov, offset,
 					    IBLND_MSG_SIZE, rxmsg,
 					    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
@@ -1694,7 +1706,7 @@
 		kib_msg_t	*txmsg;
 		kib_rdma_desc_t *rd;
 
-		if (mlen == 0) {
+		if (!mlen) {
 			lnet_finalize(ni, lntmsg, 0);
 			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
 					       rxmsg->ibm_u.putreq.ibprm_cookie);
@@ -1702,7 +1714,7 @@
 		}
 
 		tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
-		if (tx == NULL) {
+		if (!tx) {
 			CERROR("Can't allocate tx for %s\n",
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
 			/* Not replying will break the connection */
@@ -1712,13 +1724,13 @@
 
 		txmsg = tx->tx_msg;
 		rd = &txmsg->ibm_u.putack.ibpam_rd;
-		if (kiov == NULL)
+		if (!kiov)
 			rc = kiblnd_setup_rd_iov(ni, tx, rd,
 						 niov, iov, offset, mlen);
 		else
 			rc = kiblnd_setup_rd_kiov(ni, tx, rd,
 						  niov, kiov, offset, mlen);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't setup PUT sink for %s: %d\n",
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
 			kiblnd_tx_done(ni, tx);
@@ -1744,7 +1756,7 @@
 		}
 
 	case IBLND_MSG_GET_REQ:
-		if (lntmsg != NULL) {
+		if (lntmsg) {
 			/* Optimized GET; RDMA lntmsg's payload */
 			kiblnd_reply(ni, rx, lntmsg);
 		} else {
@@ -1796,9 +1808,9 @@
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	if (list_empty(&peer->ibp_conns) &&
-	    peer->ibp_accepting == 0 &&
-	    peer->ibp_connecting == 0 &&
-	    peer->ibp_error != 0) {
+	    !peer->ibp_accepting &&
+	    !peer->ibp_connecting &&
+	    peer->ibp_error) {
 		error = peer->ibp_error;
 		peer->ibp_error = 0;
 
@@ -1807,7 +1819,7 @@
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-	if (error != 0)
+	if (error)
 		lnet_notify(peer->ibp_ni,
 			    peer->ibp_nid, 0, last_alive);
 }
@@ -1815,25 +1827,27 @@
 void
 kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 {
-	/* This just does the immediate housekeeping.  'error' is zero for a
+	/*
+	 * This just does the immediate housekeeping. 'error' is zero for a
 	 * normal shutdown which can happen only after the connection has been
 	 * established.  If the connection is established, schedule the
-	 * connection to be finished off by the connd.  Otherwise the connd is
+	 * connection to be finished off by the connd. Otherwise the connd is
 	 * already dealing with it (either to set it up or tear it down).
-	 * Caller holds kib_global_lock exclusively in irq context */
+	 * Caller holds kib_global_lock exclusively in irq context
+	 */
 	kib_peer_t *peer = conn->ibc_peer;
 	kib_dev_t *dev;
 	unsigned long flags;
 
-	LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+	LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
-	if (error != 0 && conn->ibc_comms_error == 0)
+	if (error && !conn->ibc_comms_error)
 		conn->ibc_comms_error = error;
 
 	if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
 		return; /* already being handled  */
 
-	if (error == 0 &&
+	if (!error &&
 	    list_empty(&conn->ibc_tx_noops) &&
 	    list_empty(&conn->ibc_tx_queue) &&
 	    list_empty(&conn->ibc_tx_queue_rsrvd) &&
@@ -1843,12 +1857,12 @@
 		       libcfs_nid2str(peer->ibp_nid));
 	} else {
 		CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
-		       libcfs_nid2str(peer->ibp_nid), error,
-		       list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
-		       list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
-		       list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-		       list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
-		       list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+		        libcfs_nid2str(peer->ibp_nid), error,
+		        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+		        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+		        list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+		        list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+		        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
 	}
 
 	dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -1865,7 +1879,7 @@
 
 	kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
-	if (error != 0 &&
+	if (error &&
 	    kiblnd_dev_can_failover(dev)) {
 		list_add_tail(&dev->ibd_fail_list,
 			      &kiblnd_data.kib_failed_devs);
@@ -1929,8 +1943,7 @@
 
 		if (txs == &conn->ibc_active_txs) {
 			LASSERT(!tx->tx_queued);
-			LASSERT(tx->tx_waiting ||
-				 tx->tx_sending != 0);
+			LASSERT(tx->tx_waiting || tx->tx_sending);
 		} else {
 			LASSERT(tx->tx_queued);
 		}
@@ -1938,7 +1951,7 @@
 		tx->tx_status = -ECONNABORTED;
 		tx->tx_waiting = 0;
 
-		if (tx->tx_sending == 0) {
+		if (!tx->tx_sending) {
 			tx->tx_queued = 0;
 			list_del(&tx->tx_list);
 			list_add(&tx->tx_list, &zombies);
@@ -1958,14 +1971,17 @@
 
 	kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
 
-	/* abort_receives moves QP state to IB_QPS_ERR.  This is only required
+	/*
+	 * abort_receives moves QP state to IB_QPS_ERR.  This is only required
 	 * for connections that didn't get as far as being connected, because
-	 * rdma_disconnect() does this for free. */
+	 * rdma_disconnect() does this for free.
+	 */
 	kiblnd_abort_receives(conn);
 
-	/* Complete all tx descs not waiting for sends to complete.
-	 * NB we should be safe from RDMA now that the QP has changed state */
-
+	/*
+	 * Complete all tx descs not waiting for sends to complete.
+	 * NB we should be safe from RDMA now that the QP has changed state
+	 */
 	kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
 	kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
 	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
@@ -1981,7 +1997,7 @@
 	LIST_HEAD(zombies);
 	unsigned long flags;
 
-	LASSERT(error != 0);
+	LASSERT(error);
 	LASSERT(!in_interrupt());
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1994,11 +2010,11 @@
 		peer->ibp_accepting--;
 	}
 
-	if (peer->ibp_connecting != 0 ||
-	    peer->ibp_accepting != 0) {
+	if (peer->ibp_connecting ||
+	    peer->ibp_accepting) {
 		/* another connection attempt under way... */
 		write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-					    flags);
+					flags);
 		return;
 	}
 
@@ -2047,14 +2063,14 @@
 
 	LASSERT(!in_interrupt());
 	LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
-		  peer->ibp_connecting > 0) ||
+		 peer->ibp_connecting > 0) ||
 		 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
-		  peer->ibp_accepting > 0));
+		 peer->ibp_accepting > 0));
 
 	LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
 	conn->ibc_connvars = NULL;
 
-	if (status != 0) {
+	if (status) {
 		/* failed to establish connection */
 		kiblnd_peer_connect_failed(peer, active, status);
 		kiblnd_finalise_conn(conn);
@@ -2068,8 +2084,10 @@
 	kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
 	kiblnd_peer_alive(peer);
 
-	/* Add conn to peer's list and nuke any dangling conns from a different
-	 * peer instance... */
+	/*
+	 * Add conn to peer's list and nuke any dangling conns from a different
+	 * peer instance...
+	 */
 	kiblnd_conn_addref(conn);	       /* +1 ref for ibc_list */
 	list_add(&conn->ibc_list, &peer->ibp_conns);
 	if (active)
@@ -2077,7 +2095,7 @@
 	else
 		peer->ibp_accepting--;
 
-	if (peer->ibp_version == 0) {
+	if (!peer->ibp_version) {
 		peer->ibp_version     = conn->ibc_version;
 		peer->ibp_incarnation = conn->ibc_incarnation;
 	}
@@ -2095,7 +2113,7 @@
 	list_del_init(&peer->ibp_tx_queue);
 
 	if (!kiblnd_peer_active(peer) ||	/* peer has been deleted */
-	    conn->ibc_comms_error != 0) {       /* error has happened already */
+	    conn->ibc_comms_error) {       /* error has happened already */
 		lnet_ni_t *ni = peer->ibp_ni;
 
 		/* start to shut down connection */
@@ -2131,7 +2149,7 @@
 
 	rc = rdma_reject(cmid, rej, sizeof(*rej));
 
-	if (rc != 0)
+	if (rc)
 		CWARN("Error %d sending reject\n", rc);
 }
 
@@ -2159,14 +2177,14 @@
 
 	/* cmid inherits 'context' from the corresponding listener id */
 	ibdev = (kib_dev_t *)cmid->context;
-	LASSERT(ibdev != NULL);
+	LASSERT(ibdev);
 
 	memset(&rej, 0, sizeof(rej));
 	rej.ibr_magic = IBLND_MSG_MAGIC;
 	rej.ibr_why = IBLND_REJECT_FATAL;
 	rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
-	peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+	peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr;
 	if (*kiblnd_tunables.kib_require_priv_port &&
 	    ntohs(peer_addr->sin_port) >= PROT_SOCK) {
 		__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
@@ -2181,12 +2199,14 @@
 		goto failed;
 	}
 
-	/* Future protocol version compatibility support!  If the
+	/*
+	 * Future protocol version compatibility support!  If the
 	 * o2iblnd-specific protocol changes, or when LNET unifies
 	 * protocols over all LNDs, the initial connection will
 	 * negotiate a protocol version.  I trap this here to avoid
 	 * console errors; the reject tells the peer which protocol I
-	 * speak. */
+	 * speak.
+	 */
 	if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
 	    reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
 		goto failed;
@@ -2200,7 +2220,7 @@
 		goto failed;
 
 	rc = kiblnd_unpack_msg(reqmsg, priv_nob);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't parse connection request: %d\n", rc);
 		goto failed;
 	}
@@ -2208,17 +2228,17 @@
 	nid = reqmsg->ibm_srcnid;
 	ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
 
-	if (ni != NULL) {
+	if (ni) {
 		net = (kib_net_t *)ni->ni_data;
 		rej.ibr_incarnation = net->ibn_incarnation;
 	}
 
-	if (ni == NULL ||			 /* no matching net */
+	if (!ni ||			 /* no matching net */
 	    ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
 	    net->ibn_dev != ibdev) {	      /* wrong device */
 		CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
 		       libcfs_nid2str(nid),
-		       ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+		       !ni ? "NA" : libcfs_nid2str(ni->ni_nid),
 		       ibdev->ibd_ifname, ibdev->ibd_nnets,
 		       &ibdev->ibd_ifip,
 		       libcfs_nid2str(reqmsg->ibm_dstnid));
@@ -2227,7 +2247,7 @@
 	}
 
        /* check time stamp as soon as possible */
-	if (reqmsg->ibm_dststamp != 0 &&
+	if (reqmsg->ibm_dststamp &&
 	    reqmsg->ibm_dststamp != net->ibn_incarnation) {
 		CWARN("Stale connection request\n");
 		rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2266,7 +2286,6 @@
 			rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
 
 		goto failed;
-
 	}
 
 	if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
@@ -2279,7 +2298,7 @@
 
 	/* assume 'nid' is a new peer; create  */
 	rc = kiblnd_create_peer(ni, &peer, nid);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
 		rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
 		goto failed;
@@ -2288,8 +2307,8 @@
 	write_lock_irqsave(g_lock, flags);
 
 	peer2 = kiblnd_find_peer_locked(nid);
-	if (peer2 != NULL) {
-		if (peer2->ibp_version == 0) {
+	if (peer2) {
+		if (!peer2->ibp_version) {
 			peer2->ibp_version     = version;
 			peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
 		}
@@ -2309,7 +2328,7 @@
 		}
 
 		/* tie-break connection race in favour of the higher NID */
-		if (peer2->ibp_connecting != 0 &&
+		if (peer2->ibp_connecting &&
 		    nid < ni->ni_nid) {
 			write_unlock_irqrestore(g_lock, flags);
 
@@ -2328,16 +2347,16 @@
 		peer = peer2;
 	} else {
 		/* Brand new peer */
-		LASSERT(peer->ibp_accepting == 0);
-		LASSERT(peer->ibp_version == 0 &&
-			 peer->ibp_incarnation == 0);
+		LASSERT(!peer->ibp_accepting);
+		LASSERT(!peer->ibp_version &&
+			!peer->ibp_incarnation);
 
 		peer->ibp_accepting   = 1;
 		peer->ibp_version     = version;
 		peer->ibp_incarnation = reqmsg->ibm_srcstamp;
 
 		/* I have a ref on ni that prevents it being shutdown */
-		LASSERT(net->ibn_shutdown == 0);
+		LASSERT(!net->ibn_shutdown);
 
 		kiblnd_peer_addref(peer);
 		list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
@@ -2346,16 +2365,17 @@
 	}
 
 	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
-	if (conn == NULL) {
+	if (!conn) {
 		kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
 		kiblnd_peer_decref(peer);
 		rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
 		goto failed;
 	}
 
-	/* conn now "owns" cmid, so I return success from here on to ensure the
-	 * CM callback doesn't destroy cmid. */
-
+	/*
+	 * conn now "owns" cmid, so I return success from here on to ensure the
+	 * CM callback doesn't destroy cmid.
+	 */
 	conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
 	conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
@@ -2385,7 +2405,7 @@
 	CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
 
 	rc = rdma_accept(cmid, &cp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
 		rej.ibr_version = version;
 		rej.ibr_why     = IBLND_REJECT_FATAL;
@@ -2399,7 +2419,7 @@
 	return 0;
 
  failed:
-	if (ni != NULL)
+	if (ni)
 		lnet_ni_decref(ni);
 
 	rej.ibr_version             = version;
@@ -2412,7 +2432,7 @@
 
 static void
 kiblnd_reconnect(kib_conn_t *conn, int version,
-		  __u64 incarnation, int why, kib_connparams_t *cp)
+		 __u64 incarnation, int why, kib_connparams_t *cp)
 {
 	kib_peer_t *peer = conn->ibc_peer;
 	char *reason;
@@ -2424,15 +2444,17 @@
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-	/* retry connection if it's still needed and no other connection
+	/*
+	 * retry connection if it's still needed and no other connection
 	 * attempts (active or passive) are in progress
 	 * NB: reconnect is still needed even when ibp_tx_queue is
 	 * empty if ibp_version != version because reconnect may be
-	 * initiated by kiblnd_query() */
+	 * initiated by kiblnd_query()
+	 */
 	if ((!list_empty(&peer->ibp_tx_queue) ||
 	     peer->ibp_version != version) &&
 	    peer->ibp_connecting == 1 &&
-	    peer->ibp_accepting == 0) {
+	    !peer->ibp_accepting) {
 		retry = 1;
 		peer->ibp_connecting++;
 
@@ -2466,9 +2488,9 @@
 	CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
 		libcfs_nid2str(peer->ibp_nid),
 		reason, IBLND_MSG_VERSION, version,
-		cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
-		cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
-		cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
+		cp ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
+		cp ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
+		cp ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
 
 	kiblnd_connect_peer(peer);
 }
@@ -2521,9 +2543,11 @@
 
 			if (priv_nob >= sizeof(kib_rej_t) &&
 			    rej->ibr_version > IBLND_MSG_VERSION_1) {
-				/* priv_nob is always 148 in current version
+				/*
+				 * priv_nob is always 148 in current version
 				 * of OFED, so we still need to check version.
-				 * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
+				 * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
+				 */
 				cp = &rej->ibr_cp;
 
 				if (flip) {
@@ -2571,7 +2595,7 @@
 			case IBLND_REJECT_MSG_QUEUE_SIZE:
 				CERROR("%s rejected: incompatible message queue depth %d, %d\n",
 				       libcfs_nid2str(peer->ibp_nid),
-				       cp != NULL ? cp->ibcp_queue_depth :
+				       cp ? cp->ibcp_queue_depth :
 				       IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
 				       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
 				break;
@@ -2579,7 +2603,7 @@
 			case IBLND_REJECT_RDMA_FRAGS:
 				CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
 				       libcfs_nid2str(peer->ibp_nid),
-				       cp != NULL ? cp->ibcp_max_frags :
+				       cp ? cp->ibcp_max_frags :
 				       IBLND_RDMA_FRAGS(rej->ibr_version),
 				       IBLND_RDMA_FRAGS(conn->ibc_version));
 				break;
@@ -2623,9 +2647,9 @@
 	int rc = kiblnd_unpack_msg(msg, priv_nob);
 	unsigned long flags;
 
-	LASSERT(net != NULL);
+	LASSERT(net);
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't unpack connack from %s: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), rc);
 		goto failed;
@@ -2682,7 +2706,7 @@
 		rc = -ESTALE;
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), rc,
 		       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
@@ -2699,12 +2723,13 @@
 	return;
 
  failed:
-	/* NB My QP has already established itself, so I handle anything going
+	/*
+	 * NB My QP has already established itself, so I handle anything going
 	 * wrong here by setting ibc_comms_error.
 	 * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
-	 * immediately tears it down. */
-
-	LASSERT(rc != 0);
+	 * immediately tears it down.
+	 */
+	LASSERT(rc);
 	conn->ibc_comms_error = rc;
 	kiblnd_connreq_done(conn, 0);
 }
@@ -2724,22 +2749,23 @@
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	incarnation = peer->ibp_incarnation;
-	version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
-					     peer->ibp_version;
+	version = !peer->ibp_version ? IBLND_MSG_VERSION :
+				       peer->ibp_version;
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
 	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
-	if (conn == NULL) {
+	if (!conn) {
 		kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
 		kiblnd_peer_decref(peer); /* lose cmid's ref */
 		return -ENOMEM;
 	}
 
-	/* conn "owns" cmid now, so I return success from here on to ensure the
+	/*
+	 * conn "owns" cmid now, so I return success from here on to ensure the
 	 * CM callback doesn't destroy cmid. conn also takes over cmid's ref
-	 * on peer */
-
+	 * on peer
+	 */
 	msg = &conn->ibc_connvars->cv_msg;
 
 	memset(msg, 0, sizeof(*msg));
@@ -2764,7 +2790,7 @@
 	LASSERT(conn->ibc_cmid == cmid);
 
 	rc = rdma_connect(cmid, &cp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't connect to %s: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), rc);
 		kiblnd_connreq_done(conn, rc);
@@ -2798,10 +2824,10 @@
 	case RDMA_CM_EVENT_ADDR_ERROR:
 		peer = (kib_peer_t *)cmid->context;
 		CNETERR("%s: ADDR ERROR %d\n",
-		       libcfs_nid2str(peer->ibp_nid), event->status);
+		        libcfs_nid2str(peer->ibp_nid), event->status);
 		kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
 		kiblnd_peer_decref(peer);
-		return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
+		return -EHOSTUNREACH;      /* rc destroys cmid */
 
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
 		peer = (kib_peer_t *)cmid->context;
@@ -2809,14 +2835,14 @@
 		CDEBUG(D_NET, "%s Addr resolved: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), event->status);
 
-		if (event->status != 0) {
+		if (event->status) {
 			CNETERR("Can't resolve address for %s: %d\n",
 				libcfs_nid2str(peer->ibp_nid), event->status);
 			rc = event->status;
 		} else {
 			rc = rdma_resolve_route(
 				cmid, *kiblnd_tunables.kib_timeout * 1000);
-			if (rc == 0)
+			if (!rc)
 				return 0;
 			/* Can't initiate route resolution */
 			CERROR("Can't resolve route for %s: %d\n",
@@ -2824,7 +2850,7 @@
 		}
 		kiblnd_peer_connect_failed(peer, 1, rc);
 		kiblnd_peer_decref(peer);
-		return rc;		      /* rc != 0 destroys cmid */
+		return rc;		      /* rc destroys cmid */
 
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 		peer = (kib_peer_t *)cmid->context;
@@ -2832,28 +2858,28 @@
 			libcfs_nid2str(peer->ibp_nid), event->status);
 		kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
 		kiblnd_peer_decref(peer);
-		return -EHOSTUNREACH;	   /* rc != 0 destroys cmid */
+		return -EHOSTUNREACH;	   /* rc destroys cmid */
 
 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
 		peer = (kib_peer_t *)cmid->context;
 		CDEBUG(D_NET, "%s Route resolved: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), event->status);
 
-		if (event->status == 0)
+		if (!event->status)
 			return kiblnd_active_connect(cmid);
 
 		CNETERR("Can't resolve route for %s: %d\n",
-		       libcfs_nid2str(peer->ibp_nid), event->status);
+		        libcfs_nid2str(peer->ibp_nid), event->status);
 		kiblnd_peer_connect_failed(peer, 1, event->status);
 		kiblnd_peer_decref(peer);
-		return event->status;	   /* rc != 0 destroys cmid */
+		return event->status;	   /* rc destroys cmid */
 
 	case RDMA_CM_EVENT_UNREACHABLE:
 		conn = (kib_conn_t *)cmid->context;
 		LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
 			conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
 		CNETERR("%s: UNREACHABLE %d\n",
-		       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+		        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
 		kiblnd_connreq_done(conn, -ENETDOWN);
 		kiblnd_conn_decref(conn);
 		return 0;
@@ -2876,8 +2902,8 @@
 
 		case IBLND_CONN_PASSIVE_WAIT:
 			CERROR("%s: REJECTED %d\n",
-				libcfs_nid2str(conn->ibc_peer->ibp_nid),
-				event->status);
+			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
+			       event->status);
 			kiblnd_connreq_done(conn, -ECONNRESET);
 			break;
 
@@ -2933,8 +2959,10 @@
 		LCONSOLE_ERROR_MSG(0x131,
 				   "Received notification of device removal\n"
 				   "Please shutdown LNET to allow this to proceed\n");
-		/* Can't remove network from underneath LNET for now, so I have
-		 * to ignore this */
+		/*
+		 * Can't remove network from underneath LNET for now, so I have
+		 * to ignore this
+		 */
 		return 0;
 
 	case RDMA_CM_EVENT_ADDR_CHANGE:
@@ -2956,7 +2984,7 @@
 			LASSERT(tx->tx_queued);
 		} else {
 			LASSERT(!tx->tx_queued);
-			LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+			LASSERT(tx->tx_waiting || tx->tx_sending);
 		}
 
 		if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
@@ -2993,9 +3021,11 @@
 	struct list_head *ctmp;
 	unsigned long flags;
 
-	/* NB. We expect to have a look at all the peers and not find any
+	/*
+	 * NB. We expect to have a look at all the peers and not find any
 	 * RDMAs to time out, so we just use a shared lock while we
-	 * take a look... */
+	 * take a look...
+	 */
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	list_for_each(ptmp, peers) {
@@ -3028,8 +3058,7 @@
 				       conn->ibc_reserved_credits);
 				list_add(&conn->ibc_connd_list, &closes);
 			} else {
-				list_add(&conn->ibc_connd_list,
-					     &checksends);
+				list_add(&conn->ibc_connd_list, &checksends);
 			}
 			/* +ref for 'closes' or 'checksends' */
 			kiblnd_conn_addref(conn);
@@ -3040,21 +3069,24 @@
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-	/* Handle timeout by closing the whole
+	/*
+	 * Handle timeout by closing the whole
 	 * connection. We can only be sure RDMA activity
-	 * has ceased once the QP has been modified. */
+	 * has ceased once the QP has been modified.
+	 */
 	list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) {
 		list_del(&conn->ibc_connd_list);
 		kiblnd_close_conn(conn, -ETIMEDOUT);
 		kiblnd_conn_decref(conn);
 	}
 
-	/* In case we have enough credits to return via a
+	/*
+	 * In case we have enough credits to return via a
 	 * NOOP, but there were no non-blocking tx descs
-	 * free to do it last time... */
+	 * free to do it last time...
+	 */
 	while (!list_empty(&checksends)) {
-		conn = list_entry(checksends.next,
-				      kib_conn_t, ibc_connd_list);
+		conn = list_entry(checksends.next, kib_conn_t, ibc_connd_list);
 		list_del(&conn->ibc_connd_list);
 		kiblnd_check_sends(conn);
 		kiblnd_conn_decref(conn);
@@ -3094,12 +3126,11 @@
 	spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
 	while (!kiblnd_data.kib_shutdown) {
-
 		dropped_lock = 0;
 
 		if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
 			conn = list_entry(kiblnd_data.kib_connd_zombies.next,
-					      kib_conn_t, ibc_list);
+					  kib_conn_t, ibc_list);
 			list_del(&conn->ibc_list);
 
 			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
@@ -3113,7 +3144,7 @@
 
 		if (!list_empty(&kiblnd_data.kib_connd_conns)) {
 			conn = list_entry(kiblnd_data.kib_connd_conns.next,
-					      kib_conn_t, ibc_list);
+					  kib_conn_t, ibc_list);
 			list_del(&conn->ibc_list);
 
 			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
@@ -3136,18 +3167,19 @@
 			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 			dropped_lock = 1;
 
-			/* Time to check for RDMA timeouts on a few more
+			/*
+			 * Time to check for RDMA timeouts on a few more
 			 * peers: I do checks every 'p' seconds on a
 			 * proportion of the peer table and I need to check
 			 * every connection 'n' times within a timeout
 			 * interval, to ensure I detect a timeout on any
 			 * connection within (n+1)/n times the timeout
-			 * interval. */
-
+			 * interval.
+			 */
 			if (*kiblnd_tunables.kib_timeout > n * p)
 				chunk = (chunk * n * p) /
 					*kiblnd_tunables.kib_timeout;
-			if (chunk == 0)
+			if (!chunk)
 				chunk = 1;
 
 			for (i = 0; i < chunk; i++) {
@@ -3206,12 +3238,14 @@
 		LBUG();
 
 	case IBLND_WID_RDMA:
-		/* We only get RDMA completion notification if it fails.  All
+		/*
+		 * We only get RDMA completion notification if it fails.  All
 		 * subsequent work items, including the final SEND will fail
 		 * too.  However we can't print out any more info about the
 		 * failing RDMA because 'tx' might be back on the idle list or
 		 * even reused already if we didn't manage to post all our work
-		 * items */
+		 * items
+		 */
 		CNETERR("RDMA (tx: %p) failed: %d\n",
 			kiblnd_wreqid2ptr(wc->wr_id), wc->status);
 		return;
@@ -3230,11 +3264,13 @@
 void
 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 {
-	/* NB I'm not allowed to schedule this conn once its refcount has
+	/*
+	 * NB I'm not allowed to schedule this conn once its refcount has
 	 * reached 0.  Since fundamentally I'm racing with scheduler threads
 	 * consuming my CQ I could be called after all completions have
-	 * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
-	 * and this CQ is about to be destroyed so I NOOP. */
+	 * occurred.  But in this case, !ibc_nrx && !ibc_nsends_posted
+	 * and this CQ is about to be destroyed so I NOOP.
+	 */
 	kib_conn_t *conn = arg;
 	struct kib_sched_info *sched = conn->ibc_sched;
 	unsigned long flags;
@@ -3288,7 +3324,7 @@
 	sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
 	rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
-	if (rc != 0) {
+	if (rc) {
 		CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
 		      sched->ibs_cpt);
 	}
@@ -3308,8 +3344,8 @@
 		did_something = 0;
 
 		if (!list_empty(&sched->ibs_conns)) {
-			conn = list_entry(sched->ibs_conns.next,
-					      kib_conn_t, ibc_sched_list);
+			conn = list_entry(sched->ibs_conns.next, kib_conn_t,
+					  ibc_sched_list);
 			/* take over kib_sched_conns' ref on conn... */
 			LASSERT(conn->ibc_scheduled);
 			list_del(&conn->ibc_sched_list);
@@ -3318,7 +3354,7 @@
 			spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
 			rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
-			if (rc == 0) {
+			if (!rc) {
 				rc = ib_req_notify_cq(conn->ibc_cq,
 						      IB_CQ_NEXT_COMP);
 				if (rc < 0) {
@@ -3327,7 +3363,7 @@
 					kiblnd_close_conn(conn, -EIO);
 					kiblnd_conn_decref(conn);
 					spin_lock_irqsave(&sched->ibs_lock,
-							      flags);
+							  flags);
 					continue;
 				}
 
@@ -3346,21 +3382,23 @@
 
 			spin_lock_irqsave(&sched->ibs_lock, flags);
 
-			if (rc != 0 || conn->ibc_ready) {
-				/* There may be another completion waiting; get
+			if (rc || conn->ibc_ready) {
+				/*
+				 * There may be another completion waiting; get
 				 * another scheduler to check while I handle
-				 * this one... */
+				 * this one...
+				 */
 				/* +1 ref for sched_conns */
 				kiblnd_conn_addref(conn);
 				list_add_tail(&conn->ibc_sched_list,
-						  &sched->ibs_conns);
+					      &sched->ibs_conns);
 				if (waitqueue_active(&sched->ibs_waitq))
 					wake_up(&sched->ibs_waitq);
 			} else {
 				conn->ibc_scheduled = 0;
 			}
 
-			if (rc != 0) {
+			if (rc) {
 				spin_unlock_irqrestore(&sched->ibs_lock, flags);
 				kiblnd_complete(&wc);
 
@@ -3400,7 +3438,7 @@
 	unsigned long flags;
 	int rc;
 
-	LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+	LASSERT(*kiblnd_tunables.kib_dev_failover);
 
 	cfs_block_allsigs();
 
@@ -3459,13 +3497,15 @@
 		remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
 		write_lock_irqsave(glock, flags);
 
-		if (!long_sleep || rc != 0)
+		if (!long_sleep || rc)
 			continue;
 
-		/* have a long sleep, routine check all active devices,
+		/*
+		 * have a long sleep, routine check all active devices,
 		 * we need checking like this because if there is not active
 		 * connection on the dev and no SEND from local, we may listen
-		 * on wrong HCA for ever while there is a bonding failover */
+		 * on wrong HCA for ever while there is a bonding failover
+		 */
 		list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
 			if (kiblnd_dev_can_failover(dev)) {
 				list_add_tail(&dev->ibd_fail_list,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 1d4e7ef..b4607da 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -52,8 +52,10 @@
 module_param(timeout, int, 0644);
 MODULE_PARM_DESC(timeout, "timeout (seconds)");
 
-/* Number of threads in each scheduler pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's set to zero. */
+/*
+ * Number of threads in each scheduler pool which is percpt,
+ * we will estimate reasonable value based on CPUs if it's set to zero.
+ */
 static int nscheds;
 module_param(nscheds, int, 0444);
 MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
@@ -200,7 +202,7 @@
 	if (*kiblnd_tunables.kib_map_on_demand == 1)
 		*kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
 
-	if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+	if (!*kiblnd_tunables.kib_concurrent_sends) {
 		if (*kiblnd_tunables.kib_map_on_demand > 0 &&
 		    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
 			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 05aa90e..854814c 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -70,7 +70,7 @@
 	ksock_route_t *route;
 
 	LIBCFS_ALLOC(route, sizeof(*route));
-	if (route == NULL)
+	if (!route)
 		return NULL;
 
 	atomic_set(&route->ksnr_refcount, 1);
@@ -91,9 +91,9 @@
 void
 ksocknal_destroy_route(ksock_route_t *route)
 {
-	LASSERT(atomic_read(&route->ksnr_refcount) == 0);
+	LASSERT(!atomic_read(&route->ksnr_refcount));
 
-	if (route->ksnr_peer != NULL)
+	if (route->ksnr_peer)
 		ksocknal_peer_decref(route->ksnr_peer);
 
 	LIBCFS_FREE(route, sizeof(*route));
@@ -110,7 +110,7 @@
 	LASSERT(!in_interrupt());
 
 	LIBCFS_ALLOC(peer, sizeof(*peer));
-	if (peer == NULL)
+	if (!peer)
 		return -ENOMEM;
 
 	peer->ksnp_ni = ni;
@@ -152,10 +152,10 @@
 	ksock_net_t *net = peer->ksnp_ni->ni_data;
 
 	CDEBUG(D_NET, "peer %s %p deleted\n",
-		libcfs_id2str(peer->ksnp_id), peer);
+	       libcfs_id2str(peer->ksnp_id), peer);
 
-	LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
-	LASSERT(peer->ksnp_accepting == 0);
+	LASSERT(!atomic_read(&peer->ksnp_refcount));
+	LASSERT(!peer->ksnp_accepting);
 	LASSERT(list_empty(&peer->ksnp_conns));
 	LASSERT(list_empty(&peer->ksnp_routes));
 	LASSERT(list_empty(&peer->ksnp_tx_queue));
@@ -163,10 +163,12 @@
 
 	LIBCFS_FREE(peer, sizeof(*peer));
 
-	/* NB a peer's connections and routes keep a reference on their peer
+	/*
+	 * NB a peer's connections and routes keep a reference on their peer
 	 * until they are destroyed, so we can be assured that _all_ state to
 	 * do with this peer has been cleaned up when its refcount drops to
-	 * zero. */
+	 * zero.
+	 */
 	spin_lock_bh(&net->ksnn_lock);
 	net->ksnn_npeers--;
 	spin_unlock_bh(&net->ksnn_lock);
@@ -180,7 +182,6 @@
 	ksock_peer_t *peer;
 
 	list_for_each(tmp, peer_list) {
-
 		peer = list_entry(tmp, ksock_peer_t, ksnp_list);
 
 		LASSERT(!peer->ksnp_closing);
@@ -207,7 +208,7 @@
 
 	read_lock(&ksocknal_data.ksnd_global_lock);
 	peer = ksocknal_find_peer_locked(ni, id);
-	if (peer != NULL)			/* +1 ref for caller? */
+	if (peer)			/* +1 ref for caller? */
 		ksocknal_peer_addref(peer);
 	read_unlock(&ksocknal_data.ksnd_global_lock);
 
@@ -226,9 +227,11 @@
 		ip = peer->ksnp_passive_ips[i];
 
 		iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
-		/* All IPs in peer->ksnp_passive_ips[] come from the
-		 * interface list, therefore the call must succeed. */
-		LASSERT(iface != NULL);
+		/*
+		 * All IPs in peer->ksnp_passive_ips[] come from the
+		 * interface list, therefore the call must succeed.
+		 */
+		LASSERT(iface);
 
 		CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
 		       peer, iface, iface->ksni_nroutes);
@@ -246,8 +249,8 @@
 
 static int
 ksocknal_get_peer_info(lnet_ni_t *ni, int index,
-			lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
-			int *port, int *conn_count, int *share_count)
+		       lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+		       int *port, int *conn_count, int *share_count)
 {
 	ksock_peer_t *peer;
 	struct list_head *ptmp;
@@ -260,14 +263,13 @@
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
 	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-
 		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
 			peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 
 			if (peer->ksnp_ni != ni)
 				continue;
 
-			if (peer->ksnp_n_passive_ips == 0 &&
+			if (!peer->ksnp_n_passive_ips &&
 			    list_empty(&peer->ksnp_routes)) {
 				if (index-- > 0)
 					continue;
@@ -301,7 +303,7 @@
 					continue;
 
 				route = list_entry(rtmp, ksock_route_t,
-						       ksnr_list);
+						   ksnr_list);
 
 				*id = peer->ksnp_id;
 				*myip = route->ksnr_myipaddr;
@@ -330,7 +332,7 @@
 	ksocknal_route_addref(route);
 
 	if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
-		if (route->ksnr_myipaddr == 0) {
+		if (!route->ksnr_myipaddr) {
 			/* route wasn't bound locally yet (the initial route) */
 			CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
 			       libcfs_id2str(peer->ksnp_id),
@@ -345,21 +347,23 @@
 
 			iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 						  route->ksnr_myipaddr);
-			if (iface != NULL)
+			if (iface)
 				iface->ksni_nroutes--;
 		}
 		route->ksnr_myipaddr = conn->ksnc_myipaddr;
 		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 					  route->ksnr_myipaddr);
-		if (iface != NULL)
+		if (iface)
 			iface->ksni_nroutes++;
 	}
 
-	route->ksnr_connected |= (1<<type);
+	route->ksnr_connected |= (1 << type);
 	route->ksnr_conn_count++;
 
-	/* Successful connection => further attempts can
-	 * proceed immediately */
+	/*
+	 * Successful connection => further attempts can
+	 * proceed immediately
+	 */
 	route->ksnr_retry_interval = 0;
 }
 
@@ -371,10 +375,10 @@
 	ksock_route_t *route2;
 
 	LASSERT(!peer->ksnp_closing);
-	LASSERT(route->ksnr_peer == NULL);
+	LASSERT(!route->ksnr_peer);
 	LASSERT(!route->ksnr_scheduled);
 	LASSERT(!route->ksnr_connecting);
-	LASSERT(route->ksnr_connected == 0);
+	LASSERT(!route->ksnr_connected);
 
 	/* LASSERT(unique) */
 	list_for_each(tmp, &peer->ksnp_routes) {
@@ -382,8 +386,8 @@
 
 		if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
 			CERROR("Duplicate route %s %pI4h\n",
-				libcfs_id2str(peer->ksnp_id),
-				&route->ksnr_ipaddr);
+			       libcfs_id2str(peer->ksnp_id),
+			       &route->ksnr_ipaddr);
 			LBUG();
 		}
 	}
@@ -425,10 +429,10 @@
 		ksocknal_close_conn_locked(conn, 0);
 	}
 
-	if (route->ksnr_myipaddr != 0) {
+	if (route->ksnr_myipaddr) {
 		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 					  route->ksnr_myipaddr);
-		if (iface != NULL)
+		if (iface)
 			iface->ksni_nroutes--;
 	}
 
@@ -438,8 +442,10 @@
 
 	if (list_empty(&peer->ksnp_routes) &&
 	    list_empty(&peer->ksnp_conns)) {
-		/* I've just removed the last route to a peer with no active
-		 * connections */
+		/*
+		 * I've just removed the last route to a peer with no active
+		 * connections
+		 */
 		ksocknal_unlink_peer_locked(peer);
 	}
 }
@@ -460,11 +466,11 @@
 
 	/* Have a brand new peer ready... */
 	rc = ksocknal_create_peer(&peer, ni, id);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	route = ksocknal_create_route(ipaddr, port);
-	if (route == NULL) {
+	if (!route) {
 		ksocknal_peer_decref(peer);
 		return -ENOMEM;
 	}
@@ -472,16 +478,16 @@
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	/* always called with a ref on ni, so shutdown can't have started */
-	LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+	LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
 
 	peer2 = ksocknal_find_peer_locked(ni, id);
-	if (peer2 != NULL) {
+	if (peer2) {
 		ksocknal_peer_decref(peer);
 		peer = peer2;
 	} else {
 		/* peer table takes my ref on peer */
 		list_add_tail(&peer->ksnp_list,
-				   ksocknal_nid2peerlist(id.nid));
+			      ksocknal_nid2peerlist(id.nid));
 	}
 
 	route2 = NULL;
@@ -493,7 +499,7 @@
 
 		route2 = NULL;
 	}
-	if (route2 == NULL) {
+	if (!route2) {
 		ksocknal_add_route_locked(peer, route);
 		route->ksnr_share_count++;
 	} else {
@@ -524,7 +530,7 @@
 		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		/* no match */
-		if (!(ip == 0 || route->ksnr_ipaddr == ip))
+		if (ip && route->ksnr_ipaddr != ip)
 			continue;
 
 		route->ksnr_share_count = 0;
@@ -538,15 +544,16 @@
 		nshared += route->ksnr_share_count;
 	}
 
-	if (nshared == 0) {
-		/* remove everything else if there are no explicit entries
-		 * left */
-
+	if (!nshared) {
+		/*
+		 * remove everything else if there are no explicit entries
+		 * left
+		 */
 		list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
 			route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 			/* we should only be removing auto-entries */
-			LASSERT(route->ksnr_share_count == 0);
+			LASSERT(!route->ksnr_share_count);
 			ksocknal_del_route_locked(route);
 		}
 
@@ -575,16 +582,16 @@
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	if (id.nid != LNET_NID_ANY)
-		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
-	else {
+	if (id.nid != LNET_NID_ANY) {
+		lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+		hi = lo;
+	} else {
 		lo = 0;
 		hi = ksocknal_data.ksnd_peer_hash_size - 1;
 	}
 
 	for (i = lo; i <= hi; i++) {
-		list_for_each_safe(ptmp, pnxt,
-					&ksocknal_data.ksnd_peers[i]) {
+		list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
 			peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 
 			if (peer->ksnp_ni != ni)
@@ -604,7 +611,7 @@
 				LASSERT(list_empty(&peer->ksnp_routes));
 
 				list_splice_init(&peer->ksnp_tx_queue,
-						     &zombies);
+						 &zombies);
 			}
 
 			ksocknal_peer_decref(peer);     /* ...till here */
@@ -645,7 +652,7 @@
 					continue;
 
 				conn = list_entry(ctmp, ksock_conn_t,
-						       ksnc_list);
+						  ksnc_list);
 				ksocknal_conn_addref(conn);
 				read_unlock(&ksocknal_data.ksnd_global_lock);
 				return conn;
@@ -692,8 +699,10 @@
 	nip = net->ksnn_ninterfaces;
 	LASSERT(nip <= LNET_MAX_INTERFACES);
 
-	/* Only offer interfaces for additional connections if I have
-	 * more than one. */
+	/*
+	 * Only offer interfaces for additional connections if I have
+	 * more than one.
+	 */
 	if (nip < 2) {
 		read_unlock(&ksocknal_data.ksnd_global_lock);
 		return 0;
@@ -701,7 +710,7 @@
 
 	for (i = 0; i < nip; i++) {
 		ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
-		LASSERT(ipaddrs[i] != 0);
+		LASSERT(ipaddrs[i]);
 	}
 
 	read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -719,11 +728,11 @@
 	int i;
 
 	for (i = 0; i < nips; i++) {
-		if (ips[i] == 0)
+		if (!ips[i])
 			continue;
 
 		this_xor = ips[i] ^ iface->ksni_ipaddr;
-		this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
+		this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0;
 
 		if (!(best < 0 ||
 		      best_netmatch < this_netmatch ||
@@ -757,33 +766,38 @@
 	int best_netmatch;
 	int best_npeers;
 
-	/* CAVEAT EMPTOR: We do all our interface matching with an
+	/*
+	 * CAVEAT EMPTOR: We do all our interface matching with an
 	 * exclusive hold of global lock at IRQ priority.  We're only
 	 * expecting to be dealing with small numbers of interfaces, so the
-	 * O(n**3)-ness shouldn't matter */
-
-	/* Also note that I'm not going to return more than n_peerips
-	 * interfaces, even if I have more myself */
-
+	 * O(n**3)-ness shouldn't matter
+	 */
+	/*
+	 * Also note that I'm not going to return more than n_peerips
+	 * interfaces, even if I have more myself
+	 */
 	write_lock_bh(global_lock);
 
 	LASSERT(n_peerips <= LNET_MAX_INTERFACES);
 	LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
 
-	/* Only match interfaces for additional connections
-	 * if I have > 1 interface */
+	/*
+	 * Only match interfaces for additional connections
+	 * if I have > 1 interface
+	 */
 	n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
 		min(n_peerips, net->ksnn_ninterfaces);
 
 	for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
 		/*	      ^ yes really... */
 
-		/* If we have any new interfaces, first tick off all the
+		/*
+		 * If we have any new interfaces, first tick off all the
 		 * peer IPs that match old interfaces, then choose new
 		 * interfaces to match the remaining peer IPS.
 		 * We don't forget interfaces we've stopped using; we might
-		 * start using them again... */
-
+		 * start using them again...
+		 */
 		if (i < peer->ksnp_n_passive_ips) {
 			/* Old interface. */
 			ip = peer->ksnp_passive_ips[i];
@@ -810,9 +824,9 @@
 
 				k = ksocknal_match_peerip(iface, peerips, n_peerips);
 				xor = ip ^ peerips[k];
-				this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+				this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
 
-				if (!(best_iface == NULL ||
+				if (!(!best_iface ||
 				      best_netmatch < this_netmatch ||
 				      (best_netmatch == this_netmatch &&
 				       best_npeers > iface->ksni_npeers)))
@@ -826,7 +840,7 @@
 			best_iface->ksni_npeers++;
 			ip = best_iface->ksni_ipaddr;
 			peer->ksnp_passive_ips[i] = ip;
-			peer->ksnp_n_passive_ips = i+1;
+			peer->ksnp_n_passive_ips = i + 1;
 		}
 
 		/* mark the best matching peer IP used */
@@ -860,16 +874,19 @@
 	int i;
 	int j;
 
-	/* CAVEAT EMPTOR: We do all our interface matching with an
+	/*
+	 * CAVEAT EMPTOR: We do all our interface matching with an
 	 * exclusive hold of global lock at IRQ priority.  We're only
 	 * expecting to be dealing with small numbers of interfaces, so the
-	 * O(n**3)-ness here shouldn't matter */
-
+	 * O(n**3)-ness here shouldn't matter
+	 */
 	write_lock_bh(global_lock);
 
 	if (net->ksnn_ninterfaces < 2) {
-		/* Only create additional connections
-		 * if I have > 1 interface */
+		/*
+		 * Only create additional connections
+		 * if I have > 1 interface
+		 */
 		write_unlock_bh(global_lock);
 		return;
 	}
@@ -877,13 +894,13 @@
 	LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
 
 	for (i = 0; i < npeer_ipaddrs; i++) {
-		if (newroute != NULL) {
+		if (newroute) {
 			newroute->ksnr_ipaddr = peer_ipaddrs[i];
 		} else {
 			write_unlock_bh(global_lock);
 
 			newroute = ksocknal_create_route(peer_ipaddrs[i], port);
-			if (newroute == NULL)
+			if (!newroute)
 				return;
 
 			write_lock_bh(global_lock);
@@ -904,7 +921,7 @@
 
 			route = NULL;
 		}
-		if (route != NULL)
+		if (route)
 			continue;
 
 		best_iface = NULL;
@@ -920,21 +937,21 @@
 			/* Using this interface already? */
 			list_for_each(rtmp, &peer->ksnp_routes) {
 				route = list_entry(rtmp, ksock_route_t,
-						       ksnr_list);
+						   ksnr_list);
 
 				if (route->ksnr_myipaddr == iface->ksni_ipaddr)
 					break;
 
 				route = NULL;
 			}
-			if (route != NULL)
+			if (route)
 				continue;
 
-			this_netmatch = (((iface->ksni_ipaddr ^
+			this_netmatch = (!((iface->ksni_ipaddr ^
 					   newroute->ksnr_ipaddr) &
-					   iface->ksni_netmask) == 0) ? 1 : 0;
+					   iface->ksni_netmask)) ? 1 : 0;
 
-			if (!(best_iface == NULL ||
+			if (!(!best_iface ||
 			      best_netmatch < this_netmatch ||
 			      (best_netmatch == this_netmatch &&
 			       best_nroutes > iface->ksni_nroutes)))
@@ -945,7 +962,7 @@
 			best_nroutes = iface->ksni_nroutes;
 		}
 
-		if (best_iface == NULL)
+		if (!best_iface)
 			continue;
 
 		newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
@@ -956,7 +973,7 @@
 	}
 
 	write_unlock_bh(global_lock);
-	if (newroute != NULL)
+	if (newroute)
 		ksocknal_route_decref(newroute);
 }
 
@@ -969,10 +986,10 @@
 	int peer_port;
 
 	rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
-	LASSERT(rc == 0);		      /* we succeeded before */
+	LASSERT(!rc);		      /* we succeeded before */
 
 	LIBCFS_ALLOC(cr, sizeof(*cr));
-	if (cr == NULL) {
+	if (!cr) {
 		LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
 				   &peer_ip);
 		return -ENOMEM;
@@ -997,7 +1014,6 @@
 	ksock_route_t *route;
 
 	list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
-
 		if (route->ksnr_ipaddr == ipaddr)
 			return route->ksnr_connecting;
 	}
@@ -1006,7 +1022,7 @@
 
 int
 ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
-		      struct socket *sock, int type)
+		     struct socket *sock, int type)
 {
 	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
 	LIST_HEAD(zombies);
@@ -1026,12 +1042,12 @@
 	int active;
 	char *warn = NULL;
 
-	active = (route != NULL);
+	active = !!route;
 
 	LASSERT(active == (type != SOCKLND_CONN_NONE));
 
 	LIBCFS_ALLOC(conn, sizeof(*conn));
-	if (conn == NULL) {
+	if (!conn) {
 		rc = -ENOMEM;
 		goto failed_0;
 	}
@@ -1039,8 +1055,10 @@
 	conn->ksnc_peer = NULL;
 	conn->ksnc_route = NULL;
 	conn->ksnc_sock = sock;
-	/* 2 ref, 1 for conn, another extra ref prevents socket
-	 * being closed before establishment of connection */
+	/*
+	 * 2 ref, 1 for conn, another extra ref prevents socket
+	 * being closed before establishment of connection
+	 */
 	atomic_set(&conn->ksnc_sock_refcount, 2);
 	conn->ksnc_type = type;
 	ksocknal_lib_save_callback(sock, conn);
@@ -1057,21 +1075,22 @@
 
 	LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
 				     kshm_ips[LNET_MAX_INTERFACES]));
-	if (hello == NULL) {
+	if (!hello) {
 		rc = -ENOMEM;
 		goto failed_1;
 	}
 
 	/* stash conn's local and remote addrs */
 	rc = ksocknal_lib_get_conn_addrs(conn);
-	if (rc != 0)
+	if (rc)
 		goto failed_1;
 
-	/* Find out/confirm peer's NID and connection type and get the
+	/*
+	 * Find out/confirm peer's NID and connection type and get the
 	 * vector of interfaces she's willing to let me connect to.
 	 * Passive connections use the listener timeout since the peer sends
-	 * eagerly */
-
+	 * eagerly
+	 */
 	if (active) {
 		peer = route->ksnr_peer;
 		LASSERT(ni == peer->ksnp_ni);
@@ -1084,7 +1103,7 @@
 		conn->ksnc_proto = peer->ksnp_proto;
 		write_unlock_bh(global_lock);
 
-		if (conn->ksnc_proto == NULL) {
+		if (!conn->ksnc_proto) {
 			 conn->ksnc_proto = &ksocknal_protocol_v3x;
 #if SOCKNAL_VERSION_DEBUG
 			 if (*ksocknal_tunables.ksnd_protocol == 2)
@@ -1095,7 +1114,7 @@
 		}
 
 		rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
-		if (rc != 0)
+		if (rc)
 			goto failed_1;
 	} else {
 		peerid.nid = LNET_NID_ANY;
@@ -1109,8 +1128,8 @@
 	if (rc < 0)
 		goto failed_1;
 
-	LASSERT(rc == 0 || active);
-	LASSERT(conn->ksnc_proto != NULL);
+	LASSERT(!rc || active);
+	LASSERT(conn->ksnc_proto);
 	LASSERT(peerid.nid != LNET_NID_ANY);
 
 	cpt = lnet_cpt_of_nid(peerid.nid);
@@ -1120,20 +1139,22 @@
 		write_lock_bh(global_lock);
 	} else {
 		rc = ksocknal_create_peer(&peer, ni, peerid);
-		if (rc != 0)
+		if (rc)
 			goto failed_1;
 
 		write_lock_bh(global_lock);
 
 		/* called with a ref on ni, so shutdown can't have started */
-		LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+		LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
 
 		peer2 = ksocknal_find_peer_locked(ni, peerid);
-		if (peer2 == NULL) {
-			/* NB this puts an "empty" peer in the peer
-			 * table (which takes my ref) */
+		if (!peer2) {
+			/*
+			 * NB this puts an "empty" peer in the peer
+			 * table (which takes my ref)
+			 */
 			list_add_tail(&peer->ksnp_list,
-					  ksocknal_nid2peerlist(peerid.nid));
+				      ksocknal_nid2peerlist(peerid.nid));
 		} else {
 			ksocknal_peer_decref(peer);
 			peer = peer2;
@@ -1143,8 +1164,10 @@
 		ksocknal_peer_addref(peer);
 		peer->ksnp_accepting++;
 
-		/* Am I already connecting to this guy?  Resolve in
-		 * favour of higher NID... */
+		/*
+		 * Am I already connecting to this guy?  Resolve in
+		 * favour of higher NID...
+		 */
 		if (peerid.nid < ni->ni_nid &&
 		    ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
 			rc = EALREADY;
@@ -1161,8 +1184,9 @@
 		goto failed_2;
 	}
 
-	if (peer->ksnp_proto == NULL) {
-		/* Never connected before.
+	if (!peer->ksnp_proto) {
+		/*
+		 * Never connected before.
 		 * NB recv_hello may have returned EPROTO to signal my peer
 		 * wants a different protocol than the one I asked for.
 		 */
@@ -1198,8 +1222,10 @@
 		goto failed_2;
 	}
 
-	/* Refuse to duplicate an existing connection, unless this is a
-	 * loopback connection */
+	/*
+	 * Refuse to duplicate an existing connection, unless this is a
+	 * loopback connection
+	 */
 	if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
 		list_for_each(tmp, &peer->ksnp_conns) {
 			conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
@@ -1209,9 +1235,11 @@
 			    conn2->ksnc_type != conn->ksnc_type)
 				continue;
 
-			/* Reply on a passive connection attempt so the peer
-			 * realises we're connected. */
-			LASSERT(rc == 0);
+			/*
+			 * Reply on a passive connection attempt so the peer
+			 * realises we're connected.
+			 */
+			LASSERT(!rc);
 			if (!active)
 				rc = EALREADY;
 
@@ -1220,9 +1248,11 @@
 		}
 	}
 
-	/* If the connection created by this route didn't bind to the IP
+	/*
+	 * If the connection created by this route didn't bind to the IP
 	 * address the route connected to, the connection/route matching
-	 * code below probably isn't going to work. */
+	 * code below probably isn't going to work.
+	 */
 	if (active &&
 	    route->ksnr_ipaddr != conn->ksnc_ipaddr) {
 		CERROR("Route %s %pI4h connected to %pI4h\n",
@@ -1231,10 +1261,12 @@
 		       &conn->ksnc_ipaddr);
 	}
 
-	/* Search for a route corresponding to the new connection and
+	/*
+	 * Search for a route corresponding to the new connection and
 	 * create an association.  This allows incoming connections created
 	 * by routes in my peer to match my own route entries so I don't
-	 * continually create duplicate routes. */
+	 * continually create duplicate routes.
+	 */
 	list_for_each(tmp, &peer->ksnp_routes) {
 		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
@@ -1278,14 +1310,14 @@
 
 	write_unlock_bh(global_lock);
 
-	/* We've now got a new connection.  Any errors from here on are just
+	/*
+	 * We've now got a new connection.  Any errors from here on are just
 	 * like "normal" comms errors and we close the connection normally.
 	 * NB (a) we still have to send the reply HELLO for passive
 	 *	connections,
 	 *    (b) normal I/O on the conn is blocked until I setup and call the
 	 *	socket callbacks.
 	 */
-
 	CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
 	       libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
 	       &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
@@ -1305,12 +1337,14 @@
 	LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
 				    kshm_ips[LNET_MAX_INTERFACES]));
 
-	/* setup the socket AFTER I've received hello (it disables
+	/*
+	 * set up the socket AFTER I've received hello (it disables
 	 * SO_LINGER).  I might call back to the acceptor who may want
 	 * to send a protocol version response and then close the
 	 * socket; this ensures the socket only tears down after the
-	 * response has been sent. */
-	if (rc == 0)
+	 * response has been sent.
+	 */
+	if (!rc)
 		rc = ksocknal_lib_setup_sock(sock);
 
 	write_lock_bh(global_lock);
@@ -1323,14 +1357,14 @@
 
 	write_unlock_bh(global_lock);
 
-	if (rc != 0) {
+	if (rc) {
 		write_lock_bh(global_lock);
 		if (!conn->ksnc_closing) {
 			/* could be closed by another thread */
 			ksocknal_close_conn_locked(conn, rc);
 		}
 		write_unlock_bh(global_lock);
-	} else if (ksocknal_connsock_addref(conn) == 0) {
+	} else if (!ksocknal_connsock_addref(conn)) {
 		/* Allow I/O to proceed. */
 		ksocknal_read_callback(conn);
 		ksocknal_write_callback(conn);
@@ -1352,19 +1386,21 @@
 
 	write_unlock_bh(global_lock);
 
-	if (warn != NULL) {
+	if (warn) {
 		if (rc < 0)
 			CERROR("Not creating conn %s type %d: %s\n",
 			       libcfs_id2str(peerid), conn->ksnc_type, warn);
 		else
 			CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
-			      libcfs_id2str(peerid), conn->ksnc_type, warn);
+			       libcfs_id2str(peerid), conn->ksnc_type, warn);
 	}
 
 	if (!active) {
 		if (rc > 0) {
-			/* Request retry by replying with CONN_NONE
-			 * ksnc_proto has been set already */
+			/*
+			 * Request retry by replying with CONN_NONE
+			 * ksnc_proto has been set already
+			 */
 			conn->ksnc_type = SOCKLND_CONN_NONE;
 			hello->kshm_nips = 0;
 			ksocknal_send_hello(ni, conn, peerid.nid, hello);
@@ -1379,7 +1415,7 @@
 	ksocknal_peer_decref(peer);
 
 failed_1:
-	if (hello != NULL)
+	if (hello)
 		LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
 					    kshm_ips[LNET_MAX_INTERFACES]));
 
@@ -1393,15 +1429,17 @@
 void
 ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
 {
-	/* This just does the immmediate housekeeping, and queues the
+	/*
+	 * This just does the immediate housekeeping, and queues the
 	 * connection for the reaper to terminate.
-	 * Caller holds ksnd_global_lock exclusively in irq context */
+	 * Caller holds ksnd_global_lock exclusively in irq context
+	 */
 	ksock_peer_t *peer = conn->ksnc_peer;
 	ksock_route_t *route;
 	ksock_conn_t *conn2;
 	struct list_head *tmp;
 
-	LASSERT(peer->ksnp_error == 0);
+	LASSERT(!peer->ksnp_error);
 	LASSERT(!conn->ksnc_closing);
 	conn->ksnc_closing = 1;
 
@@ -1409,10 +1447,10 @@
 	list_del(&conn->ksnc_list);
 
 	route = conn->ksnc_route;
-	if (route != NULL) {
+	if (route) {
 		/* dissociate conn from route... */
 		LASSERT(!route->ksnr_deleted);
-		LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+		LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
 
 		conn2 = NULL;
 		list_for_each(tmp, &peer->ksnp_conns) {
@@ -1424,7 +1462,7 @@
 
 			conn2 = NULL;
 		}
-		if (conn2 == NULL)
+		if (!conn2)
 			route->ksnr_connected &= ~(1 << conn->ksnc_type);
 
 		conn->ksnc_route = NULL;
@@ -1445,15 +1483,17 @@
 
 			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 
-			/* throw them to the last connection...,
-			 * these TXs will be send to /dev/null by scheduler */
+			/*
+			 * throw them to the last connection...,
+	 * these TXs will be sent to /dev/null by the scheduler
+			 */
 			list_for_each_entry(tx, &peer->ksnp_tx_queue,
-						tx_list)
+					    tx_list)
 				ksocknal_tx_prep(conn, tx);
 
 			spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
 			list_splice_init(&peer->ksnp_tx_queue,
-					     &conn->ksnc_tx_queue);
+					 &conn->ksnc_tx_queue);
 			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
 		}
 
@@ -1461,8 +1501,10 @@
 		peer->ksnp_error = error;       /* stash last conn close reason */
 
 		if (list_empty(&peer->ksnp_routes)) {
-			/* I've just closed last conn belonging to a
-			 * peer with no routes to it */
+			/*
+			 * I've just closed last conn belonging to a
+			 * peer with no routes to it
+			 */
 			ksocknal_unlink_peer_locked(peer);
 		}
 	}
@@ -1470,7 +1512,7 @@
 	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
 	list_add_tail(&conn->ksnc_list,
-			  &ksocknal_data.ksnd_deathrow_conns);
+		      &ksocknal_data.ksnd_deathrow_conns);
 	wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
 	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -1482,16 +1524,17 @@
 	int notify = 0;
 	unsigned long last_alive = 0;
 
-	/* There has been a connection failure or comms error; but I'll only
+	/*
+	 * There has been a connection failure or comms error; but I'll only
 	 * tell LNET I think the peer is dead if it's to another kernel and
-	 * there are no connections or connection attempts in existence. */
-
+	 * there are no connections or connection attempts in existence.
+	 */
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
-	if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+	if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
 	    list_empty(&peer->ksnp_conns) &&
-	    peer->ksnp_accepting == 0 &&
-	    ksocknal_find_connecting_route_locked(peer) == NULL) {
+	    !peer->ksnp_accepting &&
+	    !ksocknal_find_connecting_route_locked(peer)) {
 		notify = 1;
 		last_alive = peer->ksnp_last_alive;
 	}
@@ -1500,7 +1543,7 @@
 
 	if (notify)
 		lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
-			     last_alive);
+			    last_alive);
 }
 
 void
@@ -1511,9 +1554,11 @@
 	ksock_tx_t *tmp;
 	LIST_HEAD(zlist);
 
-	/* NB safe to finalize TXs because closing of socket will
-	 * abort all buffered data */
-	LASSERT(conn->ksnc_sock == NULL);
+	/*
+	 * NB safe to finalize TXs because closing of socket will
+	 * abort all buffered data
+	 */
+	LASSERT(!conn->ksnc_sock);
 
 	spin_lock(&peer->ksnp_lock);
 
@@ -1521,7 +1566,7 @@
 		if (tx->tx_conn != conn)
 			continue;
 
-		LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
+		LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
 
 		tx->tx_msg.ksm_zc_cookies[0] = 0;
 		tx->tx_zc_aborted = 1; /* mark it as not-acked */
@@ -1542,10 +1587,12 @@
 void
 ksocknal_terminate_conn(ksock_conn_t *conn)
 {
-	/* This gets called by the reaper (guaranteed thread context) to
+	/*
+	 * This gets called by the reaper (guaranteed thread context) to
 	 * disengage the socket from its callbacks and close it.
 	 * ksnc_refcount will eventually hit zero, and then the reaper will
-	 * destroy it. */
+	 * destroy it.
+	 */
 	ksock_peer_t *peer = conn->ksnc_peer;
 	ksock_sched_t *sched = conn->ksnc_scheduler;
 	int failed = 0;
@@ -1561,7 +1608,7 @@
 	if (!conn->ksnc_tx_scheduled &&
 	    !list_empty(&conn->ksnc_tx_queue)) {
 		list_add_tail(&conn->ksnc_tx_list,
-			       &sched->kss_tx_conns);
+			      &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
@@ -1576,11 +1623,13 @@
 
 	ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
 
-	/* OK, so this conn may not be completely disengaged from its
-	 * scheduler yet, but it _has_ committed to terminate... */
+	/*
+	 * OK, so this conn may not be completely disengaged from its
+	 * scheduler yet, but it _has_ committed to terminate...
+	 */
 	conn->ksnc_scheduler->kss_nconns--;
 
-	if (peer->ksnp_error != 0) {
+	if (peer->ksnp_error) {
 		/* peer's last conn closed in error */
 		LASSERT(list_empty(&peer->ksnp_conns));
 		failed = 1;
@@ -1592,11 +1641,13 @@
 	if (failed)
 		ksocknal_peer_failed(peer);
 
-	/* The socket is closed on the final put; either here, or in
+	/*
+	 * The socket is closed on the final put; either here, or in
 	 * ksocknal_{send,recv}msg().  Since we set up the linger2 option
 	 * when the connection was established, this will close the socket
 	 * immediately, aborting anything buffered in it. Any hung
-	 * zero-copy transmits will therefore complete in finite time. */
+	 * zero-copy transmits will therefore complete in finite time.
+	 */
 	ksocknal_connsock_decref(conn);
 }
 
@@ -1605,7 +1656,7 @@
 {
 	/* Queue the conn for the reaper to destroy */
 
-	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
+	LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
 	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
 	list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
@@ -1622,10 +1673,10 @@
 	/* Final coup-de-grace of the reaper */
 	CDEBUG(D_NET, "connection %p\n", conn);
 
-	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
-	LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
-	LASSERT(conn->ksnc_sock == NULL);
-	LASSERT(conn->ksnc_route == NULL);
+	LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
+	LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
+	LASSERT(!conn->ksnc_sock);
+	LASSERT(!conn->ksnc_route);
 	LASSERT(!conn->ksnc_tx_scheduled);
 	LASSERT(!conn->ksnc_rx_scheduled);
 	LASSERT(list_empty(&conn->ksnc_tx_queue));
@@ -1642,7 +1693,7 @@
 		       cfs_duration_sec(cfs_time_sub(cfs_time_current(),
 						     last_rcv)));
 		lnet_finalize(conn->ksnc_peer->ksnp_ni,
-			       conn->ksnc_cookie, -EIO);
+			      conn->ksnc_cookie, -EIO);
 		break;
 	case SOCKNAL_RX_LNET_HEADER:
 		if (conn->ksnc_rx_started)
@@ -1685,8 +1736,7 @@
 	list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
 		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 
-		if (ipaddr == 0 ||
-		    conn->ksnc_ipaddr == ipaddr) {
+		if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
 			count++;
 			ksocknal_close_conn_locked(conn, why);
 		}
@@ -1724,17 +1774,17 @@
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	if (id.nid != LNET_NID_ANY)
-		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
-	else {
+	if (id.nid != LNET_NID_ANY) {
+		lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+		hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+	} else {
 		lo = 0;
 		hi = ksocknal_data.ksnd_peer_hash_size - 1;
 	}
 
 	for (i = lo; i <= hi; i++) {
 		list_for_each_safe(ptmp, pnxt,
-					&ksocknal_data.ksnd_peers[i]) {
-
+				   &ksocknal_data.ksnd_peers[i]) {
 			peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 
 			if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
@@ -1748,10 +1798,10 @@
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
 	/* wildcards always succeed */
-	if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+	if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
 		return 0;
 
-	if (count == 0)
+	if (!count)
 		return -ENOENT;
 	else
 		return 0;
@@ -1760,15 +1810,17 @@
 void
 ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
 {
-	/* The router is telling me she's been notified of a change in
-	 * gateway state.... */
+	/*
+	 * The router is telling me she's been notified of a change in
+	 * gateway state....
+	 */
 	lnet_process_id_t id = {0};
 
 	id.nid = gw_nid;
 	id.pid = LNET_PID_ANY;
 
 	CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
-		alive ? "up" : "down");
+	       alive ? "up" : "down");
 
 	if (!alive) {
 		/* If the gateway crashed, close all open connections... */
@@ -1776,8 +1828,10 @@
 		return;
 	}
 
-	/* ...otherwise do nothing.  We can only establish new connections
-	 * if we have autroutes, and these connect on demand. */
+	/*
+	 * ...otherwise do nothing.  We can only establish new connections
+	 * if we have autoroutes, and these connect on demand.
+	 */
 }
 
 void
@@ -1788,12 +1842,15 @@
 	unsigned long now = cfs_time_current();
 	ksock_peer_t *peer = NULL;
 	rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
-	lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
+	lnet_process_id_t id = {
+		.nid = nid,
+		.pid = LNET_PID_LUSTRE,
+	};
 
 	read_lock(glock);
 
 	peer = ksocknal_find_peer_locked(ni, id);
-	if (peer != NULL) {
+	if (peer) {
 		struct list_head *tmp;
 		ksock_conn_t *conn;
 		int bufnob;
@@ -1812,13 +1869,13 @@
 		}
 
 		last_alive = peer->ksnp_last_alive;
-		if (ksocknal_find_connectable_route_locked(peer) == NULL)
+		if (!ksocknal_find_connectable_route_locked(peer))
 			connect = 0;
 	}
 
 	read_unlock(glock);
 
-	if (last_alive != 0)
+	if (last_alive)
 		*when = last_alive;
 
 	CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
@@ -1834,7 +1891,7 @@
 	write_lock_bh(glock);
 
 	peer = ksocknal_find_peer_locked(ni, id);
-	if (peer != NULL)
+	if (peer)
 		ksocknal_launch_all_connections_locked(peer);
 
 	write_unlock_bh(glock);
@@ -1857,7 +1914,7 @@
 		list_for_each(tmp, &peer->ksnp_conns) {
 			if (i++ == index) {
 				conn = list_entry(tmp, ksock_conn_t,
-						       ksnc_list);
+						  ksnc_list);
 				ksocknal_conn_addref(conn);
 				break;
 			}
@@ -1865,7 +1922,7 @@
 
 		read_unlock(&ksocknal_data.ksnd_global_lock);
 
-		if (conn == NULL)
+		if (!conn)
 			break;
 
 		ksocknal_lib_push_conn(conn);
@@ -1885,7 +1942,8 @@
 		start = &ksocknal_data.ksnd_peers[0];
 		end = &ksocknal_data.ksnd_peers[hsize - 1];
 	} else {
-		start = end = ksocknal_nid2peerlist(id.nid);
+		start = ksocknal_nid2peerlist(id.nid);
+		end = ksocknal_nid2peerlist(id.nid);
 	}
 
 	for (tmp = start; tmp <= end; tmp++) {
@@ -1910,7 +1968,7 @@
 			}
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 
-			if (i == 0) /* no match */
+			if (!i) /* no match */
 				break;
 
 			rc = 0;
@@ -1934,14 +1992,13 @@
 	struct list_head *rtmp;
 	ksock_route_t *route;
 
-	if (ipaddress == 0 ||
-	    netmask == 0)
+	if (!ipaddress || !netmask)
 		return -EINVAL;
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	iface = ksocknal_ip2iface(ni, ipaddress);
-	if (iface != NULL) {
+	if (iface) {
 		/* silently ignore dups */
 		rc = 0;
 	} else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
@@ -1957,16 +2014,15 @@
 		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 			list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
 				peer = list_entry(ptmp, ksock_peer_t,
-						      ksnp_list);
+						  ksnp_list);
 
 				for (j = 0; j < peer->ksnp_n_passive_ips; j++)
 					if (peer->ksnp_passive_ips[j] == ipaddress)
 						iface->ksni_npeers++;
 
 				list_for_each(rtmp, &peer->ksnp_routes) {
-					route = list_entry(rtmp,
-							       ksock_route_t,
-							       ksnr_list);
+					route = list_entry(rtmp, ksock_route_t,
+							   ksnr_list);
 
 					if (route->ksnr_myipaddr == ipaddress)
 						iface->ksni_nroutes++;
@@ -1995,8 +2051,8 @@
 
 	for (i = 0; i < peer->ksnp_n_passive_ips; i++)
 		if (peer->ksnp_passive_ips[i] == ipaddr) {
-			for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
-				peer->ksnp_passive_ips[j-1] =
+			for (j = i + 1; j < peer->ksnp_n_passive_ips; j++)
+				peer->ksnp_passive_ips[j - 1] =
 					peer->ksnp_passive_ips[j];
 			peer->ksnp_n_passive_ips--;
 			break;
@@ -2008,7 +2064,7 @@
 		if (route->ksnr_myipaddr != ipaddr)
 			continue;
 
-		if (route->ksnr_share_count != 0) {
+		if (route->ksnr_share_count) {
 			/* Manually created; keep, but unbind */
 			route->ksnr_myipaddr = 0;
 		} else {
@@ -2041,23 +2097,21 @@
 	for (i = 0; i < net->ksnn_ninterfaces; i++) {
 		this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
 
-		if (!(ipaddress == 0 ||
-		      ipaddress == this_ip))
+		if (!(!ipaddress || ipaddress == this_ip))
 			continue;
 
 		rc = 0;
 
-		for (j = i+1; j < net->ksnn_ninterfaces; j++)
-			net->ksnn_interfaces[j-1] =
+		for (j = i + 1; j < net->ksnn_ninterfaces; j++)
+			net->ksnn_interfaces[j - 1] =
 				net->ksnn_interfaces[j];
 
 		net->ksnn_ninterfaces--;
 
 		for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
 			list_for_each_safe(tmp, nxt,
-					       &ksocknal_data.ksnd_peers[j]) {
-				peer = list_entry(tmp, ksock_peer_t,
-						      ksnp_list);
+					   &ksocknal_data.ksnd_peers[j]) {
+				peer = list_entry(tmp, ksock_peer_t, ksnp_list);
 
 				if (peer->ksnp_ni != ni)
 					continue;
@@ -2121,7 +2175,7 @@
 		rc = ksocknal_get_peer_info(ni, data->ioc_count,
 					    &id, &myip, &ip, &port,
 					    &conn_count,  &share_count);
-		if (rc != 0)
+		if (rc)
 			return rc;
 
 		data->ioc_nid    = id.nid;
@@ -2136,7 +2190,7 @@
 
 	case IOC_LIBCFS_ADD_PEER:
 		id.nid = data->ioc_nid;
-		id.pid = LUSTRE_SRV_LNET_PID;
+		id.pid = LNET_PID_LUSTRE;
 		return ksocknal_add_peer(ni, id,
 					  data->ioc_u32[0], /* IP */
 					  data->ioc_u32[1]); /* port */
@@ -2153,7 +2207,7 @@
 		int nagle;
 		ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
 
-		if (conn == NULL)
+		if (!conn)
 			return -ENOENT;
 
 		ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
@@ -2202,14 +2256,14 @@
 static void
 ksocknal_free_buffers(void)
 {
-	LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+	LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
 
-	if (ksocknal_data.ksnd_sched_info != NULL) {
+	if (ksocknal_data.ksnd_sched_info) {
 		struct ksock_sched_info *info;
 		int i;
 
 		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-			if (info->ksi_scheds != NULL) {
+			if (info->ksi_scheds) {
 				LIBCFS_FREE(info->ksi_scheds,
 					    info->ksi_nthreads_max *
 					    sizeof(info->ksi_scheds[0]));
@@ -2219,8 +2273,8 @@
 	}
 
 	LIBCFS_FREE(ksocknal_data.ksnd_peers,
-		     sizeof(struct list_head) *
-		     ksocknal_data.ksnd_peer_hash_size);
+		    sizeof(struct list_head) *
+		    ksocknal_data.ksnd_peer_hash_size);
 
 	spin_lock(&ksocknal_data.ksnd_tx_lock);
 
@@ -2250,7 +2304,7 @@
 	int i;
 	int j;
 
-	LASSERT(ksocknal_data.ksnd_nnets == 0);
+	LASSERT(!ksocknal_data.ksnd_nnets);
 
 	switch (ksocknal_data.ksnd_init) {
 	default:
@@ -2258,7 +2312,7 @@
 
 	case SOCKNAL_INIT_ALL:
 	case SOCKNAL_INIT_DATA:
-		LASSERT(ksocknal_data.ksnd_peers != NULL);
+		LASSERT(ksocknal_data.ksnd_peers);
 		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
 			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
 
@@ -2268,14 +2322,13 @@
 		LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
 		LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
 
-		if (ksocknal_data.ksnd_sched_info != NULL) {
+		if (ksocknal_data.ksnd_sched_info) {
 			cfs_percpt_for_each(info, i,
 					    ksocknal_data.ksnd_sched_info) {
-				if (info->ksi_scheds == NULL)
+				if (!info->ksi_scheds)
 					continue;
 
 				for (j = 0; j < info->ksi_nthreads_max; j++) {
-
 					sched = &info->ksi_scheds[j];
 					LASSERT(list_empty(
 						&sched->kss_tx_conns));
@@ -2283,7 +2336,7 @@
 						&sched->kss_rx_conns));
 					LASSERT(list_empty(
 						&sched->kss_zombie_noop_txs));
-					LASSERT(sched->kss_nconns == 0);
+					LASSERT(!sched->kss_nconns);
 				}
 			}
 		}
@@ -2293,10 +2346,10 @@
 		wake_up_all(&ksocknal_data.ksnd_connd_waitq);
 		wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
 
-		if (ksocknal_data.ksnd_sched_info != NULL) {
+		if (ksocknal_data.ksnd_sched_info) {
 			cfs_percpt_for_each(info, i,
 					    ksocknal_data.ksnd_sched_info) {
-				if (info->ksi_scheds == NULL)
+				if (!info->ksi_scheds)
 					continue;
 
 				for (j = 0; j < info->ksi_nthreads_max; j++) {
@@ -2308,7 +2361,7 @@
 
 		i = 4;
 		read_lock(&ksocknal_data.ksnd_global_lock);
-		while (ksocknal_data.ksnd_nthreads != 0) {
+		while (ksocknal_data.ksnd_nthreads) {
 			i++;
 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
 			       "waiting for %d threads to terminate\n",
@@ -2332,7 +2385,6 @@
 static __u64
 ksocknal_new_incarnation(void)
 {
-
 	/* The incarnation number is the time this module loaded and it
 	 * identifies this particular instance of the socknal.
 	 */
@@ -2347,15 +2399,15 @@
 	int i;
 
 	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
-	LASSERT(ksocknal_data.ksnd_nnets == 0);
+	LASSERT(!ksocknal_data.ksnd_nnets);
 
 	memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
 
 	ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
 	LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
-		      sizeof(struct list_head) *
-		      ksocknal_data.ksnd_peer_hash_size);
-	if (ksocknal_data.ksnd_peers == NULL)
+		     sizeof(struct list_head) *
+		     ksocknal_data.ksnd_peer_hash_size);
+	if (!ksocknal_data.ksnd_peers)
 		return -ENOMEM;
 
 	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
@@ -2386,7 +2438,7 @@
 
 	ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
 							 sizeof(*info));
-	if (ksocknal_data.ksnd_sched_info == NULL)
+	if (!ksocknal_data.ksnd_sched_info)
 		goto failed;
 
 	cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
@@ -2397,8 +2449,10 @@
 		if (*ksocknal_tunables.ksnd_nscheds > 0) {
 			nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
 		} else {
-			/* max to half of CPUs, assume another half should be
-			 * reserved for upper layer modules */
+			/*
+			 * max to half of CPUs, assume another half should be
+			 * reserved for upper layer modules
+			 */
 			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
 		}
 
@@ -2407,7 +2461,7 @@
 
 		LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
 				 info->ksi_nthreads_max * sizeof(*sched));
-		if (info->ksi_scheds == NULL)
+		if (!info->ksi_scheds)
 			goto failed;
 
 		for (; nthrs > 0; nthrs--) {
@@ -2425,8 +2479,10 @@
 	ksocknal_data.ksnd_connd_starting       = 0;
 	ksocknal_data.ksnd_connd_failed_stamp   = 0;
 	ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
-	/* must have at least 2 connds to remain responsive to accepts while
-	 * connecting */
+	/*
+	 * must have at least 2 connds to remain responsive to accepts while
+	 * connecting
+	 */
 	if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
 		*ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
 
@@ -2446,7 +2502,7 @@
 		snprintf(name, sizeof(name), "socknal_cd%02d", i);
 		rc = ksocknal_thread_start(ksocknal_connd,
 					   (void *)((ulong_ptr_t)i), name);
-		if (rc != 0) {
+		if (rc) {
 			spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 			ksocknal_data.ksnd_connd_starting--;
 			spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
@@ -2456,7 +2512,7 @@
 	}
 
 	rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't spawn socknal reaper: %d\n", rc);
 		goto failed;
 	}
@@ -2491,7 +2547,7 @@
 		}
 	}
 
-	if (peer != NULL) {
+	if (peer) {
 		ksock_route_t *route;
 		ksock_conn_t  *conn;
 
@@ -2515,9 +2571,9 @@
 		list_for_each(tmp, &peer->ksnp_conns) {
 			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
 			CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
-			       atomic_read(&conn->ksnc_conn_refcount),
-			       atomic_read(&conn->ksnc_sock_refcount),
-			       conn->ksnc_type, conn->ksnc_closing);
+			      atomic_read(&conn->ksnc_conn_refcount),
+			      atomic_read(&conn->ksnc_sock_refcount),
+			      conn->ksnc_type, conn->ksnc_closing);
 		}
 	}
 
@@ -2548,7 +2604,7 @@
 	/* Wait for all peer state to clean up */
 	i = 2;
 	spin_lock_bh(&net->ksnn_lock);
-	while (net->ksnn_npeers != 0) {
+	while (net->ksnn_npeers) {
 		spin_unlock_bh(&net->ksnn_lock);
 
 		i++;
@@ -2565,15 +2621,15 @@
 	spin_unlock_bh(&net->ksnn_lock);
 
 	for (i = 0; i < net->ksnn_ninterfaces; i++) {
-		LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
-		LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
+		LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
+		LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
 	}
 
 	list_del(&net->ksnn_list);
 	LIBCFS_FREE(net, sizeof(*net));
 
 	ksocknal_data.ksnd_nnets--;
-	if (ksocknal_data.ksnd_nnets == 0)
+	if (!ksocknal_data.ksnd_nnets)
 		ksocknal_base_shutdown();
 }
 
@@ -2601,7 +2657,7 @@
 			continue;
 
 		rc = lnet_ipif_query(names[i], &up, &ip, &mask);
-		if (rc != 0) {
+		if (rc) {
 			CWARN("Can't get interface %s info: %d\n",
 			      names[i], rc);
 			continue;
@@ -2628,7 +2684,7 @@
 
 	lnet_ipif_free_enumeration(names, n);
 
-	if (j == 0)
+	if (!j)
 		CERROR("Can't find any usable interfaces\n");
 
 	return j;
@@ -2647,21 +2703,20 @@
 		ksock_net_t *tmp;
 		int j;
 
-		if (colon != NULL) /* ignore alias device */
+		if (colon) /* ignore alias device */
 			*colon = 0;
 
-		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
-					ksnn_list) {
+		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
 			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
 				char *ifnam2 =
 					&tmp->ksnn_interfaces[j].ksni_name[0];
 				char *colon2 = strchr(ifnam2, ':');
 
-				if (colon2 != NULL)
+				if (colon2)
 					*colon2 = 0;
 
-				found = strcmp(ifnam, ifnam2) == 0;
-				if (colon2 != NULL)
+				found = !strcmp(ifnam, ifnam2);
+				if (colon2)
 					*colon2 = ':';
 			}
 			if (found)
@@ -2669,7 +2724,7 @@
 		}
 
 		new_ipif += !found;
-		if (colon != NULL)
+		if (colon)
 			*colon = ':';
 	}
 
@@ -2683,7 +2738,7 @@
 	int rc = 0;
 	int i;
 
-	if (info->ksi_nthreads == 0) {
+	if (!info->ksi_nthreads) {
 		if (*ksocknal_tunables.ksnd_nscheds > 0) {
 			nthrs = info->ksi_nthreads_max;
 		} else {
@@ -2711,7 +2766,7 @@
 
 		rc = ksocknal_thread_start(ksocknal_scheduler,
 					   (void *)id, name);
-		if (rc == 0)
+		if (!rc)
 			continue;
 
 		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
@@ -2734,7 +2789,7 @@
 
 	for (i = 0; i < ncpts; i++) {
 		struct ksock_sched_info *info;
-		int cpt = (cpts == NULL) ? i : cpts[i];
+		int cpt = !cpts ? i : cpts[i];
 
 		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
 		info = ksocknal_data.ksnd_sched_info[cpt];
@@ -2743,7 +2798,7 @@
 			continue;
 
 		rc = ksocknal_start_schedulers(info);
-		if (rc != 0)
+		if (rc)
 			return rc;
 	}
 	return 0;
@@ -2760,12 +2815,12 @@
 
 	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
 		rc = ksocknal_base_startup();
-		if (rc != 0)
+		if (rc)
 			return rc;
 	}
 
 	LIBCFS_ALLOC(net, sizeof(*net));
-	if (net == NULL)
+	if (!net)
 		goto fail_0;
 
 	spin_lock_init(&net->ksnn_lock);
@@ -2776,7 +2831,7 @@
 	ni->ni_peertxcredits  = *ksocknal_tunables.ksnd_peertxcredits;
 	ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
 
-	if (ni->ni_interfaces[0] == NULL) {
+	if (!ni->ni_interfaces[0]) {
 		rc = ksocknal_enumerate_interfaces(net);
 		if (rc <= 0)
 			goto fail_1;
@@ -2786,14 +2841,14 @@
 		for (i = 0; i < LNET_MAX_INTERFACES; i++) {
 			int up;
 
-			if (ni->ni_interfaces[i] == NULL)
+			if (!ni->ni_interfaces[i])
 				break;
 
 			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
-				&net->ksnn_interfaces[i].ksni_ipaddr,
-				&net->ksnn_interfaces[i].ksni_netmask);
+					     &net->ksnn_interfaces[i].ksni_ipaddr,
+					     &net->ksnn_interfaces[i].ksni_netmask);
 
-			if (rc != 0) {
+			if (rc) {
 				CERROR("Can't get interface %s info: %d\n",
 				       ni->ni_interfaces[i], rc);
 				goto fail_1;
@@ -2814,7 +2869,7 @@
 
 	/* call it before add it to ksocknal_data.ksnd_nets */
 	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
-	if (rc != 0)
+	if (rc)
 		goto fail_1;
 
 	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
@@ -2828,7 +2883,7 @@
  fail_1:
 	LIBCFS_FREE(net, sizeof(*net));
  fail_0:
-	if (ksocknal_data.ksnd_nnets == 0)
+	if (!ksocknal_data.ksnd_nnets)
 		ksocknal_base_shutdown();
 
 	return -ENETDOWN;
@@ -2861,7 +2916,7 @@
 	the_ksocklnd.lnd_accept   = ksocknal_accept;
 
 	rc = ksocknal_tunables_init();
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	lnet_register_lnd(&the_ksocklnd);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index f4fa725..a1765444 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -19,10 +19,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #ifndef _SOCKLND_SOCKLND_H_
@@ -69,8 +65,10 @@
 
 #define SOCKNAL_VERSION_DEBUG   0     /* enable protocol version debugging */
 
-/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */
+/*
+ * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
+ * no risk if we're not running on a CONFIG_HIGHMEM platform.
+ */
 #ifdef CONFIG_HIGHMEM
 # define SOCKNAL_RISK_KMAP_DEADLOCK  0
 #else
@@ -237,15 +235,16 @@
 #define SOCKNAL_INIT_DATA    1
 #define SOCKNAL_INIT_ALL     2
 
-/* A packet just assembled for transmission is represented by 1 or more
+/*
+ * A packet just assembled for transmission is represented by 1 or more
  * struct iovec fragments (the first frag contains the portals header),
  * followed by 0 or more lnet_kiov_t fragments.
  *
  * On the receive side, initially 1 struct iovec fragment is posted for
  * receive (the header).  Once the header has been received, the payload is
  * received into either struct iovec or lnet_kiov_t fragments, depending on
- * what the header matched or whether the message needs forwarding. */
-
+ * what the header matched or whether the message needs forwarding.
+ */
 struct ksock_conn;  /* forward ref */
 struct ksock_peer;  /* forward ref */
 struct ksock_route; /* forward ref */
@@ -288,8 +287,10 @@
 
 /* network zero copy callback descriptor embedded in ksock_tx_t */
 
-/* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to LNET_MAX_IOV frags of payload of either type. */
+/*
+ * space for the rx frag descriptors; we either read a single contiguous
+ * header, or up to LNET_MAX_IOV frags of payload of either type.
+ */
 typedef union {
 	struct kvec      iov[LNET_MAX_IOV];
 	lnet_kiov_t      kiov[LNET_MAX_IOV];
@@ -463,11 +464,13 @@
 	/* handle ZC ACK */
 	int        (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
 
-	/* msg type matches the connection type:
+	/*
+	 * msg type matches the connection type:
 	 * return value:
 	 *   return MATCH_NO  : no
 	 *   return MATCH_YES : matching type
-	 *   return MATCH_MAY : can be backup */
+	 *   return MATCH_MAY : can be backup
+	 */
 	int        (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
 } ksock_proto_t;
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 477b385..efb7169 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -19,9 +19,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include "socklnd.h"
@@ -47,10 +44,10 @@
 		spin_unlock(&ksocknal_data.ksnd_tx_lock);
 	}
 
-	if (tx == NULL)
+	if (!tx)
 		LIBCFS_ALLOC(tx, size);
 
-	if (tx == NULL)
+	if (!tx)
 		return NULL;
 
 	atomic_set(&tx->tx_refcount, 1);
@@ -70,7 +67,7 @@
 	ksock_tx_t *tx;
 
 	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
-	if (tx == NULL) {
+	if (!tx) {
 		CERROR("Can't allocate noop tx desc\n");
 		return NULL;
 	}
@@ -90,11 +87,11 @@
 }
 
 void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(ksock_tx_t *tx)
 {
 	atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
-	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+	if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
 		/* it's a noop tx */
 		spin_lock(&ksocknal_data.ksnd_tx_lock);
 
@@ -107,7 +104,7 @@
 }
 
 static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	struct kvec *iov = tx->tx_iov;
 	int nob;
@@ -122,7 +119,7 @@
 		return rc;
 
 	nob = rc;
-	LASSERT (nob <= tx->tx_resid);
+	LASSERT(nob <= tx->tx_resid);
 	tx->tx_resid -= nob;
 
 	/* "consume" iov */
@@ -138,19 +135,19 @@
 		nob -= iov->iov_len;
 		tx->tx_iov = ++iov;
 		tx->tx_niov--;
-	} while (nob != 0);
+	} while (nob);
 
 	return rc;
 }
 
 static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	lnet_kiov_t *kiov = tx->tx_kiov;
 	int nob;
 	int rc;
 
-	LASSERT(tx->tx_niov == 0);
+	LASSERT(!tx->tx_niov);
 	LASSERT(tx->tx_nkiov > 0);
 
 	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
@@ -160,7 +157,7 @@
 		return rc;
 
 	nob = rc;
-	LASSERT (nob <= tx->tx_resid);
+	LASSERT(nob <= tx->tx_resid);
 	tx->tx_resid -= nob;
 
 	/* "consume" kiov */
@@ -176,27 +173,27 @@
 		nob -= (int)kiov->kiov_len;
 		tx->tx_kiov = ++kiov;
 		tx->tx_nkiov--;
-	} while (nob != 0);
+	} while (nob);
 
 	return rc;
 }
 
 static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	int rc;
 	int bufnob;
 
-	if (ksocknal_data.ksnd_stall_tx != 0) {
+	if (ksocknal_data.ksnd_stall_tx) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 	}
 
-	LASSERT(tx->tx_resid != 0);
+	LASSERT(tx->tx_resid);
 
 	rc = ksocknal_connsock_addref(conn);
-	if (rc != 0) {
-		LASSERT (conn->ksnc_closing);
+	if (rc) {
+		LASSERT(conn->ksnc_closing);
 		return -ESHUTDOWN;
 	}
 
@@ -205,10 +202,10 @@
 			/* testing... */
 			ksocknal_data.ksnd_enomem_tx--;
 			rc = -EAGAIN;
-		} else if (tx->tx_niov != 0) {
-			rc = ksocknal_send_iov (conn, tx);
+		} else if (tx->tx_niov) {
+			rc = ksocknal_send_iov(conn, tx);
 		} else {
-			rc = ksocknal_send_kiov (conn, tx);
+			rc = ksocknal_send_kiov(conn, tx);
 		}
 
 		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
@@ -216,8 +213,10 @@
 			conn->ksnc_tx_bufnob += rc; /* account it */
 
 		if (bufnob < conn->ksnc_tx_bufnob) {
-			/* allocated send buffer bytes < computed; infer
-			 * something got ACKed */
+			/*
+			 * allocated send buffer bytes < computed; infer
+			 * something got ACKed
+			 */
 			conn->ksnc_tx_deadline =
 				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
@@ -227,7 +226,7 @@
 
 		if (rc <= 0) { /* Didn't write anything? */
 
-			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+			if (!rc) /* some stacks return 0 instead of -EAGAIN */
 				rc = -EAGAIN;
 
 			/* Check if EAGAIN is due to memory pressure */
@@ -238,17 +237,17 @@
 		}
 
 		/* socket's wmem_queued now includes 'rc' bytes */
-		atomic_sub (rc, &conn->ksnc_tx_nob);
+		atomic_sub(rc, &conn->ksnc_tx_nob);
 		rc = 0;
 
-	} while (tx->tx_resid != 0);
+	} while (tx->tx_resid);
 
 	ksocknal_connsock_decref(conn);
 	return rc;
 }
 
 static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(ksock_conn_t *conn)
 {
 	struct kvec *iov = conn->ksnc_rx_iov;
 	int nob;
@@ -256,8 +255,10 @@
 
 	LASSERT(conn->ksnc_rx_niov > 0);
 
-	/* Never touch conn->ksnc_rx_iov or change connection
-	 * status inside ksocknal_lib_recv_iov */
+	/*
+	 * Never touch conn->ksnc_rx_iov or change connection
+	 * status inside ksocknal_lib_recv_iov
+	 */
 	rc = ksocknal_lib_recv_iov(conn);
 
 	if (rc <= 0)
@@ -287,13 +288,13 @@
 		nob -= iov->iov_len;
 		conn->ksnc_rx_iov = ++iov;
 		conn->ksnc_rx_niov--;
-	} while (nob != 0);
+	} while (nob);
 
 	return rc;
 }
 
 static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(ksock_conn_t *conn)
 {
 	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
 	int nob;
@@ -301,8 +302,10 @@
 
 	LASSERT(conn->ksnc_rx_nkiov > 0);
 
-	/* Never touch conn->ksnc_rx_kiov or change connection
-	 * status inside ksocknal_lib_recv_iov */
+	/*
+	 * Never touch conn->ksnc_rx_kiov or change connection
+	 * status inside ksocknal_lib_recv_kiov
+	 */
 	rc = ksocknal_lib_recv_kiov(conn);
 
 	if (rc <= 0)
@@ -332,41 +335,43 @@
 		nob -= kiov->kiov_len;
 		conn->ksnc_rx_kiov = ++kiov;
 		conn->ksnc_rx_nkiov--;
-	} while (nob != 0);
+	} while (nob);
 
 	return 1;
 }
 
 static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(ksock_conn_t *conn)
 {
-	/* Return 1 on success, 0 on EOF, < 0 on error.
+	/*
+	 * Return 1 on success, 0 on EOF, < 0 on error.
 	 * Caller checks ksnc_rx_nob_wanted to determine
-	 * progress/completion. */
+	 * progress/completion.
+	 */
 	int rc;
 
-	if (ksocknal_data.ksnd_stall_rx != 0) {
+	if (ksocknal_data.ksnd_stall_rx) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
 	}
 
 	rc = ksocknal_connsock_addref(conn);
-	if (rc != 0) {
-		LASSERT (conn->ksnc_closing);
+	if (rc) {
+		LASSERT(conn->ksnc_closing);
 		return -ESHUTDOWN;
 	}
 
 	for (;;) {
-		if (conn->ksnc_rx_niov != 0)
-			rc = ksocknal_recv_iov (conn);
+		if (conn->ksnc_rx_niov)
+			rc = ksocknal_recv_iov(conn);
 		else
-			rc = ksocknal_recv_kiov (conn);
+			rc = ksocknal_recv_kiov(conn);
 
 		if (rc <= 0) {
 			/* error/EOF or partial receive */
 			if (rc == -EAGAIN) {
 				rc = 1;
-			} else if (rc == 0 && conn->ksnc_rx_started) {
+			} else if (!rc && conn->ksnc_rx_started) {
 				/* EOF in the middle of a message */
 				rc = -EPROTO;
 			}
@@ -375,7 +380,7 @@
 
 		/* Completed a fragment */
 
-		if (conn->ksnc_rx_nob_wanted == 0) {
+		if (!conn->ksnc_rx_nob_wanted) {
 			rc = 1;
 			break;
 		}
@@ -386,36 +391,36 @@
 }
 
 void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
 {
 	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
-	int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+	int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
 
-	LASSERT(ni != NULL || tx->tx_conn != NULL);
+	LASSERT(ni || tx->tx_conn);
 
-	if (tx->tx_conn != NULL)
+	if (tx->tx_conn)
 		ksocknal_conn_decref(tx->tx_conn);
 
-	if (ni == NULL && tx->tx_conn != NULL)
+	if (!ni && tx->tx_conn)
 		ni = tx->tx_conn->ksnc_peer->ksnp_ni;
 
-	ksocknal_free_tx (tx);
-	if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
-		lnet_finalize (ni, lnetmsg, rc);
+	ksocknal_free_tx(tx);
+	if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */
+		lnet_finalize(ni, lnetmsg, rc);
 }
 
 void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 {
 	ksock_tx_t *tx;
 
-	while (!list_empty (txlist)) {
+	while (!list_empty(txlist)) {
 		tx = list_entry(txlist->next, ksock_tx_t, tx_list);
 
-		if (error && tx->tx_lnetmsg != NULL) {
+		if (error && tx->tx_lnetmsg) {
 			CNETERR("Deleting packet type %d len %d %s->%s\n",
-				le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
-				le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
+				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
 				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
 				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
 		} else if (error) {
@@ -435,12 +440,14 @@
 	ksock_conn_t *conn = tx->tx_conn;
 	ksock_peer_t *peer = conn->ksnc_peer;
 
-	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
+	/*
+	 * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
 	 * to ksnp_zc_req_list if some fragment of this message should be sent
 	 * zero-copy.  Our peer will send an ACK containing this cookie when
 	 * she has received this message to tell us we can signal completion.
 	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
-	 * ksnp_zc_req_list. */
+	 * ksnp_zc_req_list.
+	 */
 	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
 	LASSERT(tx->tx_zc_capable);
 
@@ -450,9 +457,10 @@
 	    !conn->ksnc_zc_capable)
 		return;
 
-	/* assign cookie and queue tx to pending list, it will be released when
-	 * a matching ack is received. See ksocknal_handle_zcack() */
-
+	/*
+	 * assign cookie and queue tx to pending list, it will be released when
+	 * a matching ack is received. See ksocknal_handle_zcack()
+	 */
 	ksocknal_tx_addref(tx);
 
 	spin_lock(&peer->ksnp_lock);
@@ -461,11 +469,11 @@
 	tx->tx_deadline =
 		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
-	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+	LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
 
 	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
 
-	if (peer->ksnp_zc_next_cookie == 0)
+	if (!peer->ksnp_zc_next_cookie)
 		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 
 	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
@@ -485,7 +493,7 @@
 
 	spin_lock(&peer->ksnp_lock);
 
-	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+	if (!tx->tx_msg.ksm_zc_cookies[0]) {
 		/* Not waiting for an ACK */
 		spin_unlock(&peer->ksnp_lock);
 		return;
@@ -500,20 +508,20 @@
 }
 
 static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	int rc;
 
 	if (tx->tx_zc_capable && !tx->tx_zc_checked)
 		ksocknal_check_zc_req(tx);
 
-	rc = ksocknal_transmit (conn, tx);
+	rc = ksocknal_transmit(conn, tx);
 
 	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
-	if (tx->tx_resid == 0) {
+	if (!tx->tx_resid) {
 		/* Sent everything OK */
-		LASSERT (rc == 0);
+		LASSERT(!rc);
 
 		return 0;
 	}
@@ -532,13 +540,13 @@
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
 		/* enomem list takes over scheduler's ref... */
-		LASSERT (conn->ksnc_tx_scheduled);
+		LASSERT(conn->ksnc_tx_scheduled);
 		list_add_tail(&conn->ksnc_tx_list,
-				  &ksocknal_data.ksnd_enomem_conns);
+			      &ksocknal_data.ksnd_enomem_conns);
 		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
 						   SOCKNAL_ENOMEM_RETRY),
 				   ksocknal_data.ksnd_reaper_waketime))
-			wake_up (&ksocknal_data.ksnd_reaper_waitq);
+			wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
 		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 		return rc;
@@ -569,21 +577,19 @@
 		ksocknal_uncheck_zc_req(tx);
 
 	/* it's not an error if conn is being closed */
-	ksocknal_close_conn_and_siblings (conn,
-					  (conn->ksnc_closing) ? 0 : rc);
+	ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);
 
 	return rc;
 }
 
 static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(ksock_route_t *route)
 {
-
 	/* called holding write lock on ksnd_global_lock */
 
 	LASSERT(!route->ksnr_scheduled);
 	LASSERT(!route->ksnr_connecting);
-	LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+	LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);
 
 	route->ksnr_scheduled = 1;	      /* scheduling conn for connd */
 	ksocknal_route_addref(route);	   /* extra ref for connd */
@@ -591,14 +597,14 @@
 	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 
 	list_add_tail(&route->ksnr_connd_list,
-			  &ksocknal_data.ksnd_connd_routes);
+		      &ksocknal_data.ksnd_connd_routes);
 	wake_up(&ksocknal_data.ksnd_connd_waitq);
 
 	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 }
 
 void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
 {
 	ksock_route_t *route;
 
@@ -606,7 +612,7 @@
 	for (;;) {
 		/* launch any/all connections that need it */
 		route = ksocknal_find_connectable_route_locked(peer);
-		if (route == NULL)
+		if (!route)
 			return;
 
 		ksocknal_launch_connection_locked(route);
@@ -623,15 +629,15 @@
 	int tnob = 0;
 	int fnob = 0;
 
-	list_for_each (tmp, &peer->ksnp_conns) {
+	list_for_each(tmp, &peer->ksnp_conns) {
 		ksock_conn_t *c  = list_entry(tmp, ksock_conn_t, ksnc_list);
 		int nob = atomic_read(&c->ksnc_tx_nob) +
 			c->ksnc_sock->sk->sk_wmem_queued;
 		int rc;
 
 		LASSERT(!c->ksnc_closing);
-		LASSERT(c->ksnc_proto != NULL &&
-			c->ksnc_proto->pro_match_tx != NULL);
+		LASSERT(c->ksnc_proto &&
+			c->ksnc_proto->pro_match_tx);
 
 		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
 
@@ -642,7 +648,7 @@
 			continue;
 
 		case SOCKNAL_MATCH_YES: /* typed connection */
-			if (typed == NULL || tnob > nob ||
+			if (!typed || tnob > nob ||
 			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
 			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
 				typed = c;
@@ -651,7 +657,7 @@
 			break;
 
 		case SOCKNAL_MATCH_MAY: /* fallback connection */
-			if (fallback == NULL || fnob > nob ||
+			if (!fallback || fnob > nob ||
 			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
 			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
 				fallback = c;
@@ -662,9 +668,9 @@
 	}
 
 	/* prefer the typed selection */
-	conn = (typed != NULL) ? typed : fallback;
+	conn = (typed) ? typed : fallback;
 
-	if (conn != NULL)
+	if (conn)
 		conn->ksnc_tx_last_post = cfs_time_current();
 
 	return conn;
@@ -675,48 +681,51 @@
 {
 	conn->ksnc_proto->pro_pack(tx);
 
-	atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
 	ksocknal_conn_addref(conn); /* +1 ref for tx */
 	tx->tx_conn = conn;
 }
 
 void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
 {
 	ksock_sched_t *sched = conn->ksnc_scheduler;
 	ksock_msg_t *msg = &tx->tx_msg;
 	ksock_tx_t *ztx = NULL;
 	int bufnob = 0;
 
-	/* called holding global lock (read or irq-write) and caller may
+	/*
+	 * called holding global lock (read or irq-write) and caller may
 	 * not have dropped this lock between finding conn and calling me,
 	 * so we don't need the {get,put}connsock dance to deref
-	 * ksnc_sock... */
+	 * ksnc_sock...
+	 */
 	LASSERT(!conn->ksnc_closing);
 
 	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
-		libcfs_id2str(conn->ksnc_peer->ksnp_id),
-		&conn->ksnc_ipaddr,
-		conn->ksnc_port);
+	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
+	       &conn->ksnc_ipaddr, conn->ksnc_port);
 
 	ksocknal_tx_prep(conn, tx);
 
-	/* Ensure the frags we've been given EXACTLY match the number of
+	/*
+	 * Ensure the frags we've been given EXACTLY match the number of
 	 * bytes we want to send.  Many TCP/IP stacks disregard any total
 	 * size parameters passed to them and just look at the frags.
 	 *
 	 * We always expect at least 1 mapped fragment containing the
-	 * complete ksocknal message header. */
-	LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+	 * complete ksocknal message header.
+	 */
+	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
 		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
 		(unsigned int)tx->tx_nob);
 	LASSERT(tx->tx_niov >= 1);
 	LASSERT(tx->tx_resid == tx->tx_nob);
 
-	CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
-		tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
-					       KSOCK_MSG_NOOP,
-		tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
+	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+	       tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
+					      KSOCK_MSG_NOOP,
+	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
 	/*
 	 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
@@ -725,7 +734,7 @@
 	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
 	spin_lock_bh(&sched->kss_lock);
 
-	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+	if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
 		/* First packet starts the timeout */
 		conn->ksnc_tx_deadline =
 			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
@@ -736,26 +745,30 @@
 	}
 
 	if (msg->ksm_type == KSOCK_MSG_NOOP) {
-		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
-		 * on a normal packet so I don't need to send it */
-		LASSERT(msg->ksm_zc_cookies[1] != 0);
-		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+		/*
+		 * The packet is a noop ZC ACK; try to piggyback the ack_cookie
+		 * on a normal packet so I don't need to send it
+		 */
+		LASSERT(msg->ksm_zc_cookies[1]);
+		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
 
 		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
 			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
 
 	} else {
-		/* It's a normal packet - can it piggback a noop zc-ack that
-		 * has been queued already? */
-		LASSERT(msg->ksm_zc_cookies[1] == 0);
-		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
+		/*
+		 * It's a normal packet - can it piggyback a noop zc-ack that
+		 * has been queued already?
+		 */
+		LASSERT(!msg->ksm_zc_cookies[1]);
+		LASSERT(conn->ksnc_proto->pro_queue_tx_msg);
 
 		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
 		/* ztx will be released later */
 	}
 
-	if (ztx != NULL) {
-		atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+	if (ztx) {
+		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
 		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
 	}
 
@@ -763,24 +776,23 @@
 	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
 		/* +1 ref for scheduler */
 		ksocknal_conn_addref(conn);
-		list_add_tail (&conn->ksnc_tx_list,
-				   &sched->kss_tx_conns);
+		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 
 	spin_unlock_bh(&sched->kss_lock);
 }
 
 ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
 {
 	unsigned long now = cfs_time_current();
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each (tmp, &peer->ksnp_routes) {
-		route = list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -788,10 +800,10 @@
 			continue;
 
 		/* all route types connected ? */
-		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
+		if (!(ksocknal_route_mask() & ~route->ksnr_connected))
 			continue;
 
-		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+		if (!(!route->ksnr_retry_interval || /* first attempt */
 		      cfs_time_aftereq(now, route->ksnr_timeout))) {
 			CDEBUG(D_NET,
 			       "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
@@ -809,13 +821,13 @@
 }
 
 ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
 {
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each (tmp, &peer->ksnp_routes) {
-		route = list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -827,7 +839,7 @@
 }
 
 int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
 	ksock_peer_t *peer;
 	ksock_conn_t *conn;
@@ -835,21 +847,23 @@
 	int retry;
 	int rc;
 
-	LASSERT(tx->tx_conn == NULL);
+	LASSERT(!tx->tx_conn);
 
 	g_lock = &ksocknal_data.ksnd_global_lock;
 
 	for (retry = 0;; retry = 1) {
 		read_lock(g_lock);
 		peer = ksocknal_find_peer_locked(ni, id);
-		if (peer != NULL) {
-			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
+		if (peer) {
+			if (!ksocknal_find_connectable_route_locked(peer)) {
 				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
-				if (conn != NULL) {
-					/* I've got no routes that need to be
+				if (conn) {
+					/*
+					 * I've got no routes that need to be
 					 * connecting and I do have an actual
-					 * connection... */
-					ksocknal_queue_tx_locked (tx, conn);
+					 * connection...
+					 */
+					ksocknal_queue_tx_locked(tx, conn);
 					read_unlock(g_lock);
 					return 0;
 				}
@@ -862,12 +876,12 @@
 		write_lock_bh(g_lock);
 
 		peer = ksocknal_find_peer_locked(ni, id);
-		if (peer != NULL)
+		if (peer)
 			break;
 
 		write_unlock_bh(g_lock);
 
-		if ((id.pid & LNET_PID_USERFLAG) != 0) {
+		if (id.pid & LNET_PID_USERFLAG) {
 			CERROR("Refusing to create a connection to userspace process %s\n",
 			       libcfs_id2str(id));
 			return -EHOSTUNREACH;
@@ -881,7 +895,7 @@
 		rc = ksocknal_add_peer(ni, id,
 				       LNET_NIDADDR(id.nid),
 				       lnet_acceptor_port());
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't add peer %s: %d\n",
 			       libcfs_id2str(id), rc);
 			return rc;
@@ -891,21 +905,21 @@
 	ksocknal_launch_all_connections_locked(peer);
 
 	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
-	if (conn != NULL) {
+	if (conn) {
 		/* Connection exists; queue message on it */
-		ksocknal_queue_tx_locked (tx, conn);
+		ksocknal_queue_tx_locked(tx, conn);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
 
 	if (peer->ksnp_accepting > 0 ||
-	    ksocknal_find_connecting_route_locked (peer) != NULL) {
+	    ksocknal_find_connecting_route_locked(peer)) {
 		/* the message is going to be pinned to the peer */
 		tx->tx_deadline =
 			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
 		/* Queue the message until a connection is established */
-		list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
@@ -932,19 +946,20 @@
 	int desc_size;
 	int rc;
 
-	/* NB 'private' is different depending on what we're sending.
-	 * Just ignore it... */
-
+	/*
+	 * NB 'private' is different depending on what we're sending.
+	 * Just ignore it...
+	 */
 	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
 	       payload_nob, payload_niov, libcfs_id2str(target));
 
-	LASSERT(payload_nob == 0 || payload_niov > 0);
+	LASSERT(!payload_nob || payload_niov > 0);
 	LASSERT(payload_niov <= LNET_MAX_IOV);
 	/* payload is either all vaddrs or all pages */
-	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-	LASSERT (!in_interrupt ());
+	LASSERT(!(payload_kiov && payload_iov));
+	LASSERT(!in_interrupt());
 
-	if (payload_iov != NULL)
+	if (payload_iov)
 		desc_size = offsetof(ksock_tx_t,
 				     tx_frags.virt.iov[1 + payload_niov]);
 	else
@@ -954,7 +969,7 @@
 	if (lntmsg->msg_vmflush)
 		mpflag = cfs_memory_pressure_get_and_set();
 	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
-	if (tx == NULL) {
+	if (!tx) {
 		CERROR("Can't allocate tx desc type %d size %d\n",
 		       type, desc_size);
 		if (lntmsg->msg_vmflush)
@@ -965,7 +980,7 @@
 	tx->tx_conn = NULL;		     /* set when assigned a conn */
 	tx->tx_lnetmsg = lntmsg;
 
-	if (payload_iov != NULL) {
+	if (payload_iov) {
 		tx->tx_kiov = NULL;
 		tx->tx_nkiov = 0;
 		tx->tx_iov = tx->tx_frags.virt.iov;
@@ -992,7 +1007,7 @@
 	if (!mpflag)
 		cfs_memory_pressure_restore(mpflag);
 
-	if (rc == 0)
+	if (!rc)
 		return 0;
 
 	ksocknal_free_tx(tx);
@@ -1014,7 +1029,7 @@
 }
 
 void
-ksocknal_thread_fini (void)
+ksocknal_thread_fini(void)
 {
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 	ksocknal_data.ksnd_nthreads--;
@@ -1022,7 +1037,7 @@
 }
 
 int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
 {
 	static char ksocknal_slop_buffer[4096];
 
@@ -1030,14 +1045,14 @@
 	unsigned int niov;
 	int skipped;
 
-	LASSERT(conn->ksnc_proto != NULL);
+	LASSERT(conn->ksnc_proto);
 
-	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
+	if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
 		/* Remind the socket to ack eagerly... */
 		ksocknal_lib_eager_ack(conn);
 	}
 
-	if (nob_to_skip == 0) {	 /* right at next packet boundary now */
+	if (!nob_to_skip) {	 /* right at next packet boundary now */
 		conn->ksnc_rx_started = 0;
 		mb();		       /* racing with timeout thread */
 
@@ -1061,11 +1076,11 @@
 
 			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
 			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
-			conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+			conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
 			break;
 
 		default:
-			LBUG ();
+			LBUG();
 		}
 		conn->ksnc_rx_niov = 1;
 
@@ -1075,9 +1090,10 @@
 		return 1;
 	}
 
-	/* Set up to skip as much as possible now.  If there's more left
-	 * (ran out of iov entries) we'll get called again */
-
+	/*
+	 * Set up to skip as much as possible now.  If there's more left
+	 * (ran out of iov entries) we'll get called again
+	 */
 	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
 	conn->ksnc_rx_nob_left = nob_to_skip;
 	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
@@ -1093,8 +1109,8 @@
 		skipped += nob;
 		nob_to_skip -= nob;
 
-	} while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
-		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+	} while (nob_to_skip &&    /* mustn't overflow conn's rx iov */
+		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
 
 	conn->ksnc_rx_niov = niov;
 	conn->ksnc_rx_kiov = NULL;
@@ -1104,13 +1120,13 @@
 }
 
 static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(ksock_conn_t *conn)
 {
 	lnet_hdr_t *lhdr;
 	lnet_process_id_t *id;
 	int rc;
 
-	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
 
 	/* NB: sched lock NOT held */
 	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1119,13 +1135,13 @@
 		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
 		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
  again:
-	if (conn->ksnc_rx_nob_wanted != 0) {
+	if (conn->ksnc_rx_nob_wanted) {
 		rc = ksocknal_receive(conn);
 
 		if (rc <= 0) {
-			LASSERT (rc != -EAGAIN);
+			LASSERT(rc != -EAGAIN);
 
-			if (rc == 0)
+			if (!rc)
 				CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
 				       conn,
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
@@ -1139,12 +1155,12 @@
 				       conn->ksnc_port);
 
 			/* it's not an error if conn is being closed */
-			ksocknal_close_conn_and_siblings (conn,
-							  (conn->ksnc_closing) ? 0 : rc);
-			return (rc == 0 ? -ESHUTDOWN : rc);
+			ksocknal_close_conn_and_siblings(conn,
+							 (conn->ksnc_closing) ? 0 : rc);
+			return (!rc ? -ESHUTDOWN : rc);
 		}
 
-		if (conn->ksnc_rx_nob_wanted != 0) {
+		if (conn->ksnc_rx_nob_wanted) {
 			/* short read */
 			return -EAGAIN;
 		}
@@ -1169,7 +1185,7 @@
 		}
 
 		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
-		    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
+		    conn->ksnc_msg.ksm_csum &&     /* has checksum */
 		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
 			/* NOOP Checksum error */
 			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
@@ -1180,10 +1196,10 @@
 			return -EIO;
 		}
 
-		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
+		if (conn->ksnc_msg.ksm_zc_cookies[1]) {
 			__u64 cookie = 0;
 
-			LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
 			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
 				cookie = conn->ksnc_msg.ksm_zc_cookies[0];
@@ -1191,7 +1207,7 @@
 			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
 					       conn->ksnc_msg.ksm_zc_cookies[1]);
 
-			if (rc != 0) {
+			if (rc) {
 				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
@@ -1202,7 +1218,7 @@
 		}
 
 		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
-			ksocknal_new_packet (conn, 0);
+			ksocknal_new_packet(conn, 0);
 			return 0;       /* NOOP is done and just return */
 		}
 
@@ -1224,7 +1240,7 @@
 		/* unpack message header */
 		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
 
-		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
+		if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
 			/* Userspace peer */
 			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
 			id = &conn->ksnc_peer->ksnp_id;
@@ -1243,14 +1259,14 @@
 		if (rc < 0) {
 			/* I just received garbage: give up on this conn */
 			ksocknal_new_packet(conn, 0);
-			ksocknal_close_conn_and_siblings (conn, rc);
+			ksocknal_close_conn_and_siblings(conn, rc);
 			ksocknal_conn_decref(conn);
 			return -EPROTO;
 		}
 
 		/* I'm racing with ksocknal_recv() */
-		LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
-			 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
 
 		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
 			return 0;
@@ -1262,8 +1278,8 @@
 		/* payload all received */
 		rc = 0;
 
-		if (conn->ksnc_rx_nob_left == 0 &&   /* not truncating */
-		    conn->ksnc_msg.ksm_csum != 0 &&  /* has checksum */
+		if (!conn->ksnc_rx_nob_left &&   /* not truncating */
+		    conn->ksnc_msg.ksm_csum &&  /* has checksum */
 		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
 			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
@@ -1271,7 +1287,7 @@
 			rc = -EIO;
 		}
 
-		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
+		if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
 			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
 			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
@@ -1285,16 +1301,16 @@
 
 		lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
 
-		if (rc != 0) {
+		if (rc) {
 			ksocknal_new_packet(conn, 0);
-			ksocknal_close_conn_and_siblings (conn, rc);
+			ksocknal_close_conn_and_siblings(conn, rc);
 			return -EPROTO;
 		}
 		/* Fall through */
 
 	case SOCKNAL_RX_SLOP:
 		/* starting new packet? */
-		if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
 			return 0;       /* come back later */
 		goto again;	     /* try to finish reading slop now */
 
@@ -1308,9 +1324,9 @@
 }
 
 int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
-	       unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
-	       unsigned int offset, unsigned int mlen, unsigned int rlen)
+ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+	      unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+	      unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
 	ksock_conn_t *conn = private;
 	ksock_sched_t *sched = conn->ksnc_scheduler;
@@ -1322,7 +1338,7 @@
 	conn->ksnc_rx_nob_wanted = mlen;
 	conn->ksnc_rx_nob_left = rlen;
 
-	if (mlen == 0 || iov != NULL) {
+	if (!mlen || iov) {
 		conn->ksnc_rx_nkiov = 0;
 		conn->ksnc_rx_kiov = NULL;
 		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
@@ -1349,8 +1365,8 @@
 	switch (conn->ksnc_rx_state) {
 	case SOCKNAL_RX_PARSE_WAIT:
 		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-		wake_up (&sched->kss_waitq);
-		LASSERT (conn->ksnc_rx_ready);
+		wake_up(&sched->kss_waitq);
+		LASSERT(conn->ksnc_rx_ready);
 		break;
 
 	case SOCKNAL_RX_PARSE:
@@ -1396,7 +1412,7 @@
 	cfs_block_allsigs();
 
 	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't set CPT affinity to %d: %d\n",
 		       info->ksi_cpt, rc);
 	}
@@ -1408,18 +1424,20 @@
 
 		/* Ensure I progress everything semi-fairly */
 
-		if (!list_empty (&sched->kss_rx_conns)) {
+		if (!list_empty(&sched->kss_rx_conns)) {
 			conn = list_entry(sched->kss_rx_conns.next,
-					      ksock_conn_t, ksnc_rx_list);
+					  ksock_conn_t, ksnc_rx_list);
 			list_del(&conn->ksnc_rx_list);
 
 			LASSERT(conn->ksnc_rx_scheduled);
 			LASSERT(conn->ksnc_rx_ready);
 
-			/* clear rx_ready in case receive isn't complete.
+			/*
+			 * clear rx_ready in case receive isn't complete.
 			 * Do it BEFORE we call process_recv, since
 			 * data_ready can set it any time after we release
-			 * kss_lock. */
+			 * kss_lock.
+			 */
 			conn->ksnc_rx_ready = 0;
 			spin_unlock_bh(&sched->kss_lock);
 
@@ -1431,18 +1449,20 @@
 			LASSERT(conn->ksnc_rx_scheduled);
 
 			/* Did process_receive get everything it wanted? */
-			if (rc == 0)
+			if (!rc)
 				conn->ksnc_rx_ready = 1;
 
 			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
-				/* Conn blocked waiting for ksocknal_recv()
+				/*
+				 * Conn blocked waiting for ksocknal_recv()
 				 * I change its state (under lock) to signal
-				 * it can be rescheduled */
+				 * it can be rescheduled
+				 */
 				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
 			} else if (conn->ksnc_rx_ready) {
 				/* reschedule for rx */
-				list_add_tail (&conn->ksnc_rx_list,
-						   &sched->kss_rx_conns);
+				list_add_tail(&conn->ksnc_rx_list,
+					      &sched->kss_rx_conns);
 			} else {
 				conn->ksnc_rx_scheduled = 0;
 				/* drop my ref */
@@ -1452,25 +1472,24 @@
 			did_something = 1;
 		}
 
-		if (!list_empty (&sched->kss_tx_conns)) {
+		if (!list_empty(&sched->kss_tx_conns)) {
 			LIST_HEAD(zlist);
 
 			if (!list_empty(&sched->kss_zombie_noop_txs)) {
-				list_add(&zlist,
-					     &sched->kss_zombie_noop_txs);
+				list_add(&zlist, &sched->kss_zombie_noop_txs);
 				list_del_init(&sched->kss_zombie_noop_txs);
 			}
 
 			conn = list_entry(sched->kss_tx_conns.next,
-					      ksock_conn_t, ksnc_tx_list);
-			list_del (&conn->ksnc_tx_list);
+					  ksock_conn_t, ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);
 
 			LASSERT(conn->ksnc_tx_scheduled);
 			LASSERT(conn->ksnc_tx_ready);
 			LASSERT(!list_empty(&conn->ksnc_tx_queue));
 
 			tx = list_entry(conn->ksnc_tx_queue.next,
-					    ksock_tx_t, tx_list);
+					ksock_tx_t, tx_list);
 
 			if (conn->ksnc_tx_carrier == tx)
 				ksocknal_next_tx_carrier(conn);
@@ -1478,16 +1497,20 @@
 			/* dequeue now so empty list => more to send */
 			list_del(&tx->tx_list);
 
-			/* Clear tx_ready in case send isn't complete.  Do
+			/*
+			 * Clear tx_ready in case send isn't complete.  Do
 			 * it BEFORE we call process_transmit, since
 			 * write_space can set it any time after we release
-			 * kss_lock. */
+			 * kss_lock.
+			 */
 			conn->ksnc_tx_ready = 0;
 			spin_unlock_bh(&sched->kss_lock);
 
 			if (!list_empty(&zlist)) {
-				/* free zombie noop txs, it's fast because
-				 * noop txs are just put in freelist */
+				/*
+				 * free zombie noop txs, it's fast because
+				 * noop txs are just put in freelist
+				 */
 				ksocknal_txlist_done(NULL, &zlist, 0);
 			}
 
@@ -1496,8 +1519,7 @@
 			if (rc == -ENOMEM || rc == -EAGAIN) {
 				/* Incomplete send: replace tx on HEAD of tx_queue */
 				spin_lock_bh(&sched->kss_lock);
-				list_add(&tx->tx_list,
-					     &conn->ksnc_tx_queue);
+				list_add(&tx->tx_list, &conn->ksnc_tx_queue);
 			} else {
 				/* Complete send; tx -ref */
 				ksocknal_tx_decref(tx);
@@ -1508,13 +1530,15 @@
 			}
 
 			if (rc == -ENOMEM) {
-				/* Do nothing; after a short timeout, this
-				 * conn will be reposted on kss_tx_conns. */
+				/*
+				 * Do nothing; after a short timeout, this
+				 * conn will be reposted on kss_tx_conns.
+				 */
 			} else if (conn->ksnc_tx_ready &&
 				   !list_empty(&conn->ksnc_tx_queue)) {
 				/* reschedule for tx */
 				list_add_tail(&conn->ksnc_tx_list,
-						   &sched->kss_tx_conns);
+					      &sched->kss_tx_conns);
 			} else {
 				conn->ksnc_tx_scheduled = 0;
 				/* drop my ref */
@@ -1533,7 +1557,7 @@
 				rc = wait_event_interruptible_exclusive(
 					sched->kss_waitq,
 					!ksocknal_sched_cansleep(sched));
-				LASSERT (rc == 0);
+				LASSERT(!rc);
 			} else {
 				cond_resched();
 			}
@@ -1551,7 +1575,7 @@
  * Add connection to kss_rx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(ksock_conn_t *conn)
 {
 	ksock_sched_t *sched;
 
@@ -1562,13 +1586,12 @@
 	conn->ksnc_rx_ready = 1;
 
 	if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
-		list_add_tail(&conn->ksnc_rx_list,
-				  &sched->kss_rx_conns);
+		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
 		conn->ksnc_rx_scheduled = 1;
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 	spin_unlock_bh(&sched->kss_lock);
 }
@@ -1577,7 +1600,7 @@
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
 	ksock_sched_t *sched;
 
@@ -1589,20 +1612,19 @@
 
 	if (!conn->ksnc_tx_scheduled && /* not being progressed */
 	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
-		list_add_tail (&conn->ksnc_tx_list,
-				   &sched->kss_tx_conns);
+		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 
 	spin_unlock_bh(&sched->kss_lock);
 }
 
 static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
 {
 	__u32 version = 0;
 
@@ -1611,7 +1633,7 @@
 	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
 		version = __swab32(hello->kshm_version);
 
-	if (version != 0) {
+	if (version) {
 #if SOCKNAL_VERSION_DEBUG
 		if (*ksocknal_tunables.ksnd_protocol == 1)
 			return NULL;
@@ -1632,11 +1654,11 @@
 	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
 
-		CLASSERT(sizeof (lnet_magicversion_t) ==
-			 offsetof (ksock_hello_msg_t, kshm_src_nid));
+		CLASSERT(sizeof(lnet_magicversion_t) ==
+			 offsetof(ksock_hello_msg_t, kshm_src_nid));
 
-		if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
-		    hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
+		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
+		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
 			return &ksocknal_protocol_v1x;
 	}
 
@@ -1644,8 +1666,8 @@
 }
 
 int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
 	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
 	ksock_net_t *net = (ksock_net_t *)ni->ni_data;
@@ -1653,7 +1675,7 @@
 	LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
 
 	/* rely on caller to hold a ref on socket so it wouldn't disappear */
-	LASSERT(conn->ksnc_proto != NULL);
+	LASSERT(conn->ksnc_proto);
 
 	hello->kshm_src_nid = ni->ni_nid;
 	hello->kshm_dst_nid = peer_nid;
@@ -1682,9 +1704,9 @@
 }
 
 int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
-		     __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+		    __u64 *incarnation)
 {
 	/* Return < 0	fatal error
 	 *	0	  success
@@ -1692,7 +1714,7 @@
 	 *	EPROTO     protocol version mismatch
 	 */
 	struct socket *sock = conn->ksnc_sock;
-	int active = (conn->ksnc_proto != NULL);
+	int active = !!conn->ksnc_proto;
 	int timeout;
 	int proto_match;
 	int rc;
@@ -1705,20 +1727,20 @@
 	timeout = active ? *ksocknal_tunables.ksnd_timeout :
 			    lnet_acceptor_timeout();
 
-	rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
-	if (rc != 0) {
+	rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+	if (rc) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
-			rc, &conn->ksnc_ipaddr);
-		LASSERT (rc < 0);
+		       rc, &conn->ksnc_ipaddr);
+		LASSERT(rc < 0);
 		return rc;
 	}
 
 	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
 	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
-	    hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 		/* Unexpected magic! */
 		CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
-		       __cpu_to_le32 (hello->kshm_magic),
+		       __cpu_to_le32(hello->kshm_magic),
 		       LNET_PROTO_TCP_MAGIC,
 		       &conn->ksnc_ipaddr);
 		return -EPROTO;
@@ -1726,15 +1748,15 @@
 
 	rc = lnet_sock_read(sock, &hello->kshm_version,
 			    sizeof(hello->kshm_version), timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
-			rc, &conn->ksnc_ipaddr);
+		       rc, &conn->ksnc_ipaddr);
 		LASSERT(rc < 0);
 		return rc;
 	}
 
 	proto = ksocknal_parse_proto_version(hello);
-	if (proto == NULL) {
+	if (!proto) {
 		if (!active) {
 			/* unknown protocol from peer, tell peer my protocol */
 			conn->ksnc_proto = &ksocknal_protocol_v3x;
@@ -1760,7 +1782,7 @@
 
 	/* receive the rest of hello message anyway */
 	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading or checking hello from from %pI4h\n",
 		       rc, &conn->ksnc_ipaddr);
 		LASSERT(rc < 0);
@@ -1792,8 +1814,8 @@
 		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
 		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
 			CERROR("Unexpected type %d from %s ip %pI4h\n",
-				hello->kshm_ctype, libcfs_id2str(*peerid),
-				&conn->ksnc_ipaddr);
+			       hello->kshm_ctype, libcfs_id2str(*peerid),
+			       &conn->ksnc_ipaddr);
 			return -EPROTO;
 		}
 
@@ -1816,9 +1838,8 @@
 
 	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
 		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
-			conn->ksnc_type, libcfs_id2str(*peerid),
-			&conn->ksnc_ipaddr,
-			hello->kshm_ctype);
+		       conn->ksnc_type, libcfs_id2str(*peerid),
+		       &conn->ksnc_ipaddr, hello->kshm_ctype);
 		return -EPROTO;
 	}
 
@@ -1826,7 +1847,7 @@
 }
 
 static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(ksock_route_t *route)
 {
 	LIST_HEAD(zombies);
 	ksock_peer_t *peer = route->ksnr_peer;
@@ -1850,10 +1871,12 @@
 	for (;;) {
 		wanted = ksocknal_route_mask() & ~route->ksnr_connected;
 
-		/* stop connecting if peer/route got closed under me, or
-		 * route got connected while queued */
+		/*
+		 * stop connecting if peer/route got closed under me, or
+		 * route got connected while queued
+		 */
 		if (peer->ksnp_closing || route->ksnr_deleted ||
-		    wanted == 0) {
+		    !wanted) {
 			retry_later = 0;
 			break;
 		}
@@ -1869,14 +1892,14 @@
 		if (retry_later) /* needs reschedule */
 			break;
 
-		if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+		if (wanted & (1 << SOCKLND_CONN_ANY)) {
 			type = SOCKLND_CONN_ANY;
-		} else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+		} else if (wanted & (1 << SOCKLND_CONN_CONTROL)) {
 			type = SOCKLND_CONN_CONTROL;
-		} else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+		} else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) {
 			type = SOCKLND_CONN_BULK_IN;
 		} else {
-			LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+			LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT));
 			type = SOCKLND_CONN_BULK_OUT;
 		}
 
@@ -1893,7 +1916,7 @@
 		rc = lnet_connect(&sock, peer->ksnp_id.nid,
 				  route->ksnr_myipaddr,
 				  route->ksnr_ipaddr, route->ksnr_port);
-		if (rc != 0)
+		if (rc)
 			goto failed;
 
 		rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
@@ -1904,9 +1927,11 @@
 			goto failed;
 		}
 
-		/* A +ve RC means I have to retry because I lost the connection
-		 * race or I have to renegotiate protocol version */
-		retry_later = (rc != 0);
+		/*
+		 * A +ve RC means I have to retry because I lost the connection
+		 * race or I have to renegotiate protocol version
+		 */
+		retry_later = !!rc;
 		if (retry_later)
 			CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
 			       libcfs_nid2str(peer->ksnp_id.nid));
@@ -1918,17 +1943,20 @@
 	route->ksnr_connecting = 0;
 
 	if (retry_later) {
-		/* re-queue for attention; this frees me up to handle
-		 * the peer's incoming connection request */
-
+		/*
+		 * re-queue for attention; this frees me up to handle
+		 * the peer's incoming connection request
+		 */
 		if (rc == EALREADY ||
-		    (rc == 0 && peer->ksnp_accepting > 0)) {
-			/* We want to introduce a delay before next
+		    (!rc && peer->ksnp_accepting > 0)) {
+			/*
+			 * We want to introduce a delay before next
 			 * attempt to connect if we lost conn race,
 			 * but the race is resolved quickly usually,
-			 * so min_reconnectms should be good heuristic */
+			 * so min_reconnectms should be a good heuristic
+			 */
 			route->ksnr_retry_interval =
-				cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+				cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
 			route->ksnr_timeout = cfs_time_add(cfs_time_current(),
 							   route->ksnr_retry_interval);
 		}
@@ -1949,30 +1977,34 @@
 	route->ksnr_retry_interval *= 2;
 	route->ksnr_retry_interval =
 		max(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
 	route->ksnr_retry_interval =
 		min(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
 
-	LASSERT (route->ksnr_retry_interval != 0);
+	LASSERT(route->ksnr_retry_interval);
 	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
 					   route->ksnr_retry_interval);
 
 	if (!list_empty(&peer->ksnp_tx_queue) &&
-	    peer->ksnp_accepting == 0 &&
-	    ksocknal_find_connecting_route_locked(peer) == NULL) {
+	    !peer->ksnp_accepting &&
+	    !ksocknal_find_connecting_route_locked(peer)) {
 		ksock_conn_t *conn;
 
-		/* ksnp_tx_queue is queued on a conn on successful
-		 * connection for V1.x and V2.x */
-		if (!list_empty (&peer->ksnp_conns)) {
+		/*
+		 * ksnp_tx_queue is queued on a conn on successful
+		 * connection for V1.x and V2.x
+		 */
+		if (!list_empty(&peer->ksnp_conns)) {
 			conn = list_entry(peer->ksnp_conns.next,
-					      ksock_conn_t, ksnc_list);
-			LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+					  ksock_conn_t, ksnc_list);
+			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 		}
 
-		/* take all the blocked packets while I've got the lock and
-		 * complete below... */
+		/*
+		 * take all the blocked packets while I've got the lock and
+		 * complete below...
+		 */
 		list_splice_init(&peer->ksnp_tx_queue, &zombies);
 	}
 
@@ -2011,8 +2043,10 @@
 
 	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
 	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
-		/* can't create more connd, or still have enough
-		 * threads to handle more connecting */
+		/*
+		 * can't create more connds, or we still have enough
+		 * threads to handle more connecting
+		 */
 		return 0;
 	}
 
@@ -2041,7 +2075,7 @@
 	rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
 
 	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-	if (rc == 0)
+	if (!rc)
 		return 1;
 
 	/* we tried ... */
@@ -2093,8 +2127,10 @@
 	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
 }
 
-/* Go through connd_routes queue looking for a route that we can process
- * right now, @timeout_p can be updated if we need to come back later */
+/*
+ * Go through connd_routes queue looking for a route that we can process
+ * right now; @timeout_p can be updated if we need to come back later
+ */
 static ksock_route_t *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
@@ -2104,10 +2140,9 @@
 	now = cfs_time_current();
 
 	/* connd_routes can contain both pending and ordinary routes */
-	list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
-				 ksnr_connd_list) {
-
-		if (route->ksnr_retry_interval == 0 ||
+	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
+			    ksnr_connd_list) {
+		if (!route->ksnr_retry_interval ||
 		    cfs_time_aftereq(now, route->ksnr_timeout))
 			return route;
 
@@ -2120,7 +2155,7 @@
 }
 
 int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
 {
 	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
 	ksock_connreq_t *cr;
@@ -2172,15 +2207,17 @@
 			spin_lock_bh(connd_lock);
 		}
 
-		/* Only handle an outgoing connection request if there
+		/*
+		 * Only handle an outgoing connection request if there
 		 * is a thread left to handle incoming connections and
-		 * create new connd */
+		 * create new connd
+		 */
 		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
 		    ksocknal_data.ksnd_connd_running) {
 			route = ksocknal_connd_get_route_locked(&timeout);
 		}
-		if (route != NULL) {
-			list_del (&route->ksnr_connd_list);
+		if (route) {
+			list_del(&route->ksnr_connd_list);
 			ksocknal_data.ksnd_connd_connecting++;
 			spin_unlock_bh(connd_lock);
 			dropped_lock = 1;
@@ -2231,24 +2268,26 @@
 }
 
 static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn(ksock_peer_t *peer)
 {
 	/* We're called with a shared lock on ksnd_global_lock */
 	ksock_conn_t *conn;
 	struct list_head *ctmp;
 
-	list_for_each (ctmp, &peer->ksnp_conns) {
+	list_for_each(ctmp, &peer->ksnp_conns) {
 		int error;
 
-		conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 
 		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
 		LASSERT(!conn->ksnc_closing);
 
-		/* SOCK_ERROR will reset error code of socket in
-		 * some platform (like Darwin8.x) */
+		/*
+		 * SOCK_ERROR will reset the error code of the socket on
+		 * some platforms (like Darwin8.x)
+		 */
 		error = conn->ksnc_sock->sk->sk_err;
-		if (error != 0) {
+		if (error) {
 			ksocknal_conn_addref(conn);
 
 			switch (error) {
@@ -2292,11 +2331,13 @@
 		}
 
 		if ((!list_empty(&conn->ksnc_tx_queue) ||
-		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
+		     conn->ksnc_sock->sk->sk_wmem_queued) &&
 		    cfs_time_aftereq(cfs_time_current(),
 				     conn->ksnc_tx_deadline)) {
-			/* Timed out messages queued for sending or
-			 * buffered in the socket's send buffer */
+			/*
+			 * Timed out messages queued for sending or
+			 * buffered in the socket's send buffer
+			 */
 			ksocknal_conn_addref(conn);
 			CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
 				libcfs_id2str(peer->ksnp_id),
@@ -2317,16 +2358,15 @@
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	while (!list_empty (&peer->ksnp_tx_queue)) {
-		tx = list_entry (peer->ksnp_tx_queue.next,
-				     ksock_tx_t, tx_list);
+	while (!list_empty(&peer->ksnp_tx_queue)) {
+		tx = list_entry(peer->ksnp_tx_queue.next, ksock_tx_t, tx_list);
 
 		if (!cfs_time_aftereq(cfs_time_current(),
 				      tx->tx_deadline))
 			break;
 
-		list_del (&tx->tx_list);
-		list_add_tail (&tx->tx_list, &stale_txs);
+		list_del(&tx->tx_list);
+		list_add_tail(&tx->tx_list, &stale_txs);
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2336,6 +2376,7 @@
 
 static int
 ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+	__must_hold(&ksocknal_data.ksnd_global_lock)
 {
 	ksock_sched_t *sched;
 	ksock_conn_t *conn;
@@ -2356,12 +2397,14 @@
 	if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
 		return 0;
 
-	/* retry 10 secs later, so we wouldn't put pressure
-	 * on this peer if we failed to send keepalive this time */
+	/*
+	 * retry 10 secs later, so we don't put pressure
+	 * on this peer if we fail to send a keepalive this time
+	 */
 	peer->ksnp_send_keepalive = cfs_time_shift(10);
 
 	conn = ksocknal_find_conn_locked(peer, NULL, 1);
-	if (conn != NULL) {
+	if (conn) {
 		sched = conn->ksnc_scheduler;
 
 		spin_lock_bh(&sched->kss_lock);
@@ -2378,12 +2421,12 @@
 
 	/* cookie = 1 is reserved for keepalive PING */
 	tx = ksocknal_alloc_tx_noop(1, 1);
-	if (tx == NULL) {
+	if (!tx) {
 		read_lock(&ksocknal_data.ksnd_global_lock);
 		return -ENOMEM;
 	}
 
-	if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+	if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
 		read_lock(&ksocknal_data.ksnd_global_lock);
 		return 1;
 	}
@@ -2395,7 +2438,7 @@
 }
 
 static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
 {
 	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
 	ksock_peer_t *peer;
@@ -2403,9 +2446,11 @@
 	ksock_tx_t *tx;
 
  again:
-	/* NB. We expect to have a look at all the peers and not find any
+	/*
+	 * NB. We expect to have a look at all the peers and not find any
 	 * connections to time out, so we just use a shared lock while we
-	 * take a look... */
+	 * take a look...
+	 */
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
 	list_for_each_entry(peer, peers, ksnp_list) {
@@ -2413,35 +2458,37 @@
 		int resid = 0;
 		int n = 0;
 
-		if (ksocknal_send_keepalive_locked(peer) != 0) {
+		if (ksocknal_send_keepalive_locked(peer)) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 			goto again;
 		}
 
-		conn = ksocknal_find_timed_out_conn (peer);
+		conn = ksocknal_find_timed_out_conn(peer);
 
-		if (conn != NULL) {
+		if (conn) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 
-			ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 
-			/* NB we won't find this one again, but we can't
+			/*
+			 * NB we won't find this one again, but we can't
 			 * just proceed with the next peer, since we dropped
-			 * ksnd_global_lock and it might be dead already! */
+			 * ksnd_global_lock and it might be dead already!
+			 */
 			ksocknal_conn_decref(conn);
 			goto again;
 		}
 
-		/* we can't process stale txs right here because we're
-		 * holding only shared lock */
-		if (!list_empty (&peer->ksnp_tx_queue)) {
-			ksock_tx_t *tx =
-				list_entry (peer->ksnp_tx_queue.next,
-						ksock_tx_t, tx_list);
+		/*
+		 * we can't process stale txs right here because we're
+		 * holding only a shared lock
+		 */
+		if (!list_empty(&peer->ksnp_tx_queue)) {
+			ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
+						    ksock_tx_t, tx_list);
 
 			if (cfs_time_aftereq(cfs_time_current(),
 					     tx->tx_deadline)) {
-
 				ksocknal_peer_addref(peer);
 				read_unlock(&ksocknal_data.ksnd_global_lock);
 
@@ -2466,13 +2513,13 @@
 			n++;
 		}
 
-		if (n == 0) {
+		if (!n) {
 			spin_unlock(&peer->ksnp_lock);
 			continue;
 		}
 
 		tx = list_entry(peer->ksnp_zc_req_list.next,
-				    ksock_tx_t, tx_zc_list);
+				ksock_tx_t, tx_zc_list);
 		deadline = tx->tx_deadline;
 		resid = tx->tx_resid;
 		conn = tx->tx_conn;
@@ -2486,7 +2533,7 @@
 		       cfs_duration_sec(cfs_time_current() - deadline),
 		       resid, conn->ksnc_sock->sk->sk_wmem_queued);
 
-		ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 		ksocknal_conn_decref(conn);
 		goto again;
 	}
@@ -2495,7 +2542,7 @@
 }
 
 int
-ksocknal_reaper (void *arg)
+ksocknal_reaper(void *arg)
 {
 	wait_queue_t wait;
 	ksock_conn_t *conn;
@@ -2515,12 +2562,10 @@
 	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
 	while (!ksocknal_data.ksnd_shuttingdown) {
-
-		if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
-			conn = list_entry (ksocknal_data. \
-					       ksnd_deathrow_conns.next,
-					       ksock_conn_t, ksnc_list);
-			list_del (&conn->ksnc_list);
+		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+					  ksock_conn_t, ksnc_list);
+			list_del(&conn->ksnc_list);
 
 			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2531,10 +2576,10 @@
 			continue;
 		}
 
-		if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
-			conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
-					       next, ksock_conn_t, ksnc_list);
-			list_del (&conn->ksnc_list);
+		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+					  ksock_conn_t, ksnc_list);
+			list_del(&conn->ksnc_list);
 
 			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2544,9 +2589,9 @@
 			continue;
 		}
 
-		if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
 			list_add(&enomem_conns,
-				     &ksocknal_data.ksnd_enomem_conns);
+				 &ksocknal_data.ksnd_enomem_conns);
 			list_del_init(&ksocknal_data.ksnd_enomem_conns);
 		}
 
@@ -2554,10 +2599,10 @@
 
 		/* reschedule all the connections that stalled with ENOMEM... */
 		nenomem_conns = 0;
-		while (!list_empty (&enomem_conns)) {
-			conn = list_entry (enomem_conns.next,
-					       ksock_conn_t, ksnc_tx_list);
-			list_del (&conn->ksnc_tx_list);
+		while (!list_empty(&enomem_conns)) {
+			conn = list_entry(enomem_conns.next, ksock_conn_t,
+					  ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);
 
 			sched = conn->ksnc_scheduler;
 
@@ -2566,7 +2611,7 @@
 			LASSERT(conn->ksnc_tx_scheduled);
 			conn->ksnc_tx_ready = 1;
 			list_add_tail(&conn->ksnc_tx_list,
-					  &sched->kss_tx_conns);
+				      &sched->kss_tx_conns);
 			wake_up(&sched->kss_waitq);
 
 			spin_unlock_bh(&sched->kss_lock);
@@ -2580,21 +2625,22 @@
 			const int p = 1;
 			int chunk = ksocknal_data.ksnd_peer_hash_size;
 
-			/* Time to check for timeouts on a few more peers: I do
+			/*
+			 * Time to check for timeouts on a few more peers: I do
 			 * checks every 'p' seconds on a proportion of the peer
 			 * table and I need to check every connection 'n' times
 			 * within a timeout interval, to ensure I detect a
 			 * timeout on any connection within (n+1)/n times the
-			 * timeout interval. */
-
+			 * timeout interval.
+			 */
 			if (*ksocknal_tunables.ksnd_timeout > n * p)
 				chunk = (chunk * n * p) /
 					*ksocknal_tunables.ksnd_timeout;
-			if (chunk == 0)
+			if (!chunk)
 				chunk = 1;
 
 			for (i = 0; i < chunk; i++) {
-				ksocknal_check_peer_timeouts (peer_index);
+				ksocknal_check_peer_timeouts(peer_index);
 				peer_index = (peer_index + 1) %
 					     ksocknal_data.ksnd_peer_hash_size;
 			}
@@ -2602,25 +2648,27 @@
 			deadline = cfs_time_add(deadline, cfs_time_seconds(p));
 		}
 
-		if (nenomem_conns != 0) {
-			/* Reduce my timeout if I rescheduled ENOMEM conns.
+		if (nenomem_conns) {
+			/*
+			 * Reduce my timeout if I rescheduled ENOMEM conns.
 			 * This also prevents me getting woken immediately
-			 * if any go back on my enomem list. */
+			 * if any go back on my enomem list.
+			 */
 			timeout = SOCKNAL_ENOMEM_RETRY;
 		}
 		ksocknal_data.ksnd_reaper_waketime =
 			cfs_time_add(cfs_time_current(), timeout);
 
-		set_current_state (TASK_INTERRUPTIBLE);
-		add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		if (!ksocknal_data.ksnd_shuttingdown &&
-		    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-		    list_empty (&ksocknal_data.ksnd_zombie_conns))
+		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+		    list_empty(&ksocknal_data.ksnd_zombie_conns))
 			schedule_timeout(timeout);
 
-		set_current_state (TASK_RUNNING);
-		remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 	}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index cf8e43b..3e1f24e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -45,13 +45,13 @@
 	/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
 	LASSERT(!conn->ksnc_closing);
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d getting sock peer IP\n", rc);
 		return rc;
 	}
 
 	rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d getting sock local IP\n", rc);
 		return rc;
 	}
@@ -67,9 +67,11 @@
 	if (conn->ksnc_proto == &ksocknal_protocol_v1x)
 		return 0;
 
-	/* ZC if the socket supports scatter/gather and doesn't need software
-	 * checksums */
-	return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
+	/*
+	 * ZC if the socket supports scatter/gather and doesn't need software
+	 * checksums
+	 */
+	return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
 }
 
 int
@@ -82,12 +84,13 @@
 	if (*ksocknal_tunables.ksnd_enable_csum	&& /* checksum enabled */
 	    conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
 	    tx->tx_nob == tx->tx_resid		 && /* frist sending    */
-	    tx->tx_msg.ksm_csum == 0)		     /* not checksummed  */
+	    !tx->tx_msg.ksm_csum)		     /* not checksummed  */
 		ksocknal_lib_csum_tx(tx);
 
-	/* NB we can't trust socket ops to either consume our iovs
-	 * or leave them alone. */
-
+	/*
+	 * NB we can't trust socket ops to either consume our iovs
+	 * or leave them alone.
+	 */
 	{
 #if SOCKNAL_SINGLE_FRAG_TX
 		struct kvec scratch;
@@ -123,11 +126,13 @@
 	int nob;
 
 	/* Not NOOP message */
-	LASSERT(tx->tx_lnetmsg != NULL);
+	LASSERT(tx->tx_lnetmsg);
 
-	/* NB we can't trust socket ops to either consume our iovs
-	 * or leave them alone. */
-	if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
+	/*
+	 * NB we can't trust socket ops to either consume our iovs
+	 * or leave them alone.
+	 */
+	if (tx->tx_msg.ksm_zc_cookies[0]) {
 		/* Zero copy is enabled */
 		struct sock *sk = sock->sk;
 		struct page *page = kiov->kiov_page;
@@ -136,13 +141,13 @@
 		int msgflg = MSG_DONTWAIT;
 
 		CDEBUG(D_NET, "page %p + offset %x for %d\n",
-			       page, offset, kiov->kiov_len);
+		       page, offset, kiov->kiov_len);
 
 		if (!list_empty(&conn->ksnc_tx_queue) ||
 		    fragsize < tx->tx_resid)
 			msgflg |= MSG_MORE;
 
-		if (sk->sk_prot->sendpage != NULL) {
+		if (sk->sk_prot->sendpage) {
 			rc = sk->sk_prot->sendpage(sk, page,
 						   offset, fragsize, msgflg);
 		} else {
@@ -187,13 +192,14 @@
 	int opt = 1;
 	struct socket *sock = conn->ksnc_sock;
 
-	/* Remind the socket to ACK eagerly.  If I don't, the socket might
+	/*
+	 * Remind the socket to ACK eagerly.  If I don't, the socket might
 	 * think I'm about to send something it could piggy-back the ACK
 	 * on, introducing delay in completing zero-copy sends in my
-	 * peer. */
-
-	kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
-			       (char *)&opt, sizeof(opt));
+	 * peer.
+	 */
+	kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt,
+			  sizeof(opt));
 }
 
 int
@@ -218,8 +224,10 @@
 	int sum;
 	__u32 saved_csum;
 
-	/* NB we can't trust socket ops to either consume our iovs
-	 * or leave them alone. */
+	/*
+	 * NB we can't trust socket ops to either consume our iovs
+	 * or leave them alone.
+	 */
 	LASSERT(niov > 0);
 
 	for (nob = i = 0; i < niov; i++) {
@@ -228,8 +236,8 @@
 	}
 	LASSERT(nob <= conn->ksnc_rx_nob_wanted);
 
-	rc = kernel_recvmsg(conn->ksnc_sock, &msg,
-		scratchiov, niov, nob, MSG_DONTWAIT);
+	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
+			    MSG_DONTWAIT);
 
 	saved_csum = 0;
 	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -237,7 +245,7 @@
 		conn->ksnc_msg.ksm_csum = 0;
 	}
 
-	if (saved_csum != 0) {
+	if (saved_csum) {
 		/* accumulate checksum */
 		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
 			LASSERT(i < niov);
@@ -258,7 +266,7 @@
 static void
 ksocknal_lib_kiov_vunmap(void *addr)
 {
-	if (addr == NULL)
+	if (!addr)
 		return;
 
 	vunmap(addr);
@@ -272,7 +280,7 @@
 	int nob;
 	int i;
 
-	if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
+	if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
 		return NULL;
 
 	LASSERT(niov <= LNET_MAX_IOV);
@@ -282,7 +290,7 @@
 		return NULL;
 
 	for (nob = i = 0; i < niov; i++) {
-		if ((kiov[i].kiov_offset != 0 && i > 0) ||
+		if ((kiov[i].kiov_offset && i > 0) ||
 		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
 			return NULL;
 
@@ -291,7 +299,7 @@
 	}
 
 	addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
-	if (addr == NULL)
+	if (!addr)
 		return NULL;
 
 	iov->iov_base = addr + kiov[0].kiov_offset;
@@ -329,10 +337,12 @@
 	int fragnob;
 	int n;
 
-	/* NB we can't trust socket ops to either consume our iovs
-	 * or leave them alone. */
+	/*
+	 * NB we can't trust socket ops to either consume our iovs
+	 * or leave them alone.
+	 */
 	addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
-	if (addr != NULL) {
+	if (addr) {
 		nob = scratchiov[0].iov_len;
 		n = 1;
 
@@ -347,17 +357,19 @@
 
 	LASSERT(nob <= conn->ksnc_rx_nob_wanted);
 
-	rc = kernel_recvmsg(conn->ksnc_sock, &msg,
-			(struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
+	rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
+			    n, nob, MSG_DONTWAIT);
 
-	if (conn->ksnc_msg.ksm_csum != 0) {
+	if (conn->ksnc_msg.ksm_csum) {
 		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
 			LASSERT(i < niov);
 
-			/* Dang! have to kmap again because I have nowhere to
+			/*
+			 * Dang! have to kmap again because I have nowhere to
 			 * stash the mapped address.  But by doing it while the
 			 * page is still mapped, the kernel just bumps the map
-			 * count and returns me the address it stashed. */
+			 * count and returns me the address it stashed.
+			 */
 			base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
 			fragnob = kiov[i].kiov_len;
 			if (fragnob > sum)
@@ -370,7 +382,7 @@
 		}
 	}
 
-	if (addr != NULL) {
+	if (addr) {
 		ksocknal_lib_kiov_vunmap(addr);
 	} else {
 		for (i = 0; i < niov; i++)
@@ -388,7 +400,7 @@
 	void *base;
 
 	LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
-	LASSERT(tx->tx_conn != NULL);
+	LASSERT(tx->tx_conn);
 	LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
 
 	tx->tx_msg.ksm_csum = 0;
@@ -396,7 +408,7 @@
 	csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
 			     tx->tx_iov[0].iov_len);
 
-	if (tx->tx_kiov != NULL) {
+	if (tx->tx_kiov) {
 		for (i = 0; i < tx->tx_nkiov; i++) {
 			base = kmap(tx->tx_kiov[i].kiov_page) +
 			       tx->tx_kiov[i].kiov_offset;
@@ -427,22 +439,22 @@
 	int rc;
 
 	rc = ksocknal_connsock_addref(conn);
-	if (rc != 0) {
+	if (rc) {
 		LASSERT(conn->ksnc_closing);
 		*txmem = *rxmem = *nagle = 0;
 		return -ESHUTDOWN;
 	}
 
 	rc = lnet_sock_getbuf(sock, txmem, rxmem);
-	if (rc == 0) {
+	if (!rc) {
 		len = sizeof(*nagle);
 		rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
-					   (char *)nagle, &len);
+				       (char *)nagle, &len);
 	}
 
 	ksocknal_connsock_decref(conn);
 
-	if (rc == 0)
+	if (!rc)
 		*nagle = !*nagle;
 	else
 		*txmem = *rxmem = *nagle = 0;
@@ -463,23 +475,24 @@
 
 	sock->sk->sk_allocation = GFP_NOFS;
 
-	/* Ensure this socket aborts active sends immediately when we close
-	 * it. */
-
+	/*
+	 * Ensure this socket aborts active sends immediately when we close
+	 * it.
+	 */
 	linger.l_onoff = 0;
 	linger.l_linger = 0;
 
-	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
-			      (char *)&linger, sizeof(linger));
-	if (rc != 0) {
+	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
+			       sizeof(linger));
+	if (rc) {
 		CERROR("Can't set SO_LINGER: %d\n", rc);
 		return rc;
 	}
 
 	option = -1;
-	rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2,
-				    (char *)&option, sizeof(option));
-	if (rc != 0) {
+	rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
+			       sizeof(option));
+	if (rc) {
 		CERROR("Can't set SO_LINGER2: %d\n", rc);
 		return rc;
 	}
@@ -488,8 +501,8 @@
 		option = 1;
 
 		rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
-					    (char *)&option, sizeof(option));
-		if (rc != 0) {
+				       (char *)&option, sizeof(option));
+		if (rc) {
 			CERROR("Can't disable nagle: %d\n", rc);
 			return rc;
 		}
@@ -497,10 +510,10 @@
 
 	rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
 			      *ksocknal_tunables.ksnd_rx_buffer_size);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
-			*ksocknal_tunables.ksnd_tx_buffer_size,
-			*ksocknal_tunables.ksnd_rx_buffer_size, rc);
+		       *ksocknal_tunables.ksnd_tx_buffer_size,
+		       *ksocknal_tunables.ksnd_rx_buffer_size, rc);
 		return rc;
 	}
 
@@ -514,9 +527,9 @@
 	do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
 
 	option = (do_keepalive ? 1 : 0);
-	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
-			      (char *)&option, sizeof(option));
-	if (rc != 0) {
+	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
+			       sizeof(option));
+	if (rc) {
 		CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
 		return rc;
 	}
@@ -524,23 +537,23 @@
 	if (!do_keepalive)
 		return 0;
 
-	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
-				    (char *)&keep_idle, sizeof(keep_idle));
-	if (rc != 0) {
+	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
+			       sizeof(keep_idle));
+	if (rc) {
 		CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
 		return rc;
 	}
 
 	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
-				    (char *)&keep_intvl, sizeof(keep_intvl));
-	if (rc != 0) {
+			       (char *)&keep_intvl, sizeof(keep_intvl));
+	if (rc) {
 		CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
 		return rc;
 	}
 
-	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
-				    (char *)&keep_count, sizeof(keep_count));
-	if (rc != 0) {
+	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
+			       sizeof(keep_count));
+	if (rc) {
 		CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
 		return rc;
 	}
@@ -558,7 +571,7 @@
 	int rc;
 
 	rc = ksocknal_connsock_addref(conn);
-	if (rc != 0)			    /* being shut down */
+	if (rc)			    /* being shut down */
 		return;
 
 	sk = conn->ksnc_sock->sk;
@@ -570,8 +583,8 @@
 	release_sock(sk);
 
 	rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
-				      (char *)&val, sizeof(val));
-	LASSERT(rc == 0);
+			       (char *)&val, sizeof(val));
+	LASSERT(!rc);
 
 	lock_sock(sk);
 	tp->nonagle = nonagle;
@@ -593,11 +606,12 @@
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
 	conn = sk->sk_user_data;
-	if (conn == NULL) {	     /* raced with ksocknal_terminate_conn */
+	if (!conn) {	     /* raced with ksocknal_terminate_conn */
 		LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
 		sk->sk_data_ready(sk);
-	} else
+	} else {
 		ksocknal_read_callback(conn);
+	}
 
 	read_unlock(&ksocknal_data.ksnd_global_lock);
 }
@@ -619,14 +633,14 @@
 
 	CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
 	       sk, wspace, min_wpace, conn,
-	       (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
+	       !conn ? "" : (conn->ksnc_tx_ready ?
 				      " ready" : " blocked"),
-	       (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
+	       !conn ? "" : (conn->ksnc_tx_scheduled ?
 				      " scheduled" : " idle"),
-	       (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
+	       !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ?
 				      " empty" : " queued"));
 
-	if (conn == NULL) {	     /* raced with ksocknal_terminate_conn */
+	if (!conn) {	     /* raced with ksocknal_terminate_conn */
 		LASSERT(sk->sk_write_space != &ksocknal_write_space);
 		sk->sk_write_space(sk);
 
@@ -637,10 +651,11 @@
 	if (wspace >= min_wpace) {	      /* got enough space */
 		ksocknal_write_callback(conn);
 
-		/* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
+		/*
+		 * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
 		 * ENOMEM check in ksocknal_transmit is race-free (think about
-		 * it). */
-
+		 * it).
+		 */
 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 	}
 
@@ -666,15 +681,19 @@
 void
 ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 {
-	/* Remove conn's network callbacks.
+	/*
+	 * Remove conn's network callbacks.
 	 * NB I _have_ to restore the callback, rather than storing a noop,
-	 * since the socket could survive past this module being unloaded!! */
+	 * since the socket could survive past this module being unloaded!!
+	 */
 	sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
 	sock->sk->sk_write_space = conn->ksnc_saved_write_space;
 
-	/* A callback could be in progress already; they hold a read lock
+	/*
+	 * A callback could be in progress already; they hold a read lock
 	 * on ksnd_global_lock (to serialise with me) and NOOP if
-	 * sk_user_data is NULL. */
+	 * sk_user_data is NULL.
+	 */
 	sock->sk->sk_user_data = NULL;
 
 	return ;
@@ -691,14 +710,16 @@
 
 	if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
 	    !conn->ksnc_tx_ready) {
-		/* SOCK_NOSPACE is set when the socket fills
+		/*
+		 * SOCK_NOSPACE is set when the socket fills
 		 * and cleared in the write_space callback
 		 * (which also sets ksnc_tx_ready).  If
 		 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
 		 * zero, I didn't fill the socket and
 		 * write_space won't reschedule me, so I
 		 * return -ENOMEM to get my caller to retry
-		 * after a timeout */
+		 * after a timeout
+		 */
 		rc = -ENOMEM;
 	}
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index fdb2b23..6329cbe 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -14,9 +14,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include "socklnd.h"
@@ -41,8 +38,10 @@
 module_param(peer_timeout, int, 0444);
 MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
 
-/* Number of daemons in each thread pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's not set. */
+/*
+ * Number of daemons in each thread pool, which is percpt;
+ * we will estimate a reasonable value based on CPUs if it's not set.
+ */
 static unsigned int nscheds;
 module_param(nscheds, int, 0444);
 MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
@@ -72,7 +71,7 @@
 module_param(typed_conns, int, 0444);
 MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
 
-static int min_bulk = 1<<10;
+static int min_bulk = 1 << 10;
 module_param(min_bulk, int, 0644);
 MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 986bce4..976a072 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -19,9 +19,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include "socklnd.h"
@@ -56,7 +53,7 @@
 
 	/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
 	LASSERT(!list_empty(&conn->ksnc_tx_queue));
-	LASSERT(tx != NULL);
+	LASSERT(tx);
 
 	/* Next TX that can carry ZC-ACK or LNet message */
 	if (tx->tx_list.next == &conn->ksnc_tx_queue) {
@@ -75,8 +72,8 @@
 {
 	ksock_tx_t *tx = conn->ksnc_tx_carrier;
 
-	LASSERT(tx_ack == NULL ||
-		 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+	LASSERT(!tx_ack ||
+		tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
 
 	/*
 	 * Enqueue or piggyback tx_ack / cookie
@@ -85,10 +82,10 @@
 	 * . There is tx can piggyback cookie of tx_ack (or cookie),
 	 *   piggyback the cookie and return the tx.
 	 */
-	if (tx == NULL) {
-		if (tx_ack != NULL) {
+	if (!tx) {
+		if (tx_ack) {
 			list_add_tail(&tx_ack->tx_list,
-					  &conn->ksnc_tx_queue);
+				      &conn->ksnc_tx_queue);
 			conn->ksnc_tx_carrier = tx_ack;
 		}
 		return 0;
@@ -96,16 +93,16 @@
 
 	if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
 		/* tx is noop zc-ack, can't piggyback zc-ack cookie */
-		if (tx_ack != NULL)
+		if (tx_ack)
 			list_add_tail(&tx_ack->tx_list,
-					  &conn->ksnc_tx_queue);
+				      &conn->ksnc_tx_queue);
 		return 0;
 	}
 
 	LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
-	LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
+	LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
 
-	if (tx_ack != NULL)
+	if (tx_ack)
 		cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
 
 	/* piggyback the zc-ack cookie */
@@ -128,7 +125,7 @@
 	 * . If there is NOOP on the connection, piggyback the cookie
 	 *   and replace the NOOP tx, and return the NOOP tx.
 	 */
-	if (tx == NULL) { /* nothing on queue */
+	if (!tx) { /* nothing on queue */
 		list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
 		conn->ksnc_tx_carrier = tx_msg;
 		return NULL;
@@ -162,22 +159,22 @@
 		return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
 
 	/* non-blocking ZC-ACK (to router) */
-	LASSERT(tx_ack == NULL ||
-		 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+	LASSERT(!tx_ack ||
+		tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
 
 	tx = conn->ksnc_tx_carrier;
-	if (tx == NULL) {
-		if (tx_ack != NULL) {
+	if (!tx) {
+		if (tx_ack) {
 			list_add_tail(&tx_ack->tx_list,
-					  &conn->ksnc_tx_queue);
+				      &conn->ksnc_tx_queue);
 			conn->ksnc_tx_carrier = tx_ack;
 		}
 		return 0;
 	}
 
-	/* conn->ksnc_tx_carrier != NULL */
+	/* conn->ksnc_tx_carrier is non-NULL here */
 
-	if (tx_ack != NULL)
+	if (tx_ack)
 		cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
 
 	if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
@@ -185,7 +182,7 @@
 
 	if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
 		/* replace the keepalive PING with a real ACK */
-		LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+		LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
 		tx->tx_msg.ksm_zc_cookies[1] = cookie;
 		return 1;
 	}
@@ -197,7 +194,7 @@
 		return 1; /* XXX return error in the future */
 	}
 
-	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+	if (!tx->tx_msg.ksm_zc_cookies[0]) {
 		/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
 		if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
 			tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
@@ -233,7 +230,7 @@
 			tmp = tx->tx_msg.ksm_zc_cookies[0];
 		}
 
-		if (tmp != 0) {
+		if (tmp) {
 			/* range of cookies */
 			tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
 			tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
@@ -261,7 +258,7 @@
 	}
 
 	/* failed to piggyback ZC-ACK */
-	if (tx_ack != NULL) {
+	if (tx_ack) {
 		list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
 		/* the next tx can piggyback at least 1 ACK */
 		ksocknal_next_tx_carrier(conn);
@@ -280,7 +277,7 @@
 		return SOCKNAL_MATCH_YES;
 #endif
 
-	if (tx == NULL || tx->tx_lnetmsg == NULL) {
+	if (!tx || !tx->tx_lnetmsg) {
 		/* noop packet */
 		nob = offsetof(ksock_msg_t, ksm_u);
 	} else {
@@ -319,7 +316,7 @@
 {
 	int nob;
 
-	if (tx == NULL || tx->tx_lnetmsg == NULL)
+	if (!tx || !tx->tx_lnetmsg)
 		nob = offsetof(ksock_msg_t, ksm_u);
 	else
 		nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
@@ -334,7 +331,7 @@
 	case SOCKLND_CONN_ACK:
 		if (nonblk)
 			return SOCKNAL_MATCH_YES;
-		else if (tx == NULL || tx->tx_lnetmsg == NULL)
+		else if (!tx || !tx->tx_lnetmsg)
 			return SOCKNAL_MATCH_MAY;
 		else
 			return SOCKNAL_MATCH_NO;
@@ -369,10 +366,10 @@
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
 	conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
-	if (conn != NULL) {
+	if (conn) {
 		ksock_sched_t *sched = conn->ksnc_scheduler;
 
-		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
 
 		spin_lock_bh(&sched->kss_lock);
 
@@ -390,11 +387,11 @@
 
 	/* ACK connection is not ready, or can't piggyback the ACK */
 	tx = ksocknal_alloc_tx_noop(cookie, !!remote);
-	if (tx == NULL)
+	if (!tx)
 		return -ENOMEM;
 
 	rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
-	if (rc == 0)
+	if (!rc)
 		return 0;
 
 	ksocknal_free_tx(tx);
@@ -411,7 +408,7 @@
 	LIST_HEAD(zlist);
 	int count;
 
-	if (cookie1 == 0)
+	if (!cookie1)
 		cookie1 = cookie2;
 
 	count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
@@ -424,8 +421,8 @@
 
 	spin_lock(&peer->ksnp_lock);
 
-	list_for_each_entry_safe(tx, tmp,
-				     &peer->ksnp_zc_req_list, tx_zc_list) {
+	list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list,
+				 tx_zc_list) {
 		__u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
 		if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
@@ -433,7 +430,7 @@
 			list_del(&tx->tx_zc_list);
 			list_add(&tx->tx_zc_list, &zlist);
 
-			if (--count == 0)
+			if (!--count)
 				break;
 		}
 	}
@@ -446,7 +443,7 @@
 		ksocknal_tx_decref(tx);
 	}
 
-	return count == 0 ? 0 : -EPROTO;
+	return !count ? 0 : -EPROTO;
 }
 
 static int
@@ -461,58 +458,59 @@
 	CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
 
 	LIBCFS_ALLOC(hdr, sizeof(*hdr));
-	if (hdr == NULL) {
+	if (!hdr) {
 		CERROR("Can't allocate lnet_hdr_t\n");
 		return -ENOMEM;
 	}
 
 	hmv = (lnet_magicversion_t *)&hdr->dest_nid;
 
-	/* Re-organize V2.x message header to V1.x (lnet_hdr_t)
-	 * header and send out */
-	hmv->magic         = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
-	hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
-	hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
+	/*
+	 * Re-organize V2.x message header to V1.x (lnet_hdr_t)
+	 * header and send out
+	 */
+	hmv->magic         = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
+	hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
+	hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
 
-	if (the_lnet.ln_testprotocompat != 0) {
+	if (the_lnet.ln_testprotocompat) {
 		/* single-shot proto check */
 		LNET_LOCK();
-		if ((the_lnet.ln_testprotocompat & 1) != 0) {
+		if (the_lnet.ln_testprotocompat & 1) {
 			hmv->version_major++;   /* just different! */
 			the_lnet.ln_testprotocompat &= ~1;
 		}
-		if ((the_lnet.ln_testprotocompat & 2) != 0) {
+		if (the_lnet.ln_testprotocompat & 2) {
 			hmv->magic = LNET_PROTO_MAGIC;
 			the_lnet.ln_testprotocompat &= ~2;
 		}
 		LNET_UNLOCK();
 	}
 
-	hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
-	hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
-	hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
-	hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
-	hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
-	hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
+	hdr->src_nid = cpu_to_le64(hello->kshm_src_nid);
+	hdr->src_pid = cpu_to_le32(hello->kshm_src_pid);
+	hdr->type = cpu_to_le32(LNET_MSG_HELLO);
+	hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
+	hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
+	hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
 
 	rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
-	if (rc != 0) {
+	if (rc) {
 		CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
 			rc, &conn->ksnc_ipaddr, conn->ksnc_port);
 		goto out;
 	}
 
-	if (hello->kshm_nips == 0)
+	if (!hello->kshm_nips)
 		goto out;
 
-	for (i = 0; i < (int) hello->kshm_nips; i++) {
-		hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
-	}
+	for (i = 0; i < (int) hello->kshm_nips; i++)
+		hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
 
 	rc = lnet_sock_write(sock, hello->kshm_ips,
 			     hello->kshm_nips * sizeof(__u32),
 			     lnet_acceptor_timeout());
-	if (rc != 0) {
+	if (rc) {
 		CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
 			rc, hello->kshm_nips,
 			&conn->ksnc_ipaddr, conn->ksnc_port);
@@ -532,10 +530,10 @@
 	hello->kshm_magic   = LNET_PROTO_MAGIC;
 	hello->kshm_version = conn->ksnc_proto->pro_version;
 
-	if (the_lnet.ln_testprotocompat != 0) {
+	if (the_lnet.ln_testprotocompat) {
 		/* single-shot proto check */
 		LNET_LOCK();
-		if ((the_lnet.ln_testprotocompat & 1) != 0) {
+		if (the_lnet.ln_testprotocompat & 1) {
 			hello->kshm_version++;   /* just different! */
 			the_lnet.ln_testprotocompat &= ~1;
 		}
@@ -544,19 +542,19 @@
 
 	rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
 			     lnet_acceptor_timeout());
-	if (rc != 0) {
+	if (rc) {
 		CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
 			rc, &conn->ksnc_ipaddr, conn->ksnc_port);
 		return rc;
 	}
 
-	if (hello->kshm_nips == 0)
+	if (!hello->kshm_nips)
 		return 0;
 
 	rc = lnet_sock_write(sock, hello->kshm_ips,
 			     hello->kshm_nips * sizeof(__u32),
 			     lnet_acceptor_timeout());
-	if (rc != 0) {
+	if (rc) {
 		CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
 			rc, hello->kshm_nips,
 			&conn->ksnc_ipaddr, conn->ksnc_port);
@@ -575,7 +573,7 @@
 	int i;
 
 	LIBCFS_ALLOC(hdr, sizeof(*hdr));
-	if (hdr == NULL) {
+	if (!hdr) {
 		CERROR("Can't allocate lnet_hdr_t\n");
 		return -ENOMEM;
 	}
@@ -583,15 +581,15 @@
 	rc = lnet_sock_read(sock, &hdr->src_nid,
 			    sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid),
 			    timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
-			rc, &conn->ksnc_ipaddr);
+		       rc, &conn->ksnc_ipaddr);
 		LASSERT(rc < 0 && rc != -EALREADY);
 		goto out;
 	}
 
 	/* ...and check we got what we expected */
-	if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
+	if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) {
 		CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
 		       le32_to_cpu(hdr->type),
 		       &conn->ksnc_ipaddr);
@@ -613,14 +611,14 @@
 		goto out;
 	}
 
-	if (hello->kshm_nips == 0)
+	if (!hello->kshm_nips)
 		goto out;
 
 	rc = lnet_sock_read(sock, hello->kshm_ips,
 			    hello->kshm_nips * sizeof(__u32), timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading IPs from ip %pI4h\n",
-			rc, &conn->ksnc_ipaddr);
+		       rc, &conn->ksnc_ipaddr);
 		LASSERT(rc < 0 && rc != -EALREADY);
 		goto out;
 	}
@@ -628,7 +626,7 @@
 	for (i = 0; i < (int) hello->kshm_nips; i++) {
 		hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
 
-		if (hello->kshm_ips[i] == 0) {
+		if (!hello->kshm_ips[i]) {
 			CERROR("Zero IP[%d] from ip %pI4h\n",
 			       i, &conn->ksnc_ipaddr);
 			rc = -EPROTO;
@@ -657,9 +655,9 @@
 			    offsetof(ksock_hello_msg_t, kshm_ips) -
 				     offsetof(ksock_hello_msg_t, kshm_src_nid),
 			    timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
-			rc, &conn->ksnc_ipaddr);
+		       rc, &conn->ksnc_ipaddr);
 		LASSERT(rc < 0 && rc != -EALREADY);
 		return rc;
 	}
@@ -681,14 +679,14 @@
 		return -EPROTO;
 	}
 
-	if (hello->kshm_nips == 0)
+	if (!hello->kshm_nips)
 		return 0;
 
 	rc = lnet_sock_read(sock, hello->kshm_ips,
 			    hello->kshm_nips * sizeof(__u32), timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading IPs from ip %pI4h\n",
-			rc, &conn->ksnc_ipaddr);
+		       rc, &conn->ksnc_ipaddr);
 		LASSERT(rc < 0 && rc != -EALREADY);
 		return rc;
 	}
@@ -697,7 +695,7 @@
 		if (conn->ksnc_flip)
 			__swab32s(&hello->kshm_ips[i]);
 
-		if (hello->kshm_ips[i] == 0) {
+		if (!hello->kshm_ips[i]) {
 			CERROR("Zero IP[%d] from ip %pI4h\n",
 			       i, &conn->ksnc_ipaddr);
 			return -EPROTO;
@@ -712,12 +710,13 @@
 {
 	/* V1.x has no KSOCK_MSG_NOOP */
 	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
-	LASSERT(tx->tx_lnetmsg != NULL);
+	LASSERT(tx->tx_lnetmsg);
 
 	tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
 	tx->tx_iov[0].iov_len  = sizeof(lnet_hdr_t);
 
-	tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+	tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+	tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
 }
 
 static void
@@ -725,17 +724,19 @@
 {
 	tx->tx_iov[0].iov_base = &tx->tx_msg;
 
-	if (tx->tx_lnetmsg != NULL) {
+	if (tx->tx_lnetmsg) {
 		LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
 
 		tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
 		tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
-		tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+		tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+		tx->tx_resid = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
 	} else {
 		LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
 
 		tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
-		tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
+		tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
+		tx->tx_resid = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
 	}
 	/* Don't checksum before start sending, because packet can be piggybacked with ACK */
 }
@@ -745,7 +746,8 @@
 {
 	msg->ksm_csum = 0;
 	msg->ksm_type = KSOCK_MSG_LNET;
-	msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1]  = 0;
+	msg->ksm_zc_cookies[0] = 0;
+	msg->ksm_zc_cookies[1] = 0;
 }
 
 static void
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index fed57d9..1452bb3 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -36,6 +36,7 @@
 
 #define DEBUG_SUBSYSTEM S_LNET
 #include <linux/completion.h>
+#include <net/sock.h>
 #include "../../include/linux/lnet/lib-lnet.h"
 
 static int   accept_port    = 988;
@@ -46,7 +47,9 @@
 	int			pta_shutdown;
 	struct socket		*pta_sock;
 	struct completion	pta_signal;
-} lnet_acceptor_state;
+} lnet_acceptor_state = {
+	.pta_shutdown = 1
+};
 
 int
 lnet_acceptor_port(void)
@@ -78,9 +81,11 @@
 static int
 lnet_acceptor_get_tunables(void)
 {
-	/* Userland acceptor uses 'accept_type' instead of 'accept', due to
+	/*
+	 * Userland acceptor uses 'accept_type' instead of 'accept', due to
 	 * conflict with 'accept(2)', but kernel acceptor still uses 'accept'
-	 * for compatibility. Hence the trick. */
+	 * for compatibility. Hence the trick.
+	 */
 	accept_type = accept;
 	return 0;
 }
@@ -140,7 +145,7 @@
 
 int
 lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
-	    __u32 local_ip, __u32 peer_ip, int peer_port)
+	     __u32 local_ip, __u32 peer_ip, int peer_port)
 {
 	lnet_acceptor_connreq_t cr;
 	struct socket *sock;
@@ -157,7 +162,7 @@
 
 		rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip,
 				       peer_port);
-		if (rc != 0) {
+		if (rc) {
 			if (fatal)
 				goto failed;
 			continue;
@@ -169,14 +174,14 @@
 		cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
 		cr.acr_nid     = peer_nid;
 
-		if (the_lnet.ln_testprotocompat != 0) {
+		if (the_lnet.ln_testprotocompat) {
 			/* single-shot proto check */
 			lnet_net_lock(LNET_LOCK_EX);
-			if ((the_lnet.ln_testprotocompat & 4) != 0) {
+			if (the_lnet.ln_testprotocompat & 4) {
 				cr.acr_version++;
 				the_lnet.ln_testprotocompat &= ~4;
 			}
-			if ((the_lnet.ln_testprotocompat & 8) != 0) {
+			if (the_lnet.ln_testprotocompat & 8) {
 				cr.acr_magic = LNET_PROTO_MAGIC;
 				the_lnet.ln_testprotocompat &= ~8;
 			}
@@ -184,7 +189,7 @@
 		}
 
 		rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
-		if (rc != 0)
+		if (rc)
 			goto failed_sock;
 
 		*sockp = sock;
@@ -202,8 +207,6 @@
 }
 EXPORT_SYMBOL(lnet_connect);
 
-/* Below is the code common for both kernel and MT user-space */
-
 static int
 lnet_accept(struct socket *sock, __u32 magic)
 {
@@ -218,23 +221,23 @@
 	LASSERT(sizeof(cr) <= 16);	     /* not too big for the stack */
 
 	rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
-	LASSERT(rc == 0);		      /* we succeeded before */
+	LASSERT(!rc);		      /* we succeeded before */
 
 	if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
-
 		if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
-			/* future version compatibility!
+			/*
+			 * future version compatibility!
 			 * When LNET unifies protocols over all LNDs, the first
-			 * thing sent will be a version query.  I send back
-			 * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
-
+			 * thing sent will be a version query. I send back
+			 * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old"
+			 */
 			memset(&cr, 0, sizeof(cr));
 			cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
 			cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
 			rc = lnet_sock_write(sock, &cr, sizeof(cr),
 					     accept_timeout);
 
-			if (rc != 0)
+			if (rc)
 				CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
 				       &peer_ip, rc);
 			return -EPROTO;
@@ -254,9 +257,9 @@
 
 	rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
 			    accept_timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading connection request version from %pI4h\n",
-			rc, &peer_ip);
+		       rc, &peer_ip);
 		return -EIO;
 	}
 
@@ -264,10 +267,12 @@
 		__swab32s(&cr.acr_version);
 
 	if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
-		/* future version compatibility!
+		/*
+		 * future version compatibility!
 		 * An acceptor-specific protocol rev will first send a version
 		 * query.  I send back my current version to tell her I'm
-		 * "old". */
+		 * "old".
+		 */
 		int peer_version = cr.acr_version;
 
 		memset(&cr, 0, sizeof(cr));
@@ -275,7 +280,7 @@
 		cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
 
 		rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
-		if (rc != 0)
+		if (rc)
 			CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
 			       peer_version, &peer_ip, rc);
 		return -EPROTO;
@@ -285,9 +290,9 @@
 			    sizeof(cr) -
 			    offsetof(lnet_acceptor_connreq_t, acr_nid),
 			    accept_timeout);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d reading connection request from %pI4h\n",
-			rc, &peer_ip);
+		       rc, &peer_ip);
 		return -EIO;
 	}
 
@@ -295,20 +300,20 @@
 		__swab64s(&cr.acr_nid);
 
 	ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
-	if (ni == NULL ||	       /* no matching net */
+	if (!ni ||	       /* no matching net */
 	    ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
-		if (ni != NULL)
+		if (ni)
 			lnet_ni_decref(ni);
 		LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
 				   &peer_ip, libcfs_nid2str(cr.acr_nid));
 		return -EPERM;
 	}
 
-	if (ni->ni_lnd->lnd_accept == NULL) {
+	if (!ni->ni_lnd->lnd_accept) {
 		/* This catches a request for the loopback LND */
 		lnet_ni_decref(ni);
 		LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
-				  &peer_ip, libcfs_nid2str(cr.acr_nid));
+				   &peer_ip, libcfs_nid2str(cr.acr_nid));
 		return -EPERM;
 	}
 
@@ -331,13 +336,13 @@
 	int peer_port;
 	int secure = (int)((long_ptr_t)arg);
 
-	LASSERT(lnet_acceptor_state.pta_sock == NULL);
+	LASSERT(!lnet_acceptor_state.pta_sock);
 
 	cfs_block_allsigs();
 
 	rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
 			      accept_backlog);
-	if (rc != 0) {
+	if (rc) {
 		if (rc == -EADDRINUSE)
 			LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
 					   accept_port);
@@ -354,13 +359,12 @@
 	lnet_acceptor_state.pta_shutdown = rc;
 	complete(&lnet_acceptor_state.pta_signal);
 
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	while (!lnet_acceptor_state.pta_shutdown) {
-
 		rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
-		if (rc != 0) {
+		if (rc) {
 			if (rc != -EAGAIN) {
 				CWARN("Accept error %d: pausing...\n", rc);
 				set_current_state(TASK_UNINTERRUPTIBLE);
@@ -376,7 +380,7 @@
 		}
 
 		rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't determine new connection's address\n");
 			goto failed;
 		}
@@ -389,14 +393,14 @@
 
 		rc = lnet_sock_read(newsock, &magic, sizeof(magic),
 				    accept_timeout);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Error %d reading connection request from %pI4h\n",
-				rc, &peer_ip);
+			       rc, &peer_ip);
 			goto failed;
 		}
 
 		rc = lnet_accept(newsock, magic);
-		if (rc != 0)
+		if (rc)
 			goto failed;
 
 		continue;
@@ -436,14 +440,19 @@
 int
 lnet_acceptor_start(void)
 {
+	struct task_struct *task;
 	int rc;
 	long rc2;
 	long secure;
 
-	LASSERT(lnet_acceptor_state.pta_sock == NULL);
+	/* if the acceptor is already running, return immediately */
+	if (!lnet_acceptor_state.pta_shutdown)
+		return 0;
+
+	LASSERT(!lnet_acceptor_state.pta_sock);
 
 	rc = lnet_acceptor_get_tunables();
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	init_completion(&lnet_acceptor_state.pta_signal);
@@ -451,13 +460,13 @@
 	if (rc <= 0)
 		return rc;
 
-	if (lnet_count_acceptor_nis() == 0)  /* not required */
+	if (!lnet_count_acceptor_nis())  /* not required */
 		return 0;
 
-	rc2 = PTR_ERR(kthread_run(lnet_acceptor,
-				  (void *)(ulong_ptr_t)secure,
-				  "acceptor_%03ld", secure));
-	if (IS_ERR_VALUE(rc2)) {
+	task = kthread_run(lnet_acceptor, (void *)(ulong_ptr_t)secure,
+			   "acceptor_%03ld", secure);
+	if (IS_ERR(task)) {
+		rc2 = PTR_ERR(task);
 		CERROR("Can't start acceptor thread: %ld\n", rc2);
 
 		return -ESRCH;
@@ -468,11 +477,11 @@
 
 	if (!lnet_acceptor_state.pta_shutdown) {
 		/* started OK */
-		LASSERT(lnet_acceptor_state.pta_sock != NULL);
+		LASSERT(lnet_acceptor_state.pta_sock);
 		return 0;
 	}
 
-	LASSERT(lnet_acceptor_state.pta_sock == NULL);
+	LASSERT(!lnet_acceptor_state.pta_sock);
 
 	return -ENETDOWN;
 }
@@ -480,11 +489,17 @@
 void
 lnet_acceptor_stop(void)
 {
-	if (lnet_acceptor_state.pta_sock == NULL) /* not running */
+	struct sock *sk;
+
+	if (lnet_acceptor_state.pta_shutdown) /* not running */
 		return;
 
 	lnet_acceptor_state.pta_shutdown = 1;
-	wake_up_all(sk_sleep(lnet_acceptor_state.pta_sock->sk));
+
+	sk = lnet_acceptor_state.pta_sock->sk;
+
+	/* wake up any sleepers using a safe method */
+	sk->sk_state_change(sk);
 
 	/* block until acceptor signals exit */
 	wait_for_completion(&lnet_acceptor_state.pta_signal);
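/*
 * Illustration only, not part of the patch: the acceptor hunks above switch
 * from round-tripping the kthread_run() result through PTR_ERR() and
 * IS_ERR_VALUE() to keeping the returned task_struct and testing it with
 * IS_ERR().  A minimal sketch of that idiom, using hypothetical names:
 */
#include <linux/err.h>
#include <linux/kthread.h>

static int sketch_thread_fn(void *arg)
{
	/* a real thread body would loop until kthread_should_stop() */
	return 0;
}

static int sketch_start_thread(void)
{
	struct task_struct *task;

	task = kthread_run(sketch_thread_fn, NULL, "sketch_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM from kthread_create() */

	return 0;
}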
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 362282f..3ecc96a 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -39,6 +39,7 @@
 #include <linux/ktime.h>
 
 #include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
 
 #define D_LNI D_CONSOLE
 
@@ -61,6 +62,9 @@
 module_param(rnet_htable_size, int, 0444);
 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
 
+static int lnet_ping(lnet_process_id_t id, int timeout_ms,
+		     lnet_process_id_t __user *ids, int n_ids);
+
 static char *
 lnet_get_routes(void)
 {
@@ -73,17 +77,17 @@
 	char *nets;
 	int rc;
 
-	if (*networks != 0 && *ip2nets != 0) {
+	if (*networks && *ip2nets) {
 		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
 		return NULL;
 	}
 
-	if (*ip2nets != 0) {
+	if (*ip2nets) {
 		rc = lnet_parse_ip2nets(&nets, ip2nets);
-		return (rc == 0) ? nets : NULL;
+		return !rc ? nets : NULL;
 	}
 
-	if (*networks != 0)
+	if (*networks)
 		return networks;
 
 	return "tcp";
@@ -94,6 +98,7 @@
 {
 	spin_lock_init(&the_lnet.ln_eq_wait_lock);
 	init_waitqueue_head(&the_lnet.ln_eq_waitq);
+	init_waitqueue_head(&the_lnet.ln_rc_waitq);
 	mutex_init(&the_lnet.ln_lnd_mutex);
 	mutex_init(&the_lnet.ln_api_mutex);
 }
@@ -104,10 +109,10 @@
 	int i;
 	struct list_head *hash;
 
-	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
+	LASSERT(!the_lnet.ln_remote_nets_hash);
 	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
 	LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
-	if (hash == NULL) {
+	if (!hash) {
 		CERROR("Failed to create remote nets hash table\n");
 		return -ENOMEM;
 	}
@@ -123,7 +128,7 @@
 {
 	int i;
 
-	if (the_lnet.ln_remote_nets_hash == NULL)
+	if (!the_lnet.ln_remote_nets_hash)
 		return;
 
 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
@@ -138,12 +143,12 @@
 static void
 lnet_destroy_locks(void)
 {
-	if (the_lnet.ln_res_lock != NULL) {
+	if (the_lnet.ln_res_lock) {
 		cfs_percpt_lock_free(the_lnet.ln_res_lock);
 		the_lnet.ln_res_lock = NULL;
 	}
 
-	if (the_lnet.ln_net_lock != NULL) {
+	if (the_lnet.ln_net_lock) {
 		cfs_percpt_lock_free(the_lnet.ln_net_lock);
 		the_lnet.ln_net_lock = NULL;
 	}
@@ -155,11 +160,11 @@
 	lnet_init_locks();
 
 	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
-	if (the_lnet.ln_res_lock == NULL)
+	if (!the_lnet.ln_res_lock)
 		goto failed;
 
 	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
-	if (the_lnet.ln_net_lock == NULL)
+	if (!the_lnet.ln_net_lock)
 		goto failed;
 
 	return 0;
@@ -171,10 +176,12 @@
 
 static void lnet_assert_wire_constants(void)
 {
-	/* Wire protocol assertions generated by 'wirecheck'
+	/*
+	 * Wire protocol assertions generated by 'wirecheck'
 	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
 	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
-	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
+	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
+	 */
 
 	/* Constants... */
 	CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
@@ -284,9 +291,8 @@
 {
 	mutex_lock(&the_lnet.ln_lnd_mutex);
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
-	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
+	LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type));
 
 	list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
 	lnd->lnd_refcount = 0;
@@ -302,9 +308,8 @@
 {
 	mutex_lock(&the_lnet.ln_lnd_mutex);
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
-	LASSERT(lnd->lnd_refcount == 0);
+	LASSERT(!lnd->lnd_refcount);
 
 	list_del(&lnd->lnd_list);
 	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
@@ -335,7 +340,6 @@
 		counters->recv_length  += ctr->recv_length;
 		counters->route_length += ctr->route_length;
 		counters->drop_length  += ctr->drop_length;
-
 	}
 	lnet_net_unlock(LNET_LOCK_EX);
 }
@@ -375,7 +379,7 @@
 {
 	int count = 0;
 
-	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
+	if (!rec->rec_type) /* not set yet, it's uninitialized */
 		return;
 
 	while (!list_empty(&rec->rec_active)) {
@@ -395,14 +399,16 @@
 	}
 
 	if (count > 0) {
-		/* Found alive MD/ME/EQ, user really should unlink/free
+		/*
+		 * Found alive MD/ME/EQ, user really should unlink/free
 		 * all of them before finalize LNet, but if someone didn't,
-		 * we have to recycle garbage for him */
+		 * we have to recycle garbage for him
+		 */
 		CERROR("%d active elements on exit of %s container\n",
 		       count, lnet_res_type2str(rec->rec_type));
 	}
 
-	if (rec->rec_lh_hash != NULL) {
+	if (rec->rec_lh_hash) {
 		LIBCFS_FREE(rec->rec_lh_hash,
 			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
 		rec->rec_lh_hash = NULL;
@@ -417,7 +423,7 @@
 	int rc = 0;
 	int i;
 
-	LASSERT(rec->rec_type == 0);
+	LASSERT(!rec->rec_type);
 
 	rec->rec_type = type;
 	INIT_LIST_HEAD(&rec->rec_active);
@@ -426,7 +432,7 @@
 	/* Arbitrary choice of hash table size */
 	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
 			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
-	if (rec->rec_lh_hash == NULL) {
+	if (!rec->rec_lh_hash) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -464,7 +470,7 @@
 	int i;
 
 	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
-	if (recs == NULL) {
+	if (!recs) {
 		CERROR("Failed to allocate %s resource containers\n",
 		       lnet_res_type2str(type));
 		return NULL;
@@ -472,7 +478,7 @@
 
 	cfs_percpt_for_each(rec, i, recs) {
 		rc = lnet_res_container_setup(rec, i, type);
-		if (rc != 0) {
+		if (rc) {
 			lnet_res_containers_destroy(recs);
 			return NULL;
 		}
@@ -518,7 +524,7 @@
 	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
 }
 
-int lnet_unprepare(void);
+static int lnet_unprepare(void);
 
 static int
 lnet_prepare(lnet_pid_t requested_pid)
@@ -527,11 +533,16 @@
 	struct lnet_res_container **recs;
 	int rc = 0;
 
-	LASSERT(the_lnet.ln_refcount == 0);
+	if (requested_pid == LNET_PID_ANY) {
+		/* Don't instantiate LNET just for me */
+		return -ENETDOWN;
+	}
+
+	LASSERT(!the_lnet.ln_refcount);
 
 	the_lnet.ln_routing = 0;
 
-	LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
+	LASSERT(!(requested_pid & LNET_PID_USERFLAG));
 	the_lnet.ln_pid = requested_pid;
 
 	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
@@ -541,7 +552,7 @@
 	INIT_LIST_HEAD(&the_lnet.ln_routers);
 
 	rc = lnet_create_remote_nets_table();
-	if (rc != 0)
+	if (rc)
 		goto failed;
 	/*
 	 * NB the interface cookie in wire handles guards against delayed
@@ -551,27 +562,27 @@
 
 	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
 						sizeof(lnet_counters_t));
-	if (the_lnet.ln_counters == NULL) {
+	if (!the_lnet.ln_counters) {
 		CERROR("Failed to allocate counters for LNet\n");
 		rc = -ENOMEM;
 		goto failed;
 	}
 
 	rc = lnet_peer_tables_create();
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	rc = lnet_msg_containers_create();
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
 				      LNET_COOKIE_TYPE_EQ);
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
-	if (recs == NULL) {
+	if (!recs) {
 		rc = -ENOMEM;
 		goto failed;
 	}
@@ -579,7 +590,7 @@
 	the_lnet.ln_me_containers = recs;
 
 	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
-	if (recs == NULL) {
+	if (!recs) {
 		rc = -ENOMEM;
 		goto failed;
 	}
@@ -587,7 +598,7 @@
 	the_lnet.ln_md_containers = recs;
 
 	rc = lnet_portals_create();
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Failed to create portals for LNet: %d\n", rc);
 		goto failed;
 	}
@@ -599,17 +610,18 @@
 	return rc;
 }
 
-int
+static int
 lnet_unprepare(void)
 {
-	/* NB no LNET_LOCK since this is the last reference.  All LND instances
+	/*
+	 * NB no LNET_LOCK since this is the last reference.  All LND instances
 	 * have shut down already, so it is safe to unlink and free all
 	 * descriptors, even those that appear committed to a network op (eg MD
-	 * with non-zero pending count) */
-
+	 * with non-zero pending count)
+	 */
 	lnet_fail_nid(LNET_NID_ANY, 0);
 
-	LASSERT(the_lnet.ln_refcount == 0);
+	LASSERT(!the_lnet.ln_refcount);
 	LASSERT(list_empty(&the_lnet.ln_test_peers));
 	LASSERT(list_empty(&the_lnet.ln_nis));
 	LASSERT(list_empty(&the_lnet.ln_nis_cpt));
@@ -617,12 +629,12 @@
 
 	lnet_portals_destroy();
 
-	if (the_lnet.ln_md_containers != NULL) {
+	if (the_lnet.ln_md_containers) {
 		lnet_res_containers_destroy(the_lnet.ln_md_containers);
 		the_lnet.ln_md_containers = NULL;
 	}
 
-	if (the_lnet.ln_me_containers != NULL) {
+	if (the_lnet.ln_me_containers) {
 		lnet_res_containers_destroy(the_lnet.ln_me_containers);
 		the_lnet.ln_me_containers = NULL;
 	}
@@ -631,9 +643,9 @@
 
 	lnet_msg_containers_destroy();
 	lnet_peer_tables_destroy();
-	lnet_rtrpools_free();
+	lnet_rtrpools_free(0);
 
-	if (the_lnet.ln_counters != NULL) {
+	if (the_lnet.ln_counters) {
 		cfs_percpt_free(the_lnet.ln_counters);
 		the_lnet.ln_counters = NULL;
 	}
@@ -709,7 +721,7 @@
 			if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
 				continue;
 
-			LASSERT(ni->ni_cpts != NULL);
+			LASSERT(ni->ni_cpts);
 			return ni->ni_cpts[lnet_nid_cpt_hash
 					   (nid, ni->ni_ncpts)];
 		}
@@ -747,12 +759,12 @@
 	cpt = lnet_net_lock_current();
 
 	ni = lnet_net2ni_locked(net, cpt);
-	if (ni != NULL)
+	if (ni)
 		lnet_ni_decref_locked(ni, cpt);
 
 	lnet_net_unlock(cpt);
 
-	return ni != NULL;
+	return !!ni;
 }
 
 lnet_ni_t  *
@@ -783,11 +795,11 @@
 
 	cpt = lnet_net_lock_current();
 	ni = lnet_nid2ni_locked(nid, cpt);
-	if (ni != NULL)
+	if (ni)
 		lnet_ni_decref_locked(ni, cpt);
 	lnet_net_unlock(cpt);
 
-	return ni != NULL;
+	return !!ni;
 }
 
 int
@@ -803,7 +815,7 @@
 	list_for_each(tmp, &the_lnet.ln_nis) {
 		ni = list_entry(tmp, lnet_ni_t, ni_list);
 
-		if (ni->ni_lnd->lnd_accept != NULL)
+		if (ni->ni_lnd->lnd_accept)
 			count++;
 	}
 
@@ -812,6 +824,229 @@
 	return count;
 }
 
+static lnet_ping_info_t *
+lnet_ping_info_create(int num_ni)
+{
+	lnet_ping_info_t *ping_info;
+	unsigned int infosz;
+
+	infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]);
+	LIBCFS_ALLOC(ping_info, infosz);
+	if (!ping_info) {
+		CERROR("Can't allocate ping info[%d]\n", num_ni);
+		return NULL;
+	}
+
+	ping_info->pi_nnis = num_ni;
+	ping_info->pi_pid = the_lnet.ln_pid;
+	ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
+	ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
+
+	return ping_info;
+}
+
+static inline int
+lnet_get_ni_count(void)
+{
+	struct lnet_ni *ni;
+	int count = 0;
+
+	lnet_net_lock(0);
+
+	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
+		count++;
+
+	lnet_net_unlock(0);
+
+	return count;
+}
+
+static inline void
+lnet_ping_info_free(lnet_ping_info_t *pinfo)
+{
+	LIBCFS_FREE(pinfo,
+		    offsetof(lnet_ping_info_t,
+			     pi_ni[pinfo->pi_nnis]));
+}
+
+static void
+lnet_ping_info_destroy(void)
+{
+	struct lnet_ni *ni;
+
+	lnet_net_lock(LNET_LOCK_EX);
+
+	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+		lnet_ni_lock(ni);
+		ni->ni_status = NULL;
+		lnet_ni_unlock(ni);
+	}
+
+	lnet_ping_info_free(the_lnet.ln_ping_info);
+	the_lnet.ln_ping_info = NULL;
+
+	lnet_net_unlock(LNET_LOCK_EX);
+}
+
+static void
+lnet_ping_event_handler(lnet_event_t *event)
+{
+	lnet_ping_info_t *pinfo = event->md.user_ptr;
+
+	if (event->unlinked)
+		pinfo->pi_features = LNET_PING_FEAT_INVAL;
+}
+
+static int
+lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle,
+		     int ni_count, bool set_eq)
+{
+	lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
+	lnet_handle_me_t me_handle;
+	lnet_md_t md = { NULL };
+	int rc, rc2;
+
+	if (set_eq) {
+		rc = LNetEQAlloc(0, lnet_ping_event_handler,
+				 &the_lnet.ln_ping_target_eq);
+		if (rc) {
+			CERROR("Can't allocate ping EQ: %d\n", rc);
+			return rc;
+		}
+	}
+
+	*ppinfo = lnet_ping_info_create(ni_count);
+	if (!*ppinfo) {
+		rc = -ENOMEM;
+		goto failed_0;
+	}
+
+	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+			  LNET_PROTO_PING_MATCHBITS, 0,
+			  LNET_UNLINK, LNET_INS_AFTER,
+			  &me_handle);
+	if (rc) {
+		CERROR("Can't create ping ME: %d\n", rc);
+		goto failed_1;
+	}
+
+	/* initialize md content */
+	md.start = *ppinfo;
+	md.length = offsetof(lnet_ping_info_t,
+			     pi_ni[(*ppinfo)->pi_nnis]);
+	md.threshold = LNET_MD_THRESH_INF;
+	md.max_size = 0;
+	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
+		     LNET_MD_MANAGE_REMOTE;
+	md.eq_handle = the_lnet.ln_ping_target_eq;
+	md.user_ptr = *ppinfo;
+
+	rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
+	if (rc) {
+		CERROR("Can't attach ping MD: %d\n", rc);
+		goto failed_2;
+	}
+
+	return 0;
+
+failed_2:
+	rc2 = LNetMEUnlink(me_handle);
+	LASSERT(!rc2);
+failed_1:
+	lnet_ping_info_free(*ppinfo);
+	*ppinfo = NULL;
+failed_0:
+	if (set_eq)
+		LNetEQFree(the_lnet.ln_ping_target_eq);
+	return rc;
+}
+
+static void
+lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle)
+{
+	sigset_t blocked = cfs_block_allsigs();
+
+	LNetMDUnlink(*md_handle);
+	LNetInvalidateHandle(md_handle);
+
+	/* NB md could be busy; this just starts the unlink */
+	while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
+		CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(1));
+	}
+
+	cfs_restore_sigs(blocked);
+}
+
+static void
+lnet_ping_info_install_locked(lnet_ping_info_t *ping_info)
+{
+	lnet_ni_status_t *ns;
+	lnet_ni_t *ni;
+	int i = 0;
+
+	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+		LASSERT(i < ping_info->pi_nnis);
+
+		ns = &ping_info->pi_ni[i];
+
+		ns->ns_nid = ni->ni_nid;
+
+		lnet_ni_lock(ni);
+		ns->ns_status = (ni->ni_status) ?
+				 ni->ni_status->ns_status : LNET_NI_STATUS_UP;
+		ni->ni_status = ns;
+		lnet_ni_unlock(ni);
+
+		i++;
+	}
+}
+
+static void
+lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle)
+{
+	lnet_ping_info_t *old_pinfo = NULL;
+	lnet_handle_md_t old_md;
+
+	/* switch the NIs to point to the new ping info created */
+	lnet_net_lock(LNET_LOCK_EX);
+
+	if (!the_lnet.ln_routing)
+		pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+	lnet_ping_info_install_locked(pinfo);
+
+	if (the_lnet.ln_ping_info) {
+		old_pinfo = the_lnet.ln_ping_info;
+		old_md = the_lnet.ln_ping_target_md;
+	}
+	the_lnet.ln_ping_target_md = md_handle;
+	the_lnet.ln_ping_info = pinfo;
+
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	if (old_pinfo) {
+		/* unlink the old ping info */
+		lnet_ping_md_unlink(old_pinfo, &old_md);
+		lnet_ping_info_free(old_pinfo);
+	}
+}
+
+static void
+lnet_ping_target_fini(void)
+{
+	int rc;
+
+	lnet_ping_md_unlink(the_lnet.ln_ping_info,
+			    &the_lnet.ln_ping_target_md);
+
+	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
+	LASSERT(!rc);
+
+	lnet_ping_info_destroy();
+}
+
 static int
 lnet_ni_tq_credits(lnet_ni_t *ni)
 {
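/*
 * Illustration only, not part of the patch: the ping-info helpers added in
 * the hunk above size both the allocation and the MD length with
 * offsetof(lnet_ping_info_t, pi_ni[nnis]), i.e. the header plus nnis status
 * slots.  A standalone sketch of that flexible-array sizing idiom; the
 * struct layout and names below are stand-ins, not the real LNet types:
 */
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct sketch_ping_info {
	__u32 pi_magic;
	__u32 pi_nnis;
	struct {
		__u64 ns_nid;
		__u32 ns_status;
	} pi_ni[];			/* one slot per local NI */
};

static struct sketch_ping_info *sketch_ping_info_alloc(unsigned int nnis)
{
	struct sketch_ping_info *info;

	/* header plus nnis trailing entries, in a single allocation */
	info = kzalloc(offsetof(struct sketch_ping_info, pi_ni[nnis]),
		       GFP_KERNEL);
	if (info)
		info->pi_nnis = nnis;
	return info;
}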
@@ -830,72 +1065,40 @@
 }
 
 static void
-lnet_shutdown_lndnis(void)
+lnet_ni_unlink_locked(lnet_ni_t *ni)
+{
+	if (!list_empty(&ni->ni_cptlist)) {
+		list_del_init(&ni->ni_cptlist);
+		lnet_ni_decref_locked(ni, 0);
+	}
+
+	/* move it to zombie list and nobody can find it anymore */
+	LASSERT(!list_empty(&ni->ni_list));
+	list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
+	lnet_ni_decref_locked(ni, 0);	/* drop ln_nis' ref */
+}
+
+static void
+lnet_clear_zombies_nis_locked(void)
 {
 	int i;
 	int islo;
 	lnet_ni_t *ni;
 
-	/* NB called holding the global mutex */
-
-	/* All quiet on the API front */
-	LASSERT(!the_lnet.ln_shutdown);
-	LASSERT(the_lnet.ln_refcount == 0);
-	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
-
-	lnet_net_lock(LNET_LOCK_EX);
-	the_lnet.ln_shutdown = 1;	/* flag shutdown */
-
-	/* Unlink NIs from the global table */
-	while (!list_empty(&the_lnet.ln_nis)) {
-		ni = list_entry(the_lnet.ln_nis.next,
-				    lnet_ni_t, ni_list);
-		/* move it to zombie list and nobody can find it anymore */
-		list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
-		lnet_ni_decref_locked(ni, 0);	/* drop ln_nis' ref */
-
-		if (!list_empty(&ni->ni_cptlist)) {
-			list_del_init(&ni->ni_cptlist);
-			lnet_ni_decref_locked(ni, 0);
-		}
-	}
-
-	/* Drop the cached eqwait NI. */
-	if (the_lnet.ln_eq_waitni != NULL) {
-		lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
-		the_lnet.ln_eq_waitni = NULL;
-	}
-
-	/* Drop the cached loopback NI. */
-	if (the_lnet.ln_loni != NULL) {
-		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
-		the_lnet.ln_loni = NULL;
-	}
-
-	lnet_net_unlock(LNET_LOCK_EX);
-
-	/* Clear lazy portals and drop delayed messages which hold refs
-	 * on their lnet_msg_t::msg_rxpeer */
-	for (i = 0; i < the_lnet.ln_nportals; i++)
-		LNetClearLazyPortal(i);
-
-	/* Clear the peer table and wait for all peers to go (they hold refs on
-	 * their NIs) */
-	lnet_peer_tables_cleanup();
-
-	lnet_net_lock(LNET_LOCK_EX);
-	/* Now wait for the NI's I just nuked to show up on ln_zombie_nis
-	 * and shut them down in guaranteed thread context */
+	/*
+	 * Now wait for the NI's I just nuked to show up on ln_zombie_nis
+	 * and shut them down in guaranteed thread context
+	 */
 	i = 2;
 	while (!list_empty(&the_lnet.ln_nis_zombie)) {
 		int *ref;
 		int j;
 
 		ni = list_entry(the_lnet.ln_nis_zombie.next,
-				    lnet_ni_t, ni_list);
+				lnet_ni_t, ni_list);
 		list_del_init(&ni->ni_list);
 		cfs_percpt_for_each(ref, j, ni->ni_refs) {
-			if (*ref == 0)
+			if (!*ref)
 				continue;
 			/* still busy, add it back to zombie list */
 			list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
@@ -921,11 +1124,12 @@
 		islo = ni->ni_lnd->lnd_type == LOLND;
 
 		LASSERT(!in_interrupt());
-		(ni->ni_lnd->lnd_shutdown)(ni);
+		ni->ni_lnd->lnd_shutdown(ni);
 
-		/* can't deref lnd anymore now; it might have unregistered
-		 * itself...  */
-
+		/*
+		 * can't deref lnd anymore now; it might have unregistered
+		 * itself...
+		 */
 		if (!islo)
 			CDEBUG(D_LNI, "Removed LNI %s\n",
 			       libcfs_nid2str(ni->ni_nid));
@@ -935,166 +1139,255 @@
 
 		lnet_net_lock(LNET_LOCK_EX);
 	}
+}
 
-	the_lnet.ln_shutdown = 0;
+static void
+lnet_shutdown_lndnis(void)
+{
+	lnet_ni_t *ni;
+	int i;
+
+	/* NB called holding the global mutex */
+
+	/* All quiet on the API front */
+	LASSERT(!the_lnet.ln_shutdown);
+	LASSERT(!the_lnet.ln_refcount);
+	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
+
+	lnet_net_lock(LNET_LOCK_EX);
+	the_lnet.ln_shutdown = 1;	/* flag shutdown */
+
+	/* Unlink NIs from the global table */
+	while (!list_empty(&the_lnet.ln_nis)) {
+		ni = list_entry(the_lnet.ln_nis.next,
+				lnet_ni_t, ni_list);
+		lnet_ni_unlink_locked(ni);
+	}
+
+	/* Drop the cached loopback NI. */
+	if (the_lnet.ln_loni) {
+		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
+		the_lnet.ln_loni = NULL;
+	}
+
 	lnet_net_unlock(LNET_LOCK_EX);
 
-	if (the_lnet.ln_network_tokens != NULL) {
-		LIBCFS_FREE(the_lnet.ln_network_tokens,
-			    the_lnet.ln_network_tokens_nob);
-		the_lnet.ln_network_tokens = NULL;
-	}
+	/*
+	 * Clear lazy portals and drop delayed messages which hold refs
+	 * on their lnet_msg_t::msg_rxpeer
+	 */
+	for (i = 0; i < the_lnet.ln_nportals; i++)
+		LNetClearLazyPortal(i);
+
+	/*
+	 * Clear the peer table and wait for all peers to go (they hold refs on
+	 * their NIs)
+	 */
+	lnet_peer_tables_cleanup(NULL);
+
+	lnet_net_lock(LNET_LOCK_EX);
+
+	lnet_clear_zombies_nis_locked();
+	the_lnet.ln_shutdown = 0;
+	lnet_net_unlock(LNET_LOCK_EX);
+}
+
+/* shut down the NI and release its refcount */
+static void
+lnet_shutdown_lndni(struct lnet_ni *ni)
+{
+	int i;
+
+	lnet_net_lock(LNET_LOCK_EX);
+	lnet_ni_unlink_locked(ni);
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	/* clear messages for this NI on the lazy portal */
+	for (i = 0; i < the_lnet.ln_nportals; i++)
+		lnet_clear_lazy_portal(ni, i, "Shutting down NI");
+
+	/* Do peer table cleanup for this ni */
+	lnet_peer_tables_cleanup(ni);
+
+	lnet_net_lock(LNET_LOCK_EX);
+	lnet_clear_zombies_nis_locked();
+	lnet_net_unlock(LNET_LOCK_EX);
 }
 
 static int
-lnet_startup_lndnis(void)
+lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
+		   __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
 {
+	int rc = -EINVAL;
+	int lnd_type;
 	lnd_t *lnd;
-	struct lnet_ni *ni;
 	struct lnet_tx_queue *tq;
-	struct list_head nilist;
 	int i;
-	int rc = 0;
-	__u32 lnd_type;
-	int nicount = 0;
-	char *nets = lnet_get_networks();
 
-	INIT_LIST_HEAD(&nilist);
+	lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
 
-	if (nets == NULL)
-		goto failed;
+	LASSERT(libcfs_isknown_lnd(lnd_type));
 
-	rc = lnet_parse_networks(&nilist, nets);
-	if (rc != 0)
-		goto failed;
+	if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
+	    lnd_type == IIBLND || lnd_type == VIBLND) {
+		CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
+		goto failed0;
+	}
 
-	while (!list_empty(&nilist)) {
-		ni = list_entry(nilist.next, lnet_ni_t, ni_list);
-		lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
-
-		LASSERT(libcfs_isknown_lnd(lnd_type));
-
-		if (lnd_type == CIBLND    ||
-		    lnd_type == OPENIBLND ||
-		    lnd_type == IIBLND    ||
-		    lnd_type == VIBLND) {
-			CERROR("LND %s obsoleted\n",
-			       libcfs_lnd2str(lnd_type));
-			goto failed;
+	/* Make sure this new NI is unique. */
+	lnet_net_lock(LNET_LOCK_EX);
+	rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis);
+	lnet_net_unlock(LNET_LOCK_EX);
+	if (!rc) {
+		if (lnd_type == LOLND) {
+			lnet_ni_free(ni);
+			return 0;
 		}
 
-		mutex_lock(&the_lnet.ln_lnd_mutex);
-		lnd = lnet_find_lnd_by_type(lnd_type);
+		CERROR("Net %s is not unique\n",
+		       libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
+		rc = -EEXIST;
+		goto failed0;
+	}
 
-		if (lnd == NULL) {
-			mutex_unlock(&the_lnet.ln_lnd_mutex);
-			rc = request_module("%s",
-						libcfs_lnd2modname(lnd_type));
-			mutex_lock(&the_lnet.ln_lnd_mutex);
+	mutex_lock(&the_lnet.ln_lnd_mutex);
+	lnd = lnet_find_lnd_by_type(lnd_type);
 
-			lnd = lnet_find_lnd_by_type(lnd_type);
-			if (lnd == NULL) {
-				mutex_unlock(&the_lnet.ln_lnd_mutex);
-				CERROR("Can't load LND %s, module %s, rc=%d\n",
-				       libcfs_lnd2str(lnd_type),
-				       libcfs_lnd2modname(lnd_type), rc);
-				goto failed;
-			}
-		}
-
-		lnet_net_lock(LNET_LOCK_EX);
-		lnd->lnd_refcount++;
-		lnet_net_unlock(LNET_LOCK_EX);
-
-		ni->ni_lnd = lnd;
-
-		rc = (lnd->lnd_startup)(ni);
-
+	if (!lnd) {
 		mutex_unlock(&the_lnet.ln_lnd_mutex);
+		rc = request_module("%s", libcfs_lnd2modname(lnd_type));
+		mutex_lock(&the_lnet.ln_lnd_mutex);
 
-		if (rc != 0) {
-			LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
-					   rc, libcfs_lnd2str(lnd->lnd_type));
-			lnet_net_lock(LNET_LOCK_EX);
-			lnd->lnd_refcount--;
-			lnet_net_unlock(LNET_LOCK_EX);
-			goto failed;
+		lnd = lnet_find_lnd_by_type(lnd_type);
+		if (!lnd) {
+			mutex_unlock(&the_lnet.ln_lnd_mutex);
+			CERROR("Can't load LND %s, module %s, rc=%d\n",
+			       libcfs_lnd2str(lnd_type),
+			       libcfs_lnd2modname(lnd_type), rc);
+			rc = -EINVAL;
+			goto failed0;
 		}
+	}
 
-		LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
+	lnet_net_lock(LNET_LOCK_EX);
+	lnd->lnd_refcount++;
+	lnet_net_unlock(LNET_LOCK_EX);
 
-		list_del(&ni->ni_list);
+	ni->ni_lnd = lnd;
 
+	rc = lnd->lnd_startup(ni);
+
+	mutex_unlock(&the_lnet.ln_lnd_mutex);
+
+	if (rc) {
+		LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
+				   rc, libcfs_lnd2str(lnd->lnd_type));
 		lnet_net_lock(LNET_LOCK_EX);
-		/* refcount for ln_nis */
-		lnet_ni_addref_locked(ni, 0);
-		list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
-		if (ni->ni_cpts != NULL) {
-			list_add_tail(&ni->ni_cptlist,
-					  &the_lnet.ln_nis_cpt);
-			lnet_ni_addref_locked(ni, 0);
-		}
-
+		lnd->lnd_refcount--;
 		lnet_net_unlock(LNET_LOCK_EX);
-
-		if (lnd->lnd_type == LOLND) {
-			lnet_ni_addref(ni);
-			LASSERT(the_lnet.ln_loni == NULL);
-			the_lnet.ln_loni = ni;
-			continue;
-		}
-
-		if (ni->ni_peertxcredits == 0 ||
-		    ni->ni_maxtxcredits == 0) {
-			LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
-					   libcfs_lnd2str(lnd->lnd_type),
-					   ni->ni_peertxcredits == 0 ?
-					   "" : "per-peer ");
-			goto failed;
-		}
-
-		cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
-			tq->tq_credits_min =
-			tq->tq_credits_max =
-			tq->tq_credits = lnet_ni_tq_credits(ni);
-		}
-
-		CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
-		       libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
-		       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
-		       ni->ni_peerrtrcredits, ni->ni_peertimeout);
-
-		nicount++;
+		goto failed0;
 	}
 
-	if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
-		lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
-		LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n",
-				   libcfs_lnd2str(lnd_type));
-		goto failed;
+	/*
+	 * If given some LND tunable parameters, parse those now to
+	 * override the values in the NI structure.
+	 */
+	if (peer_buf_cr >= 0)
+		ni->ni_peerrtrcredits = peer_buf_cr;
+	if (peer_timeout >= 0)
+		ni->ni_peertimeout = peer_timeout;
+	/*
+	 * TODO
+	 * Note: For now, don't allow the user to change
+	 * peertxcredits as this number is used in the
+	 * IB LND to control queue depth.
+	 * if (peer_cr != -1)
+	 *	ni->ni_peertxcredits = peer_cr;
+	 */
+	if (credits >= 0)
+		ni->ni_maxtxcredits = credits;
+
+	LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
+
+	lnet_net_lock(LNET_LOCK_EX);
+	/* refcount for ln_nis */
+	lnet_ni_addref_locked(ni, 0);
+	list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+	if (ni->ni_cpts) {
+		lnet_ni_addref_locked(ni, 0);
+		list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt);
 	}
 
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	if (lnd->lnd_type == LOLND) {
+		lnet_ni_addref(ni);
+		LASSERT(!the_lnet.ln_loni);
+		the_lnet.ln_loni = ni;
+		return 0;
+	}
+
+	if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) {
+		LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
+				   libcfs_lnd2str(lnd->lnd_type),
+				   !ni->ni_peertxcredits ?
+				   "" : "per-peer ");
+		/*
+		 * shut down the NI, since if we get here it must already
+		 * have been started
+		 */
+		lnet_shutdown_lndni(ni);
+		return -EINVAL;
+	}
+
+	cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
+		tq->tq_credits_min =
+		tq->tq_credits_max =
+		tq->tq_credits = lnet_ni_tq_credits(ni);
+	}
+
+	CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
+	       libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
+	       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
+	       ni->ni_peerrtrcredits, ni->ni_peertimeout);
+
 	return 0;
+failed0:
+	lnet_ni_free(ni);
+	return rc;
+}
 
- failed:
-	lnet_shutdown_lndnis();
+static int
+lnet_startup_lndnis(struct list_head *nilist)
+{
+	struct lnet_ni *ni;
+	int rc;
+	int ni_count = 0;
 
-	while (!list_empty(&nilist)) {
-		ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+	while (!list_empty(nilist)) {
+		ni = list_entry(nilist->next, lnet_ni_t, ni_list);
 		list_del(&ni->ni_list);
-		lnet_ni_free(ni);
+		rc = lnet_startup_lndni(ni, -1, -1, -1, -1);
+
+		if (rc < 0)
+			goto failed;
+
+		ni_count++;
 	}
 
-	return -ENETDOWN;
+	return ni_count;
+failed:
+	lnet_shutdown_lndnis();
+
+	return rc;
 }
 
 /**
  * Initialize LNet library.
  *
- * Only userspace program needs to call this function - it's automatically
- * called in the kernel at module loading time. Caller has to call lnet_fini()
- * after a call to lnet_init(), if and only if the latter returned 0. It must
- * be called exactly once.
+ * Automatically called at module loading time. Caller has to call
+ * lnet_exit() after a call to lnet_init(), if and only if the
+ * latter returned 0. It must be called exactly once.
  *
  * \return 0 on success, and -ve on failures.
  */
@@ -1104,7 +1397,6 @@
 	int rc;
 
 	lnet_assert_wire_constants();
-	LASSERT(!the_lnet.ln_init);
 
 	memset(&the_lnet, 0, sizeof(the_lnet));
 
@@ -1124,21 +1416,22 @@
 		the_lnet.ln_cpt_bits++;
 
 	rc = lnet_create_locks();
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create LNet global locks: %d\n", rc);
 		return -1;
 	}
 
 	the_lnet.ln_refcount = 0;
-	the_lnet.ln_init = 1;
 	LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
 	INIT_LIST_HEAD(&the_lnet.ln_lnds);
 	INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
 	INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
 
-	/* The hash table size is the number of bits it takes to express the set
+	/*
+	 * The hash table size is the number of bits it takes to express the set
 	 * ln_num_routes, minus 1 (better to under estimate than over so we
-	 * don't waste memory). */
+	 * don't waste memory).
+	 */
 	if (rnet_htable_size <= 0)
 		rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
 	else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
@@ -1146,9 +1439,11 @@
 	the_lnet.ln_remote_nets_hbits = max_t(int, 1,
 					   order_base_2(rnet_htable_size) - 1);
 
-	/* All LNDs apart from the LOLND are in separate modules.  They
+	/*
+	 * All LNDs apart from the LOLND are in separate modules.  They
 	 * register themselves when their module loads, and unregister
-	 * themselves when their module is unloaded. */
+	 * themselves when their module is unloaded.
+	 */
 	lnet_register_lnd(&the_lolnd);
 	return 0;
 }
@@ -1156,30 +1451,23 @@
 /**
  * Finalize LNet library.
  *
- * Only userspace program needs to call this function. It can be called
- * at most once.
- *
  * \pre lnet_init() called with success.
  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
  */
 void
 lnet_fini(void)
 {
-	LASSERT(the_lnet.ln_init);
-	LASSERT(the_lnet.ln_refcount == 0);
+	LASSERT(!the_lnet.ln_refcount);
 
 	while (!list_empty(&the_lnet.ln_lnds))
 		lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
-						   lnd_t, lnd_list));
+					       lnd_t, lnd_list));
 	lnet_destroy_locks();
-
-	the_lnet.ln_init = 0;
 }
 
 /**
  * Set LNet PID and start LNet interfaces, routing, and forwarding.
  *
- * Userspace program should call this after a successful call to lnet_init().
  * Users must call this function at least once before any other functions.
  * For each successful call there must be a corresponding call to
  * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
@@ -1197,77 +1485,113 @@
 {
 	int im_a_router = 0;
 	int rc;
+	int ni_count;
+	lnet_ping_info_t *pinfo;
+	lnet_handle_md_t md_handle;
+	struct list_head net_head;
+
+	INIT_LIST_HEAD(&net_head);
 
 	mutex_lock(&the_lnet.ln_api_mutex);
 
-	LASSERT(the_lnet.ln_init);
 	CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
 
 	if (the_lnet.ln_refcount > 0) {
 		rc = the_lnet.ln_refcount++;
-		goto out;
-	}
-
-	if (requested_pid == LNET_PID_ANY) {
-		/* Don't instantiate LNET just for me */
-		rc = -ENETDOWN;
-		goto failed0;
+		mutex_unlock(&the_lnet.ln_api_mutex);
+		return rc;
 	}
 
 	rc = lnet_prepare(requested_pid);
-	if (rc != 0)
-		goto failed0;
+	if (rc) {
+		mutex_unlock(&the_lnet.ln_api_mutex);
+		return rc;
+	}
 
-	rc = lnet_startup_lndnis();
-	if (rc != 0)
-		goto failed1;
+	/* Add in the loopback network */
+	if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) {
+		rc = -ENOMEM;
+		goto err_empty_list;
+	}
 
-	rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
-	if (rc != 0)
-		goto failed2;
+	/*
+	 * If LNet is being initialized via DLC it is possible
+	 * that the user requests not to load module parameters (ones which
+	 * are supported by DLC) on initialization.  Therefore, make sure not
+	 * to load networks, routes and forwarding from module parameters
+	 * in this case. On cleanup after a failure, only clean up
+	 * routes if they have been loaded.
+	 */
+	if (!the_lnet.ln_nis_from_mod_params) {
+		rc = lnet_parse_networks(&net_head, lnet_get_networks());
+		if (rc < 0)
+			goto err_empty_list;
+	}
 
-	rc = lnet_check_routes();
-	if (rc != 0)
-		goto failed2;
+	ni_count = lnet_startup_lndnis(&net_head);
+	if (ni_count < 0) {
+		rc = ni_count;
+		goto err_empty_list;
+	}
 
-	rc = lnet_rtrpools_alloc(im_a_router);
-	if (rc != 0)
-		goto failed2;
+	if (!the_lnet.ln_nis_from_mod_params) {
+		rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
+		if (rc)
+			goto err_shutdown_lndnis;
+
+		rc = lnet_check_routes();
+		if (rc)
+			goto err_destroy_routes;
+
+		rc = lnet_rtrpools_alloc(im_a_router);
+		if (rc)
+			goto err_destroy_routes;
+	}
 
 	rc = lnet_acceptor_start();
-	if (rc != 0)
-		goto failed2;
+	if (rc)
+		goto err_destroy_routes;
 
 	the_lnet.ln_refcount = 1;
 	/* Now I may use my own API functions... */
 
-	/* NB router checker needs the_lnet.ln_ping_info in
-	 * lnet_router_checker -> lnet_update_ni_status_locked */
-	rc = lnet_ping_target_init();
-	if (rc != 0)
-		goto failed3;
+	rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
+	if (rc)
+		goto err_acceptor_stop;
+
+	lnet_ping_target_update(pinfo, md_handle);
 
 	rc = lnet_router_checker_start();
-	if (rc != 0)
-		goto failed4;
+	if (rc)
+		goto err_stop_ping;
 
 	lnet_router_debugfs_init();
-	goto out;
 
- failed4:
+	mutex_unlock(&the_lnet.ln_api_mutex);
+
+	return 0;
+
+err_stop_ping:
 	lnet_ping_target_fini();
- failed3:
+err_acceptor_stop:
 	the_lnet.ln_refcount = 0;
 	lnet_acceptor_stop();
- failed2:
-	lnet_destroy_routes();
+err_destroy_routes:
+	if (!the_lnet.ln_nis_from_mod_params)
+		lnet_destroy_routes();
+err_shutdown_lndnis:
 	lnet_shutdown_lndnis();
- failed1:
+err_empty_list:
 	lnet_unprepare();
- failed0:
 	LASSERT(rc < 0);
- out:
 	mutex_unlock(&the_lnet.ln_api_mutex);
+	while (!list_empty(&net_head)) {
+		struct lnet_ni *ni;
+
+		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+		list_del_init(&ni->ni_list);
+		lnet_ni_free(ni);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(LNetNIInit);
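For orientation, a minimal sketch of how an in-kernel LNet user is expected to
pair these calls after the rework above; only the LNetNIInit()/LNetNIFini()
reference-count pairing is taken from the code, while the function names and
the choice of LNET_PID_LUSTRE are illustrative assumptions:

/* Illustrative only: bring LNet up once, use it, then drop the reference. */
static int example_client_start(void)
{
	int rc;

	rc = LNetNIInit(LNET_PID_LUSTRE);	/* takes a reference on LNet */
	if (rc < 0)
		return rc;			/* NIs/routes failed to start */

	/* ... LNetGetId(), LNetEQAlloc(), LNetPut(), ... */
	return 0;
}

static void example_client_stop(void)
{
	LNetNIFini();				/* drops the reference taken above */
}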
@@ -1286,7 +1610,6 @@
 {
 	mutex_lock(&the_lnet.ln_api_mutex);
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	if (the_lnet.ln_refcount != 1) {
@@ -1313,30 +1636,233 @@
 EXPORT_SYMBOL(LNetNIFini);
 
 /**
- * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
- * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet
- * internal ioctl handler.
+ * Grabs the NI data from the NI structure and fills in the output
+ * parameters
  *
- * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it.
+ * \param[in] ni               network interface structure
+ * \param[out] cpt_count       the number of cpts the ni is on
+ * \param[out] nid             Network Interface ID
+ * \param[out] peer_timeout    NI peer timeout
+ * \param[out] peer_tx_credits NI peer transmit credits
+ * \param[out] peer_rtr_credits NI peer router credits
+ * \param[out] max_tx_credits  NI max transmit credits
+ * \param[out] net_config      Network configuration
+ */
+static void
+lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
+		  int *peer_timeout, int *peer_tx_credits,
+		  int *peer_rtr_credits, int *max_tx_credits,
+		  struct lnet_ioctl_net_config *net_config)
+{
+	int i;
+
+	if (!ni)
+		return;
+
+	if (!net_config)
+		return;
+
+	BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
+		     ARRAY_SIZE(net_config->ni_interfaces));
+
+	for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
+		if (!ni->ni_interfaces[i])
+			break;
+
+		strncpy(net_config->ni_interfaces[i],
+			ni->ni_interfaces[i],
+			sizeof(net_config->ni_interfaces[i]));
+	}
+
+	*nid = ni->ni_nid;
+	*peer_timeout = ni->ni_peertimeout;
+	*peer_tx_credits = ni->ni_peertxcredits;
+	*peer_rtr_credits = ni->ni_peerrtrcredits;
+	*max_tx_credits = ni->ni_maxtxcredits;
+
+	net_config->ni_status = ni->ni_status->ns_status;
+
+	if (ni->ni_cpts) {
+		int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
+
+		for (i = 0; i < num_cpts; i++)
+			net_config->ni_cpts[i] = ni->ni_cpts[i];
+
+		*cpt_count = num_cpts;
+	}
+}
+
+int
+lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
+		    int *peer_tx_credits, int *peer_rtr_credits,
+		    int *max_tx_credits,
+		    struct lnet_ioctl_net_config *net_config)
+{
+	struct lnet_ni *ni;
+	struct list_head *tmp;
+	int cpt, i = 0;
+	int rc = -ENOENT;
+
+	cpt = lnet_net_lock_current();
+
+	list_for_each(tmp, &the_lnet.ln_nis) {
+		if (i++ != idx)
+			continue;
+
+		ni = list_entry(tmp, lnet_ni_t, ni_list);
+		lnet_ni_lock(ni);
+		lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout,
+				  peer_tx_credits, peer_rtr_credits,
+				  max_tx_credits, net_config);
+		lnet_ni_unlock(ni);
+		rc = 0;
+		break;
+	}
+
+	lnet_net_unlock(cpt);
+	return rc;
+}
+
+int
+lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
+		__s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
+		__s32 credits)
+{
+	lnet_ping_info_t *pinfo;
+	lnet_handle_md_t md_handle;
+	struct lnet_ni *ni;
+	struct list_head net_head;
+	lnet_remotenet_t *rnet;
+	int rc;
+
+	INIT_LIST_HEAD(&net_head);
+
+	/* Create a ni structure for the network string */
+	rc = lnet_parse_networks(&net_head, nets);
+	if (rc <= 0)
+		return !rc ? -EINVAL : rc;
+
+	mutex_lock(&the_lnet.ln_api_mutex);
+
+	if (rc > 1) {
+		rc = -EINVAL; /* only add one interface per call */
+		goto failed0;
+	}
+
+	ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+
+	lnet_net_lock(LNET_LOCK_EX);
+	rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid));
+	lnet_net_unlock(LNET_LOCK_EX);
+	/*
+	 * make sure that the net added doesn't invalidate the current
+	 * configuration LNet is keeping
+	 */
+	if (rnet) {
+		CERROR("Adding net %s will invalidate routing configuration\n",
+		       nets);
+		rc = -EUSERS;
+		goto failed0;
+	}
+
+	rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
+				  false);
+	if (rc)
+		goto failed0;
+
+	list_del_init(&ni->ni_list);
+
+	rc = lnet_startup_lndni(ni, peer_timeout, peer_cr,
+				peer_buf_cr, credits);
+	if (rc)
+		goto failed1;
+
+	if (ni->ni_lnd->lnd_accept) {
+		rc = lnet_acceptor_start();
+		if (rc < 0) {
+			/* shutdown the ni that we just started */
+			CERROR("Failed to start up acceptor thread\n");
+			lnet_shutdown_lndni(ni);
+			goto failed1;
+		}
+	}
+
+	lnet_ping_target_update(pinfo, md_handle);
+	mutex_unlock(&the_lnet.ln_api_mutex);
+
+	return 0;
+
+failed1:
+	lnet_ping_md_unlink(pinfo, &md_handle);
+	lnet_ping_info_free(pinfo);
+failed0:
+	mutex_unlock(&the_lnet.ln_api_mutex);
+	while (!list_empty(&net_head)) {
+		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+		list_del_init(&ni->ni_list);
+		lnet_ni_free(ni);
+	}
+	return rc;
+}
+
+int
+lnet_dyn_del_ni(__u32 net)
+{
+	lnet_ni_t *ni;
+	lnet_ping_info_t *pinfo;
+	lnet_handle_md_t md_handle;
+	int rc;
+
+	/* don't allow userspace to shutdown the LOLND */
+	if (LNET_NETTYP(net) == LOLND)
+		return -EINVAL;
+
+	mutex_lock(&the_lnet.ln_api_mutex);
+	/* create and link a new ping info, before removing the old one */
+	rc = lnet_ping_info_setup(&pinfo, &md_handle,
+				  lnet_get_ni_count() - 1, false);
+	if (rc)
+		goto out;
+
+	ni = lnet_net2ni(net);
+	if (!ni) {
+		rc = -EINVAL;
+		goto failed;
+	}
+
+	/* decrement the reference counter taken by lnet_net2ni() */
+	lnet_ni_decref_locked(ni, 0);
+
+	lnet_shutdown_lndni(ni);
+
+	if (!lnet_count_acceptor_nis())
+		lnet_acceptor_stop();
+
+	lnet_ping_target_update(pinfo, md_handle);
+	goto out;
+failed:
+	lnet_ping_md_unlink(pinfo, &md_handle);
+	lnet_ping_info_free(pinfo);
+out:
+	mutex_unlock(&the_lnet.ln_api_mutex);
+
+	return rc;
+}
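A hedged sketch of driving the two dynamic-configuration entry points above.
The "tcp0(eth0)" string follows the lnet_parse_networks() syntax; the -1
tunables (assumed here to mean "use the defaults"), the helper name and the
use of the standard SOCKLND network type are illustrative assumptions:

/* Illustrative only: add a single NI on tcp0 bound to eth0, then remove it. */
static int example_dyn_config(void)
{
	char nets[] = "tcp0(eth0)";
	int rc;

	rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, nets, -1, -1, -1, -1);
	if (rc)
		return rc;

	/* ... */

	return lnet_dyn_del_ni(LNET_MKNET(SOCKLND, 0));	/* the tcp0 net */
}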
+
+/**
+ * LNet ioctl handler.
  *
- * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer.
- * The data will be printed to system console. Don't use it excessively.
- * \param arg A pointer to lnet_process_id_t, process ID of the peer.
- *
- * \return Always return 0 when called by users directly (i.e., not via ioctl).
  */
 int
 LNetCtl(unsigned int cmd, void *arg)
 {
 	struct libcfs_ioctl_data *data = arg;
+	struct lnet_ioctl_config_data *config;
 	lnet_process_id_t id = {0};
 	lnet_ni_t *ni;
 	int rc;
 	unsigned long secs_passed;
 
-	LASSERT(the_lnet.ln_init);
-	LASSERT(the_lnet.ln_refcount > 0);
-
 	switch (cmd) {
 	case IOC_LIBCFS_GET_NI:
 		rc = LNetGetId(data->ioc_count, &id);
@@ -1347,27 +1873,148 @@
 		return lnet_fail_nid(data->ioc_nid, data->ioc_count);
 
 	case IOC_LIBCFS_ADD_ROUTE:
-		rc = lnet_add_route(data->ioc_net, data->ioc_count,
-				    data->ioc_nid, data->ioc_priority);
-		return (rc != 0) ? rc : lnet_check_routes();
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < sizeof(*config))
+			return -EINVAL;
+
+		mutex_lock(&the_lnet.ln_api_mutex);
+		rc = lnet_add_route(config->cfg_net,
+				    config->cfg_config_u.cfg_route.rtr_hop,
+				    config->cfg_nid,
+				    config->cfg_config_u.cfg_route.rtr_priority);
+		if (!rc) {
+			rc = lnet_check_routes();
+			if (rc)
+				lnet_del_route(config->cfg_net,
+					       config->cfg_nid);
+		}
+		mutex_unlock(&the_lnet.ln_api_mutex);
+		return rc;
 
 	case IOC_LIBCFS_DEL_ROUTE:
-		return lnet_del_route(data->ioc_net, data->ioc_nid);
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < sizeof(*config))
+			return -EINVAL;
+
+		mutex_lock(&the_lnet.ln_api_mutex);
+		rc = lnet_del_route(config->cfg_net, config->cfg_nid);
+		mutex_unlock(&the_lnet.ln_api_mutex);
+		return rc;
 
 	case IOC_LIBCFS_GET_ROUTE:
-		return lnet_get_route(data->ioc_count,
-				      &data->ioc_net, &data->ioc_count,
-				      &data->ioc_nid, &data->ioc_flags,
-				      &data->ioc_priority);
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < sizeof(*config))
+			return -EINVAL;
+
+		return lnet_get_route(config->cfg_count,
+				      &config->cfg_net,
+				      &config->cfg_config_u.cfg_route.rtr_hop,
+				      &config->cfg_nid,
+				      &config->cfg_config_u.cfg_route.rtr_flags,
+				      &config->cfg_config_u.cfg_route.rtr_priority);
+
+	case IOC_LIBCFS_GET_NET: {
+		struct lnet_ioctl_net_config *net_config;
+		size_t total = sizeof(*config) + sizeof(*net_config);
+
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < total)
+			return -EINVAL;
+
+		net_config = (struct lnet_ioctl_net_config *)
+				config->cfg_bulk;
+		if (!net_config)
+			return -EINVAL;
+
+		return lnet_get_net_config(config->cfg_count,
+					   &config->cfg_ncpts,
+					   &config->cfg_nid,
+					   &config->cfg_config_u.cfg_net.net_peer_timeout,
+					   &config->cfg_config_u.cfg_net.net_peer_tx_credits,
+					   &config->cfg_config_u.cfg_net.net_peer_rtr_credits,
+					   &config->cfg_config_u.cfg_net.net_max_tx_credits,
+					   net_config);
+	}
+
+	case IOC_LIBCFS_GET_LNET_STATS: {
+		struct lnet_ioctl_lnet_stats *lnet_stats = arg;
+
+		if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
+			return -EINVAL;
+
+		lnet_counters_get(&lnet_stats->st_cntrs);
+		return 0;
+	}
+
+	case IOC_LIBCFS_CONFIG_RTR:
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < sizeof(*config))
+			return -EINVAL;
+
+		mutex_lock(&the_lnet.ln_api_mutex);
+		if (config->cfg_config_u.cfg_buffers.buf_enable) {
+			rc = lnet_rtrpools_enable();
+			mutex_unlock(&the_lnet.ln_api_mutex);
+			return rc;
+		}
+		lnet_rtrpools_disable();
+		mutex_unlock(&the_lnet.ln_api_mutex);
+		return 0;
+
+	case IOC_LIBCFS_ADD_BUF:
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < sizeof(*config))
+			return -EINVAL;
+
+		mutex_lock(&the_lnet.ln_api_mutex);
+		rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny,
+					  config->cfg_config_u.cfg_buffers.buf_small,
+					  config->cfg_config_u.cfg_buffers.buf_large);
+		mutex_unlock(&the_lnet.ln_api_mutex);
+		return rc;
+
+	case IOC_LIBCFS_GET_BUF: {
+		struct lnet_ioctl_pool_cfg *pool_cfg;
+		size_t total = sizeof(*config) + sizeof(*pool_cfg);
+
+		config = arg;
+
+		if (config->cfg_hdr.ioc_len < total)
+			return -EINVAL;
+
+		pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
+		return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
+	}
+
+	case IOC_LIBCFS_GET_PEER_INFO: {
+		struct lnet_ioctl_peer *peer_info = arg;
+
+		if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
+			return -EINVAL;
+
+		return lnet_get_peer_info(peer_info->pr_count,
+			&peer_info->pr_nid,
+			peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
+			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
+	}
+
 	case IOC_LIBCFS_NOTIFY_ROUTER:
 		secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]);
 		return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
 				   jiffies - secs_passed * HZ);
 
-	case IOC_LIBCFS_PORTALS_COMPATIBILITY:
-		/* This can be removed once lustre stops calling it */
-		return 0;
-
 	case IOC_LIBCFS_LNET_DIST:
 		rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
 		if (rc < 0 && rc != -EHOSTUNREACH)
@@ -1386,42 +2033,19 @@
 		id.nid = data->ioc_nid;
 		id.pid = data->ioc_u32[0];
 		rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
-			       (lnet_process_id_t *)data->ioc_pbuf1,
-			       data->ioc_plen1/sizeof(lnet_process_id_t));
+			       data->ioc_pbuf1,
+			       data->ioc_plen1 / sizeof(lnet_process_id_t));
 		if (rc < 0)
 			return rc;
 		data->ioc_count = rc;
 		return 0;
 
-	case IOC_LIBCFS_DEBUG_PEER: {
-		/* CAVEAT EMPTOR: this one designed for calling directly; not
-		 * via an ioctl */
-		id = *((lnet_process_id_t *) arg);
-
-		lnet_debug_peer(id.nid);
-
-		ni = lnet_net2ni(LNET_NIDNET(id.nid));
-		if (ni == NULL) {
-			CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
-		} else {
-			if (ni->ni_lnd->lnd_ctl == NULL) {
-				CDEBUG(D_WARNING, "No ctl for %s\n",
-				       libcfs_id2str(id));
-			} else {
-				(void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
-			}
-
-			lnet_ni_decref(ni);
-		}
-		return 0;
-	}
-
 	default:
 		ni = lnet_net2ni(data->ioc_net);
-		if (ni == NULL)
+		if (!ni)
 			return -EINVAL;
 
-		if (ni->ni_lnd->lnd_ctl == NULL)
+		if (!ni->ni_lnd->lnd_ctl)
 			rc = -EINVAL;
 		else
 			rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
@@ -1433,6 +2057,12 @@
 }
 EXPORT_SYMBOL(LNetCtl);
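To make the new header-length convention concrete, here is a sketch of the
payload a caller (hypothetically the userspace lnetctl tool going through the
libcfs ioctl device) would fill in before IOC_LIBCFS_ADD_ROUTE reaches the
handler above; the field names are the ones used in that handler, while the
NIDs, the networks and the surrounding ioctl plumbing are assumptions:

	struct lnet_ioctl_config_data cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.cfg_hdr.ioc_len = sizeof(cfg);	/* checked against sizeof(*config) */
	cfg.cfg_net = LNET_MKNET(O2IBLND, 0);	/* remote net: o2ib0 */
	cfg.cfg_nid = libcfs_str2nid("192.168.0.1@tcp");	/* gateway NID */
	cfg.cfg_config_u.cfg_route.rtr_hop = 1;
	cfg.cfg_config_u.cfg_route.rtr_priority = 0;
	/* ... dispatched as LNetCtl(IOC_LIBCFS_ADD_ROUTE, &cfg) by the ioctl layer */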
 
+void LNetDebugPeer(lnet_process_id_t id)
+{
+	lnet_debug_peer(id.nid);
+}
+EXPORT_SYMBOL(LNetDebugPeer);
+
 /**
  * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
  * all interfaces share a same PID, as requested by LNetNIInit().
@@ -1452,16 +2082,14 @@
 	int cpt;
 	int rc = -ENOENT;
 
-	LASSERT(the_lnet.ln_init);
-
 	/* LNetNI initialization failed? */
-	if (the_lnet.ln_refcount == 0)
+	if (!the_lnet.ln_refcount)
 		return rc;
 
 	cpt = lnet_net_lock_current();
 
 	list_for_each(tmp, &the_lnet.ln_nis) {
-		if (index-- != 0)
+		if (index--)
 			continue;
 
 		ni = list_entry(tmp, lnet_ni_t, ni_list);
@@ -1488,192 +2116,8 @@
 }
 EXPORT_SYMBOL(LNetSnprintHandle);
 
-static int
-lnet_create_ping_info(void)
-{
-	int i;
-	int n;
-	int rc;
-	unsigned int infosz;
-	lnet_ni_t *ni;
-	lnet_process_id_t id;
-	lnet_ping_info_t *pinfo;
-
-	for (n = 0; ; n++) {
-		rc = LNetGetId(n, &id);
-		if (rc == -ENOENT)
-			break;
-
-		LASSERT(rc == 0);
-	}
-
-	infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
-	LIBCFS_ALLOC(pinfo, infosz);
-	if (pinfo == NULL) {
-		CERROR("Can't allocate ping info[%d]\n", n);
-		return -ENOMEM;
-	}
-
-	pinfo->pi_nnis    = n;
-	pinfo->pi_pid     = the_lnet.ln_pid;
-	pinfo->pi_magic   = LNET_PROTO_PING_MAGIC;
-	pinfo->pi_features = LNET_PING_FEAT_NI_STATUS;
-
-	for (i = 0; i < n; i++) {
-		lnet_ni_status_t *ns = &pinfo->pi_ni[i];
-
-		rc = LNetGetId(i, &id);
-		LASSERT(rc == 0);
-
-		ns->ns_nid    = id.nid;
-		ns->ns_status = LNET_NI_STATUS_UP;
-
-		lnet_net_lock(0);
-
-		ni = lnet_nid2ni_locked(id.nid, 0);
-		LASSERT(ni != NULL);
-
-		lnet_ni_lock(ni);
-		LASSERT(ni->ni_status == NULL);
-		ni->ni_status = ns;
-		lnet_ni_unlock(ni);
-
-		lnet_ni_decref_locked(ni, 0);
-		lnet_net_unlock(0);
-	}
-
-	the_lnet.ln_ping_info = pinfo;
-	return 0;
-}
-
-static void
-lnet_destroy_ping_info(void)
-{
-	struct lnet_ni *ni;
-
-	lnet_net_lock(0);
-
-	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
-		lnet_ni_lock(ni);
-		ni->ni_status = NULL;
-		lnet_ni_unlock(ni);
-	}
-
-	lnet_net_unlock(0);
-
-	LIBCFS_FREE(the_lnet.ln_ping_info,
-		    offsetof(lnet_ping_info_t,
-			     pi_ni[the_lnet.ln_ping_info->pi_nnis]));
-	the_lnet.ln_ping_info = NULL;
-}
-
-int
-lnet_ping_target_init(void)
-{
-	lnet_md_t md = { NULL };
-	lnet_handle_me_t meh;
-	lnet_process_id_t id;
-	int rc;
-	int rc2;
-	int infosz;
-
-	rc = lnet_create_ping_info();
-	if (rc != 0)
-		return rc;
-
-	/* We can have a tiny EQ since we only need to see the unlink event on
-	 * teardown, which by definition is the last one! */
-	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
-	if (rc != 0) {
-		CERROR("Can't allocate ping EQ: %d\n", rc);
-		goto failed_0;
-	}
-
-	memset(&id, 0, sizeof(lnet_process_id_t));
-	id.nid = LNET_NID_ANY;
-	id.pid = LNET_PID_ANY;
-
-	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
-			  LNET_PROTO_PING_MATCHBITS, 0,
-			  LNET_UNLINK, LNET_INS_AFTER,
-			  &meh);
-	if (rc != 0) {
-		CERROR("Can't create ping ME: %d\n", rc);
-		goto failed_1;
-	}
-
-	/* initialize md content */
-	infosz = offsetof(lnet_ping_info_t,
-			  pi_ni[the_lnet.ln_ping_info->pi_nnis]);
-	md.start     = the_lnet.ln_ping_info;
-	md.length    = infosz;
-	md.threshold = LNET_MD_THRESH_INF;
-	md.max_size  = 0;
-	md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
-		       LNET_MD_MANAGE_REMOTE;
-	md.user_ptr  = NULL;
-	md.eq_handle = the_lnet.ln_ping_target_eq;
-
-	rc = LNetMDAttach(meh, md,
-			  LNET_RETAIN,
-			  &the_lnet.ln_ping_target_md);
-	if (rc != 0) {
-		CERROR("Can't attach ping MD: %d\n", rc);
-		goto failed_2;
-	}
-
-	return 0;
-
- failed_2:
-	rc2 = LNetMEUnlink(meh);
-	LASSERT(rc2 == 0);
- failed_1:
-	rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
-	LASSERT(rc2 == 0);
- failed_0:
-	lnet_destroy_ping_info();
-	return rc;
-}
-
-void
-lnet_ping_target_fini(void)
-{
-	lnet_event_t event;
-	int rc;
-	int which;
-	int timeout_ms = 1000;
-	sigset_t blocked = cfs_block_allsigs();
-
-	LNetMDUnlink(the_lnet.ln_ping_target_md);
-	/* NB md could be busy; this just starts the unlink */
-
-	for (;;) {
-		rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
-				timeout_ms, &event, &which);
-
-		/* I expect overflow... */
-		LASSERT(rc >= 0 || rc == -EOVERFLOW);
-
-		if (rc == 0) {
-			/* timed out: provide a diagnostic */
-			CWARN("Still waiting for ping MD to unlink\n");
-			timeout_ms *= 2;
-			continue;
-		}
-
-		/* Got a valid event */
-		if (event.unlinked)
-			break;
-	}
-
-	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
-	LASSERT(rc == 0);
-	lnet_destroy_ping_info();
-	cfs_restore_sigs(blocked);
-}
-
-int
-lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
+static int lnet_ping(lnet_process_id_t id, int timeout_ms,
+		     lnet_process_id_t __user *ids, int n_ids)
 {
 	lnet_handle_eq_t eqh;
 	lnet_handle_md_t mdh;
@@ -1683,7 +2127,7 @@
 	int unlinked = 0;
 	int replied = 0;
 	const int a_long_time = 60000; /* mS */
-	int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+	int infosz;
 	lnet_ping_info_t *info;
 	lnet_process_id_t tmpid;
 	int i;
@@ -1692,6 +2136,8 @@
 	int rc2;
 	sigset_t blocked;
 
+	infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+
 	if (n_ids <= 0 ||
 	    id.nid == LNET_NID_ANY ||
 	    timeout_ms > 500000 ||	      /* arbitrary limit! */
@@ -1699,15 +2145,15 @@
 		return -EINVAL;
 
 	if (id.pid == LNET_PID_ANY)
-		id.pid = LUSTRE_SRV_LNET_PID;
+		id.pid = LNET_PID_LUSTRE;
 
 	LIBCFS_ALLOC(info, infosz);
-	if (info == NULL)
+	if (!info)
 		return -ENOMEM;
 
 	/* NB 2 events max (including any unlink event) */
 	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't allocate EQ: %d\n", rc);
 		goto out_0;
 	}
@@ -1722,7 +2168,7 @@
 	md.eq_handle = eqh;
 
 	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't bind MD: %d\n", rc);
 		goto out_1;
 	}
@@ -1731,11 +2177,11 @@
 		     LNET_RESERVED_PORTAL,
 		     LNET_PROTO_PING_MATCHBITS, 0);
 
-	if (rc != 0) {
+	if (rc) {
 		/* Don't CERROR; this could be deliberate! */
 
 		rc2 = LNetMDUnlink(mdh);
-		LASSERT(rc2 == 0);
+		LASSERT(!rc2);
 
 		/* NB must wait for the UNLINK event below... */
 		unlinked = 1;
@@ -1759,11 +2205,11 @@
 
 		LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */
 
-		if (rc2 <= 0 || event.status != 0) {
+		if (rc2 <= 0 || event.status) {
 			/* timeout or error */
-			if (!replied && rc == 0)
+			if (!replied && !rc)
 				rc = (rc2 < 0) ? rc2 :
-				     (rc2 == 0) ? -ETIMEDOUT :
+				     !rc2 ? -ETIMEDOUT :
 				     event.status;
 
 			if (!unlinked) {
@@ -1772,7 +2218,7 @@
 				/* No assertion (racing with network) */
 				unlinked = 1;
 				timeout_ms = a_long_time;
-			} else if (rc2 == 0) {
+			} else if (!rc2) {
 				/* timed out waiting for unlink */
 				CWARN("ping %s: late network completion\n",
 				      libcfs_id2str(id));
@@ -1812,7 +2258,7 @@
 		goto out_1;
 	}
 
-	if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
+	if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) {
 		CERROR("%s: ping w/o NI status: 0x%x\n",
 		       libcfs_id2str(id), info->pi_features);
 		goto out_1;
@@ -1846,9 +2292,9 @@
 
  out_1:
 	rc2 = LNetEQFree(eqh);
-	if (rc2 != 0)
+	if (rc2)
 		CERROR("rc2 %d\n", rc2);
-	LASSERT(rc2 == 0);
+	LASSERT(!rc2);
 
  out_0:
 	LIBCFS_FREE(info, infosz);
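For reference, a hedged sketch of the libcfs ioctl data the IOC_LIBCFS_PING
case above consumes; the field names come from that handler, while the target
NID, pid, timeout, buffer size and the copy-in/copy-out plumbing are assumed:

	struct libcfs_ioctl_data data;
	lnet_process_id_t ids[16];	/* reply buffer (a user buffer by the
					 * time it reaches lnet_ping() above) */

	memset(&data, 0, sizeof(data));
	data.ioc_nid = libcfs_str2nid("192.168.0.2@tcp");
	data.ioc_u32[0] = LNET_PID_LUSTRE;	/* target pid */
	data.ioc_u32[1] = 10000;		/* timeout in ms */
	data.ioc_pbuf1 = (char *)ids;
	data.ioc_plen1 = sizeof(ids);
	/* LNetCtl(IOC_LIBCFS_PING, &data) sets data.ioc_count to the IDs returned */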
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 284a3c2..8c80625 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -37,15 +37,15 @@
 #define DEBUG_SUBSYSTEM S_LNET
 #include "../../include/linux/lnet/lib-lnet.h"
 
-struct lnet_text_buf_t {	    /* tmp struct for parsing routes */
+struct lnet_text_buf {	    /* tmp struct for parsing routes */
 	struct list_head ltb_list;	/* stash on lists */
 	int ltb_size;	/* allocated size */
 	char ltb_text[0];     /* text buffer */
 };
 
 static int lnet_tbnob;			/* track text buf allocation */
-#define LNET_MAX_TEXTBUF_NOB     (64<<10)	/* bound allocation */
-#define LNET_SINGLE_TEXTBUF_NOB  (4<<10)
+#define LNET_MAX_TEXTBUF_NOB     (64 << 10)	/* bound allocation */
+#define LNET_SINGLE_TEXTBUF_NOB  (4 << 10)
 
 static void
 lnet_syntax(char *name, char *str, int offset, int width)
@@ -54,9 +54,9 @@
 	static char dashes[LNET_SINGLE_TEXTBUF_NOB];
 
 	memset(dots, '.', sizeof(dots));
-	dots[sizeof(dots)-1] = 0;
+	dots[sizeof(dots) - 1] = 0;
 	memset(dashes, '-', sizeof(dashes));
-	dashes[sizeof(dashes)-1] = 0;
+	dashes[sizeof(dashes) - 1] = 0;
 
 	LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
 	LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
@@ -77,7 +77,7 @@
 	}
 }
 
-static int
+int
 lnet_net_unique(__u32 net, struct list_head *nilist)
 {
 	struct list_head *tmp;
@@ -96,19 +96,25 @@
 void
 lnet_ni_free(struct lnet_ni *ni)
 {
-	if (ni->ni_refs != NULL)
+	int i;
+
+	if (ni->ni_refs)
 		cfs_percpt_free(ni->ni_refs);
 
-	if (ni->ni_tx_queues != NULL)
+	if (ni->ni_tx_queues)
 		cfs_percpt_free(ni->ni_tx_queues);
 
-	if (ni->ni_cpts != NULL)
+	if (ni->ni_cpts)
 		cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
 
+	for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
+		LIBCFS_FREE(ni->ni_interfaces[i],
+			    strlen(ni->ni_interfaces[i]) + 1);
+	}
 	LIBCFS_FREE(ni, sizeof(*ni));
 }
 
-static lnet_ni_t *
+lnet_ni_t *
 lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
 {
 	struct lnet_tx_queue *tq;
@@ -123,7 +129,7 @@
 	}
 
 	LIBCFS_ALLOC(ni, sizeof(*ni));
-	if (ni == NULL) {
+	if (!ni) {
 		CERROR("Out of memory creating network %s\n",
 		       libcfs_net2str(net));
 		return NULL;
@@ -133,18 +139,18 @@
 	INIT_LIST_HEAD(&ni->ni_cptlist);
 	ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
 				       sizeof(*ni->ni_refs[0]));
-	if (ni->ni_refs == NULL)
+	if (!ni->ni_refs)
 		goto failed;
 
 	ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
 					    sizeof(*ni->ni_tx_queues[0]));
-	if (ni->ni_tx_queues == NULL)
+	if (!ni->ni_tx_queues)
 		goto failed;
 
 	cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
 		INIT_LIST_HEAD(&tq->tq_delayed);
 
-	if (el == NULL) {
+	if (!el) {
 		ni->ni_cpts  = NULL;
 		ni->ni_ncpts = LNET_CPT_NUMBER;
 	} else {
@@ -178,13 +184,19 @@
 lnet_parse_networks(struct list_head *nilist, char *networks)
 {
 	struct cfs_expr_list *el = NULL;
-	int tokensize = strlen(networks) + 1;
+	int tokensize;
 	char *tokens;
 	char *str;
 	char *tmp;
 	struct lnet_ni *ni;
 	__u32 net;
 	int nnets = 0;
+	struct list_head *temp_node;
+
+	if (!networks) {
+		CERROR("networks string is undefined\n");
+		return -EINVAL;
+	}
 
 	if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
 		/* _WAY_ conservative */
@@ -193,23 +205,19 @@
 		return -EINVAL;
 	}
 
+	tokensize = strlen(networks) + 1;
+
 	LIBCFS_ALLOC(tokens, tokensize);
-	if (tokens == NULL) {
+	if (!tokens) {
 		CERROR("Can't allocate net tokens\n");
 		return -ENOMEM;
 	}
 
-	the_lnet.ln_network_tokens = tokens;
-	the_lnet.ln_network_tokens_nob = tokensize;
 	memcpy(tokens, networks, tokensize);
-	str = tmp = tokens;
+	tmp = tokens;
+	str = tokens;
 
-	/* Add in the loopback network */
-	ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist);
-	if (ni == NULL)
-		goto failed;
-
-	while (str != NULL && *str != 0) {
+	while (str && *str) {
 		char *comma = strchr(str, ',');
 		char *bracket = strchr(str, '(');
 		char *square = strchr(str, '[');
@@ -217,26 +225,29 @@
 		int niface;
 		int rc;
 
-		/* NB we don't check interface conflicts here; it's the LNDs
-		 * responsibility (if it cares at all) */
-
-		if (square != NULL && (comma == NULL || square < comma)) {
-			/* i.e: o2ib0(ib0)[1,2], number between square
-			 * brackets are CPTs this NI needs to be bond */
-			if (bracket != NULL && bracket > square) {
+		/*
+		 * NB we don't check interface conflicts here; it's the LND's
+		 * responsibility (if it cares at all)
+		 */
+		if (square && (!comma || square < comma)) {
+			/*
+			 * i.e.: o2ib0(ib0)[1,2]; the numbers between square
+			 * brackets are the CPTs this NI needs to be bound to
+			 */
+			if (bracket && bracket > square) {
 				tmp = square;
 				goto failed_syntax;
 			}
 
 			tmp = strchr(square, ']');
-			if (tmp == NULL) {
+			if (!tmp) {
 				tmp = square;
 				goto failed_syntax;
 			}
 
 			rc = cfs_expr_list_parse(square, tmp - square + 1,
 						 0, LNET_CPT_NUMBER - 1, &el);
-			if (rc != 0) {
+			if (rc) {
 				tmp = square;
 				goto failed_syntax;
 			}
@@ -245,12 +256,10 @@
 				*square++ = ' ';
 		}
 
-		if (bracket == NULL ||
-		    (comma != NULL && comma < bracket)) {
-
+		if (!bracket || (comma && comma < bracket)) {
 			/* no interface list specified */
 
-			if (comma != NULL)
+			if (comma)
 				*comma++ = 0;
 			net = libcfs_str2net(cfs_trimwhite(str));
 
@@ -262,10 +271,10 @@
 			}
 
 			if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
-			    lnet_ni_alloc(net, el, nilist) == NULL)
+			    !lnet_ni_alloc(net, el, nilist))
 				goto failed;
 
-			if (el != NULL) {
+			if (el) {
 				cfs_expr_list_free(el);
 				el = NULL;
 			}
@@ -281,12 +290,11 @@
 			goto failed_syntax;
 		}
 
-		nnets++;
 		ni = lnet_ni_alloc(net, el, nilist);
-		if (ni == NULL)
+		if (!ni)
 			goto failed;
 
-		if (el != NULL) {
+		if (el) {
 			cfs_expr_list_free(el);
 			el = NULL;
 		}
@@ -295,7 +303,7 @@
 		iface = bracket + 1;
 
 		bracket = strchr(iface, ')');
-		if (bracket == NULL) {
+		if (!bracket) {
 			tmp = iface;
 			goto failed_syntax;
 		}
@@ -303,11 +311,11 @@
 		*bracket = 0;
 		do {
 			comma = strchr(iface, ',');
-			if (comma != NULL)
+			if (comma)
 				*comma++ = 0;
 
 			iface = cfs_trimwhite(iface);
-			if (*iface == 0) {
+			if (!*iface) {
 				tmp = iface;
 				goto failed_syntax;
 			}
@@ -319,16 +327,32 @@
 				goto failed;
 			}
 
-			ni->ni_interfaces[niface++] = iface;
+			/*
+			 * Allocate a separate piece of memory and copy
+			 * the string into it, so we don't have
+			 * a dependency on the tokens string.  This way we
+			 * can free the tokens at the end of the function.
+			 * The newly allocated ni_interfaces[] entries are
+			 * freed when the NI itself is freed.
+			 */
+			LIBCFS_ALLOC(ni->ni_interfaces[niface],
+				     strlen(iface) + 1);
+			if (!ni->ni_interfaces[niface]) {
+				CERROR("Can't allocate net interface name\n");
+				goto failed;
+			}
+			strncpy(ni->ni_interfaces[niface], iface,
+				strlen(iface));
+			niface++;
 			iface = comma;
-		} while (iface != NULL);
+		} while (iface);
 
 		str = bracket + 1;
 		comma = strchr(bracket + 1, ',');
-		if (comma != NULL) {
+		if (comma) {
 			*comma = 0;
 			str = cfs_trimwhite(str);
-			if (*str != 0) {
+			if (*str) {
 				tmp = str;
 				goto failed_syntax;
 			}
@@ -337,14 +361,17 @@
 		}
 
 		str = cfs_trimwhite(str);
-		if (*str != 0) {
+		if (*str) {
 			tmp = str;
 			goto failed_syntax;
 		}
 	}
 
-	LASSERT(!list_empty(nilist));
-	return 0;
+	list_for_each(temp_node, nilist)
+		nnets++;
+
+	LIBCFS_FREE(tokens, tokensize);
+	return nnets;
 
  failed_syntax:
 	lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
@@ -356,23 +383,22 @@
 		lnet_ni_free(ni);
 	}
 
-	if (el != NULL)
+	if (el)
 		cfs_expr_list_free(el);
 
 	LIBCFS_FREE(tokens, tokensize);
-	the_lnet.ln_network_tokens = NULL;
 
 	return -EINVAL;
 }
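An illustrative (and deliberately non-exhaustive) example of the strings the
parser above accepts, combining the interface and CPT syntax handled in the
loop; the interface names are assumptions:

	/* tcp0 bound to eth0 and eth1; o2ib0 bound to ib0, restricted to CPTs 0-1 */
	char nets[] = "tcp0(eth0,eth1),o2ib0(ib0)[0,1]";
	LIST_HEAD(nilist);
	int nnets;

	nnets = lnet_parse_networks(&nilist, nets);
	/* nnets == 2 on success; the parsed NIs are queued on nilist and the
	 * interface names have been copied, so the token buffer is already freed */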
 
-static struct lnet_text_buf_t *
+static struct lnet_text_buf *
 lnet_new_text_buf(int str_len)
 {
-	struct lnet_text_buf_t *ltb;
+	struct lnet_text_buf *ltb;
 	int nob;
 
 	/* NB allocate space for the terminating 0 */
-	nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]);
+	nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]);
 	if (nob > LNET_SINGLE_TEXTBUF_NOB) {
 		/* _way_ conservative for "route net gateway..." */
 		CERROR("text buffer too big\n");
@@ -385,7 +411,7 @@
 	}
 
 	LIBCFS_ALLOC(ltb, nob);
-	if (ltb == NULL)
+	if (!ltb)
 		return NULL;
 
 	ltb->ltb_size = nob;
@@ -395,7 +421,7 @@
 }
 
 static void
-lnet_free_text_buf(struct lnet_text_buf_t *ltb)
+lnet_free_text_buf(struct lnet_text_buf *ltb)
 {
 	lnet_tbnob -= ltb->ltb_size;
 	LIBCFS_FREE(ltb, ltb->ltb_size);
@@ -404,10 +430,10 @@
 static void
 lnet_free_text_bufs(struct list_head *tbs)
 {
-	struct lnet_text_buf_t *ltb;
+	struct lnet_text_buf *ltb;
 
 	while (!list_empty(tbs)) {
-		ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
+		ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
 
 		list_del(&ltb->ltb_list);
 		lnet_free_text_buf(ltb);
@@ -421,7 +447,7 @@
 	char *sep;
 	int nob;
 	int i;
-	struct lnet_text_buf_t *ltb;
+	struct lnet_text_buf *ltb;
 
 	INIT_LIST_HEAD(&pending);
 
@@ -432,14 +458,14 @@
 			str++;
 
 		/* scan for separator or comment */
-		for (sep = str; *sep != 0; sep++)
+		for (sep = str; *sep; sep++)
 			if (lnet_issep(*sep) || *sep == '#')
 				break;
 
 		nob = (int)(sep - str);
 		if (nob > 0) {
 			ltb = lnet_new_text_buf(nob);
-			if (ltb == NULL) {
+			if (!ltb) {
 				lnet_free_text_bufs(&pending);
 				return -1;
 			}
@@ -459,10 +485,10 @@
 			/* scan for separator */
 			do {
 				sep++;
-			} while (*sep != 0 && !lnet_issep(*sep));
+			} while (*sep && !lnet_issep(*sep));
 		}
 
-		if (*sep == 0)
+		if (!*sep)
 			break;
 
 		str = sep + 1;
@@ -479,18 +505,18 @@
 {
 	int len1 = (int)(sep1 - str);
 	int len2 = strlen(sep2 + 1);
-	struct lnet_text_buf_t *ltb;
+	struct lnet_text_buf *ltb;
 
 	LASSERT(*sep1 == '[');
 	LASSERT(*sep2 == ']');
 
 	ltb = lnet_new_text_buf(len1 + itemlen + len2);
-	if (ltb == NULL)
+	if (!ltb)
 		return -ENOMEM;
 
 	memcpy(ltb->ltb_text, str, len1);
 	memcpy(&ltb->ltb_text[len1], item, itemlen);
-	memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
+	memcpy(&ltb->ltb_text[len1 + itemlen], sep2 + 1, len2);
 	ltb->ltb_text[len1 + itemlen + len2] = 0;
 
 	list_add_tail(&ltb->ltb_list, list);
@@ -516,15 +542,14 @@
 	INIT_LIST_HEAD(&pending);
 
 	sep = strchr(str, '[');
-	if (sep == NULL)			/* nothing to expand */
+	if (!sep)			/* nothing to expand */
 		return 0;
 
 	sep2 = strchr(sep, ']');
-	if (sep2 == NULL)
+	if (!sep2)
 		goto failed;
 
 	for (parsed = sep; parsed < sep2; parsed = enditem) {
-
 		enditem = ++parsed;
 		while (enditem < sep2 && *enditem != ',')
 			enditem++;
@@ -534,17 +559,13 @@
 
 		if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi,
 			   &stride, &scanned) < 3) {
-
 			if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
-
 				/* simple string enumeration */
-				if (lnet_expand1tb(
-				     &pending, str, sep, sep2,
-				     parsed,
-				     (int)(enditem - parsed)) != 0) {
+				if (lnet_expand1tb(&pending, str, sep, sep2,
+						   parsed,
+						   (int)(enditem - parsed))) {
 					goto failed;
 				}
-
 				continue;
 			}
 
@@ -557,18 +578,17 @@
 			goto failed;
 
 		if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
-		    (hi - lo) % stride != 0)
+		    (hi - lo) % stride)
 			goto failed;
 
 		for (i = lo; i <= hi; i += stride) {
-
 			snprintf(num, sizeof(num), "%d", i);
 			nob = strlen(num);
 			if (nob + 1 == sizeof(num))
 				goto failed;
 
 			if (lnet_expand1tb(&pending, str, sep, sep2,
-					   num, nob) != 0)
+					   num, nob))
 				goto failed;
 		}
 	}
@@ -602,15 +622,17 @@
 	int len;
 
 	sep = strchr(str, LNET_PRIORITY_SEPARATOR);
-	if (sep == NULL) {
+	if (!sep) {
 		*priority = 0;
 		return 0;
 	}
 	len = strlen(sep + 1);
 
-	if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
-		/* Update the caller's token pointer so it treats the found
-		   priority as the token to report in the error message. */
+	if ((sscanf((sep + 1), "%u%n", priority, &nob) < 1) || (len != nob)) {
+		/*
+		 * Update the caller's token pointer so it treats the found
+		 * priority as the token to report in the error message.
+		 */
 		*token += sep - str + 1;
 		return -1;
 	}
@@ -636,7 +658,7 @@
 	struct list_head *tmp2;
 	__u32 net;
 	lnet_nid_t nid;
-	struct lnet_text_buf_t *ltb;
+	struct lnet_text_buf *ltb;
 	int rc;
 	char *sep;
 	char *token = str;
@@ -658,7 +680,7 @@
 		/* scan for token start */
 		while (isspace(*sep))
 			sep++;
-		if (*sep == 0) {
+		if (!*sep) {
 			if (ntokens < (got_hops ? 3 : 2))
 				goto token_error;
 			break;
@@ -668,9 +690,9 @@
 		token = sep++;
 
 		/* scan for token end */
-		while (*sep != 0 && !isspace(*sep))
+		while (*sep && !isspace(*sep))
 			sep++;
-		if (*sep != 0)
+		if (*sep)
 			*sep++ = 0;
 
 		if (ntokens == 1) {
@@ -684,7 +706,7 @@
 		}
 
 		ltb = lnet_new_text_buf(strlen(token));
-		if (ltb == NULL)
+		if (!ltb)
 			goto out;
 
 		strcpy(ltb->ltb_text, token);
@@ -692,8 +714,7 @@
 		list_add_tail(tmp1, tmp2);
 
 		while (tmp1 != tmp2) {
-			ltb = list_entry(tmp1, struct lnet_text_buf_t,
-					 ltb_list);
+			ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
 
 			rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
 			if (rc < 0)
@@ -733,13 +754,12 @@
 	LASSERT(!list_empty(&gateways));
 
 	list_for_each(tmp1, &nets) {
-		ltb = list_entry(tmp1, struct lnet_text_buf_t, ltb_list);
+		ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
 		net = libcfs_str2net(ltb->ltb_text);
 		LASSERT(net != LNET_NIDNET(LNET_NID_ANY));
 
 		list_for_each(tmp2, &gateways) {
-			ltb = list_entry(tmp2, struct lnet_text_buf_t,
-					 ltb_list);
+			ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list);
 			nid = libcfs_str2nid(ltb->ltb_text);
 			LASSERT(nid != LNET_NID_ANY);
 
@@ -749,7 +769,7 @@
 			}
 
 			rc = lnet_add_route(net, hops, nid, priority);
-			if (rc != 0) {
+			if (rc && rc != -EEXIST && rc != -EHOSTUNREACH) {
 				CERROR("Can't create route to %s via %s\n",
 				       libcfs_net2str(net),
 				       libcfs_nid2str(nid));
@@ -772,10 +792,10 @@
 static int
 lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
 {
-	struct lnet_text_buf_t *ltb;
+	struct lnet_text_buf *ltb;
 
 	while (!list_empty(tbs)) {
-		ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
+		ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
 
 		if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
 			lnet_free_text_bufs(tbs);
@@ -806,7 +826,7 @@
 		rc = lnet_parse_route_tbs(&tbs, im_a_router);
 	}
 
-	LASSERT(lnet_tbnob == 0);
+	LASSERT(!lnet_tbnob);
 	return rc;
 }
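Similarly, an assumed example of a routes string exercising the bracket/stride
expansion implemented by lnet_str2tbs_expand() above:

	/* "<net> [<hops>] <gateway>": the range below expands to the gateways
	 * 10.0.0.2@tcp, 10.0.0.4@tcp and 10.0.0.6@tcp, each passed to
	 * lnet_add_route() with hop count 1; values are illustrative only. */
	char routes[] = "o2ib0 1 10.0.0.[2-6/2]@tcp";
	int im_a_router = 0;
	int rc;

	rc = lnet_parse_routes(routes, &im_a_router);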
 
@@ -818,7 +838,7 @@
 	int i;
 
 	rc = cfs_ip_addr_parse(token, len, &list);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	for (rc = i = 0; !rc && i < nip; i++)
@@ -851,18 +871,18 @@
 		/* scan for token start */
 		while (isspace(*sep))
 			sep++;
-		if (*sep == 0)
+		if (!*sep)
 			break;
 
 		token = sep++;
 
 		/* scan for token end */
-		while (*sep != 0 && !isspace(*sep))
+		while (*sep && !isspace(*sep))
 			sep++;
-		if (*sep != 0)
+		if (*sep)
 			*sep++ = 0;
 
-		if (ntokens++ == 0) {
+		if (!ntokens++) {
 			net = token;
 			continue;
 		}
@@ -876,7 +896,8 @@
 			return rc;
 		}
 
-		matched |= (rc != 0);
+		if (rc)
+			matched |= 1;
 	}
 
 	if (!matched)
@@ -892,12 +913,12 @@
 	char *bracket = strchr(netspec, '(');
 	__u32 net;
 
-	if (bracket != NULL)
+	if (bracket)
 		*bracket = 0;
 
 	net = libcfs_str2net(netspec);
 
-	if (bracket != NULL)
+	if (bracket)
 		*bracket = '(';
 
 	return net;
@@ -909,8 +930,8 @@
 	int offset = 0;
 	int offset2;
 	int len;
-	struct lnet_text_buf_t *tb;
-	struct lnet_text_buf_t *tb2;
+	struct lnet_text_buf *tb;
+	struct lnet_text_buf *tb2;
 	struct list_head *t;
 	char *sep;
 	char *bracket;
@@ -919,15 +940,13 @@
 	LASSERT(!list_empty(nets));
 	LASSERT(nets->next == nets->prev);     /* single entry */
 
-	tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list);
+	tb = list_entry(nets->next, struct lnet_text_buf, ltb_list);
 
 	for (;;) {
 		sep = strchr(tb->ltb_text, ',');
 		bracket = strchr(tb->ltb_text, '(');
 
-		if (sep != NULL &&
-		    bracket != NULL &&
-		    bracket < sep) {
+		if (sep && bracket && bracket < sep) {
 			/* netspec lists interfaces... */
 
 			offset2 = offset + (int)(bracket - tb->ltb_text);
@@ -935,16 +954,16 @@
 
 			bracket = strchr(bracket + 1, ')');
 
-			if (bracket == NULL ||
-			    !(bracket[1] == ',' || bracket[1] == 0)) {
+			if (!bracket ||
+			    !(bracket[1] == ',' || !bracket[1])) {
 				lnet_syntax("ip2nets", source, offset2, len);
 				return -EINVAL;
 			}
 
-			sep = (bracket[1] == 0) ? NULL : bracket + 1;
+			sep = !bracket[1] ? NULL : bracket + 1;
 		}
 
-		if (sep != NULL)
+		if (sep)
 			*sep++ = 0;
 
 		net = lnet_netspec2net(tb->ltb_text);
@@ -955,7 +974,7 @@
 		}
 
 		list_for_each(t, nets) {
-			tb2 = list_entry(t, struct lnet_text_buf_t, ltb_list);
+			tb2 = list_entry(t, struct lnet_text_buf, ltb_list);
 
 			if (tb2 == tb)
 				continue;
@@ -968,13 +987,13 @@
 			}
 		}
 
-		if (sep == NULL)
+		if (!sep)
 			return 0;
 
 		offset += (int)(sep - tb->ltb_text);
 		len = strlen(sep);
 		tb2 = lnet_new_text_buf(len);
-		if (tb2 == NULL)
+		if (!tb2)
 			return -ENOMEM;
 
 		strncpy(tb2->ltb_text, sep, len);
@@ -996,8 +1015,8 @@
 	struct list_head current_nets;
 	struct list_head *t;
 	struct list_head *t2;
-	struct lnet_text_buf_t *tb;
-	struct lnet_text_buf_t *tb2;
+	struct lnet_text_buf *tb;
+	struct lnet_text_buf *tb2;
 	__u32 net1;
 	__u32 net2;
 	int len;
@@ -1008,7 +1027,7 @@
 	INIT_LIST_HEAD(&raw_entries);
 	if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
 		CERROR("Error parsing ip2nets\n");
-		LASSERT(lnet_tbnob == 0);
+		LASSERT(!lnet_tbnob);
 		return -EINVAL;
 	}
 
@@ -1020,11 +1039,10 @@
 	rc = 0;
 
 	while (!list_empty(&raw_entries)) {
-		tb = list_entry(raw_entries.next, struct lnet_text_buf_t,
-				    ltb_list);
-
+		tb = list_entry(raw_entries.next, struct lnet_text_buf,
+				ltb_list);
 		strncpy(source, tb->ltb_text, sizeof(source));
-		source[sizeof(source)-1] = '\0';
+		source[sizeof(source) - 1] = '\0';
 
 		/* replace ltb_text with the network(s) add on match */
 		rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
@@ -1033,7 +1051,7 @@
 
 		list_del(&tb->ltb_list);
 
-		if (rc == 0) {		  /* no match */
+		if (!rc) {		  /* no match */
 			lnet_free_text_buf(tb);
 			continue;
 		}
@@ -1047,13 +1065,13 @@
 
 		dup = 0;
 		list_for_each(t, &current_nets) {
-			tb = list_entry(t, struct lnet_text_buf_t, ltb_list);
+			tb = list_entry(t, struct lnet_text_buf, ltb_list);
 			net1 = lnet_netspec2net(tb->ltb_text);
 			LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));
 
 			list_for_each(t2, &matched_nets) {
-				tb2 = list_entry(t2, struct lnet_text_buf_t,
-						     ltb_list);
+				tb2 = list_entry(t2, struct lnet_text_buf,
+						 ltb_list);
 				net2 = lnet_netspec2net(tb2->ltb_text);
 				LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));
 
@@ -1073,13 +1091,13 @@
 		}
 
 		list_for_each_safe(t, t2, &current_nets) {
-			tb = list_entry(t, struct lnet_text_buf_t, ltb_list);
+			tb = list_entry(t, struct lnet_text_buf, ltb_list);
 
 			list_del(&tb->ltb_list);
 			list_add_tail(&tb->ltb_list, &matched_nets);
 
 			len += snprintf(networks + len, sizeof(networks) - len,
-					"%s%s", (len == 0) ? "" : ",",
+					"%s%s", !len ? "" : ",",
 					tb->ltb_text);
 
 			if (len >= sizeof(networks)) {
@@ -1096,7 +1114,7 @@
 	lnet_free_text_bufs(&raw_entries);
 	lnet_free_text_bufs(&matched_nets);
 	lnet_free_text_bufs(&current_nets);
-	LASSERT(lnet_tbnob == 0);
+	LASSERT(!lnet_tbnob);
 
 	if (rc < 0)
 		return rc;
@@ -1122,7 +1140,7 @@
 		return nif;
 
 	LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
-	if (ipaddrs == NULL) {
+	if (!ipaddrs) {
 		CERROR("Can't allocate ipaddrs[%d]\n", nif);
 		lnet_ipif_free_enumeration(ifnames, nif);
 		return -ENOMEM;
@@ -1133,7 +1151,7 @@
 			continue;
 
 		rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask);
-		if (rc != 0) {
+		if (rc) {
 			CWARN("Can't query interface %s: %d\n",
 			      ifnames[i], rc);
 			continue;
@@ -1155,7 +1173,7 @@
 	} else {
 		if (nip > 0) {
 			LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
-			if (ipaddrs2 == NULL) {
+			if (!ipaddrs2) {
 				CERROR("Can't allocate ipaddrs[%d]\n", nip);
 				nip = -ENOMEM;
 			} else {
@@ -1184,7 +1202,7 @@
 		return nip;
 	}
 
-	if (nip == 0) {
+	if (!nip) {
 		LCONSOLE_ERROR_MSG(0x118,
 				   "No local IP interfaces for ip2nets to match\n");
 		return -ENOENT;
@@ -1198,7 +1216,7 @@
 		return rc;
 	}
 
-	if (rc == 0) {
+	if (!rc) {
 		LCONSOLE_ERROR_MSG(0x11a,
 				   "ip2nets does not match any local IP interfaces\n");
 		return -ENOENT;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 64f94a6..042e974 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -72,33 +72,38 @@
 {
 	lnet_eq_t *eq;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
-	/* We need count to be a power of 2 so that when eq_{enq,deq}_seq
+	/*
+	 * We need count to be a power of 2 so that when eq_{enq,deq}_seq
 	 * overflow, they don't skip entries, so the queue has the same
-	 * apparent capacity at all times */
+	 * apparent capacity at all times
+	 */
+	if (count)
+		count = roundup_pow_of_two(count);
 
-	count = roundup_pow_of_two(count);
-
-	if (callback != LNET_EQ_HANDLER_NONE && count != 0)
+	if (callback != LNET_EQ_HANDLER_NONE && count)
 		CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
 
-	/* count can be 0 if only need callback, we can eliminate
-	 * overhead of enqueue event */
-	if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
+	/*
+	 * count can be 0 if we only need the callback; then we can
+	 * eliminate the overhead of enqueuing events
+	 */
+	if (!count && callback == LNET_EQ_HANDLER_NONE)
 		return -EINVAL;
 
 	eq = lnet_eq_alloc();
-	if (eq == NULL)
+	if (!eq)
 		return -ENOMEM;
 
-	if (count != 0) {
+	if (count) {
 		LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
-		if (eq->eq_events == NULL)
+		if (!eq->eq_events)
 			goto failed;
-		/* NB allocator has set all event sequence numbers to 0,
-		 * so all them should be earlier than eq_deq_seq */
+		/*
+		 * NB allocator has set all event sequence numbers to 0,
+		 * so all of them should be earlier than eq_deq_seq
+		 */
 	}
 
 	eq->eq_deq_seq = 1;
@@ -108,13 +113,15 @@
 
 	eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
 				       sizeof(*eq->eq_refs[0]));
-	if (eq->eq_refs == NULL)
+	if (!eq->eq_refs)
 		goto failed;
 
 	/* MUST hold both exclusive lnet_res_lock */
 	lnet_res_lock(LNET_LOCK_EX);
-	/* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
-	 * both EQ lookup and poll event with only lnet_eq_wait_lock */
+	/*
+	 * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+	 * both EQ lookup and poll event with only lnet_eq_wait_lock
+	 */
 	lnet_eq_wait_lock();
 
 	lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
@@ -127,10 +134,10 @@
 	return 0;
 
 failed:
-	if (eq->eq_events != NULL)
+	if (eq->eq_events)
 		LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
 
-	if (eq->eq_refs != NULL)
+	if (eq->eq_refs)
 		cfs_percpt_free(eq->eq_refs);
 
 	lnet_eq_free(eq);
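A minimal usage sketch under the rules spelled out in the comments above: a
small polled EQ with no callback; the timeout is arbitrary and the event
handling is elided:

	lnet_handle_eq_t eqh;
	lnet_event_t event;
	int which;
	int rc;

	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);	/* rounded to 2^n */
	if (rc)
		return rc;

	rc = LNetEQPoll(&eqh, 1, 1000 /* ms */, &event, &which);
	/* rc > 0: got an event; rc == 0: timed out; -EOVERFLOW: events dropped */

	rc = LNetEQFree(eqh);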
@@ -159,23 +166,24 @@
 	int size = 0;
 	int i;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	lnet_res_lock(LNET_LOCK_EX);
-	/* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
-	 * both EQ lookup and poll event with only lnet_eq_wait_lock */
+	/*
+	 * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+	 * both EQ lookup and poll event with only lnet_eq_wait_lock
+	 */
 	lnet_eq_wait_lock();
 
 	eq = lnet_handle2eq(&eqh);
-	if (eq == NULL) {
+	if (!eq) {
 		rc = -ENOENT;
 		goto out;
 	}
 
 	cfs_percpt_for_each(ref, i, eq->eq_refs) {
 		LASSERT(*ref >= 0);
-		if (*ref == 0)
+		if (!*ref)
 			continue;
 
 		CDEBUG(D_NET, "Event queue (%d: %d) busy on destroy.\n",
@@ -196,9 +204,9 @@
 	lnet_eq_wait_unlock();
 	lnet_res_unlock(LNET_LOCK_EX);
 
-	if (events != NULL)
+	if (events)
 		LIBCFS_FREE(events, size * sizeof(lnet_event_t));
-	if (refs != NULL)
+	if (refs)
 		cfs_percpt_free(refs);
 
 	return rc;
@@ -211,7 +219,7 @@
 	/* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
 	int index;
 
-	if (eq->eq_size == 0) {
+	if (!eq->eq_size) {
 		LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
 		eq->eq_callback(ev);
 		return;
@@ -255,8 +263,10 @@
 	if (eq->eq_deq_seq == new_event->sequence) {
 		rc = 1;
 	} else {
-		/* don't complain with CERROR: some EQs are sized small
-		 * anyway; if it's important, the caller should complain */
+		/*
+		 * don't complain with CERROR: some EQs are sized small
+		 * anyway; if it's important, the caller should complain
+		 */
 		CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
 		       eq->eq_deq_seq, new_event->sequence);
 		rc = -EOVERFLOW;
@@ -309,7 +319,7 @@
 	wait_queue_t wl;
 	unsigned long now;
 
-	if (tms == 0)
+	if (!tms)
 		return -1; /* don't want to wait and no new event */
 
 	init_waitqueue_entry(&wl, current);
@@ -320,7 +330,6 @@
 
 	if (tms < 0) {
 		schedule();
-
 	} else {
 		now = jiffies;
 		schedule_timeout(msecs_to_jiffies(tms));
@@ -329,7 +338,7 @@
 			tms = 0;
 	}
 
-	wait = tms != 0; /* might need to call here again */
+	wait = tms; /* might need to call here again */
 	*timeout_ms = tms;
 
 	lnet_eq_wait_lock();
@@ -372,7 +381,6 @@
 	int rc;
 	int i;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	if (neq < 1)
@@ -384,20 +392,20 @@
 		for (i = 0; i < neq; i++) {
 			lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
 
-			if (eq == NULL) {
+			if (!eq) {
 				lnet_eq_wait_unlock();
 				return -ENOENT;
 			}
 
 			rc = lnet_eq_dequeue_event(eq, event);
-			if (rc != 0) {
+			if (rc) {
 				lnet_eq_wait_unlock();
 				*which = i;
 				return rc;
 			}
 		}
 
-		if (wait == 0)
+		if (!wait)
 			break;
 
 		/*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 758f5be..c74514f 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -46,16 +46,18 @@
 void
 lnet_md_unlink(lnet_libmd_t *md)
 {
-	if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
+	if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
 		/* first unlink attempt... */
 		lnet_me_t *me = md->md_me;
 
 		md->md_flags |= LNET_MD_FLAG_ZOMBIE;
 
-		/* Disassociate from ME (if any),
+		/*
+		 * Disassociate from ME (if any),
 		 * and unlink it if it was created
-		 * with LNET_UNLINK */
-		if (me != NULL) {
+		 * with LNET_UNLINK
+		 */
+		if (me) {
 			/* detach MD from portal */
 			lnet_ptl_detach_md(me, md);
 			if (me->me_unlink == LNET_UNLINK)
@@ -66,14 +68,14 @@
 		lnet_res_lh_invalidate(&md->md_lh);
 	}
 
-	if (md->md_refcount != 0) {
+	if (md->md_refcount) {
 		CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
 		return;
 	}
 
 	CDEBUG(D_NET, "Unlinking md %p\n", md);
 
-	if (md->md_eq != NULL) {
+	if (md->md_eq) {
 		int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
 
 		LASSERT(*md->md_eq->eq_refs[cpt] > 0);
@@ -103,12 +105,12 @@
 	lmd->md_refcount = 0;
 	lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
 
-	if ((umd->options & LNET_MD_IOVEC) != 0) {
-
-		if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
+	if (umd->options & LNET_MD_IOVEC) {
+		if (umd->options & LNET_MD_KIOV) /* Can't specify both */
 			return -EINVAL;
 
-		lmd->md_niov = niov = umd->length;
+		niov = umd->length;
+		lmd->md_niov = umd->length;
 		memcpy(lmd->md_iov.iov, umd->start,
 		       niov * sizeof(lmd->md_iov.iov[0]));
 
@@ -123,13 +125,14 @@
 
 		lmd->md_length = total_length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */
+		if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */
 		    (umd->max_size < 0 ||
 		     umd->max_size > total_length)) /* illegal max_size */
 			return -EINVAL;
 
-	} else if ((umd->options & LNET_MD_KIOV) != 0) {
-		lmd->md_niov = niov = umd->length;
+	} else if (umd->options & LNET_MD_KIOV) {
+		niov = umd->length;
+		lmd->md_niov = umd->length;
 		memcpy(lmd->md_iov.kiov, umd->start,
 		       niov * sizeof(lmd->md_iov.kiov[0]));
 
@@ -144,17 +147,18 @@
 
 		lmd->md_length = total_length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+		if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
 		    (umd->max_size < 0 ||
 		     umd->max_size > total_length)) /* illegal max_size */
 			return -EINVAL;
 	} else {   /* contiguous */
 		lmd->md_length = umd->length;
-		lmd->md_niov = niov = 1;
+		niov = 1;
+		lmd->md_niov = 1;
 		lmd->md_iov.iov[0].iov_base = umd->start;
 		lmd->md_iov.iov[0].iov_len = umd->length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+		if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
 		    (umd->max_size < 0 ||
 		     umd->max_size > (int)umd->length)) /* illegal max_size */
 			return -EINVAL;
@@ -169,22 +173,26 @@
 {
 	struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
 
-	/* NB we are passed an allocated, but inactive md.
+	/*
+	 * NB we are passed an allocated, but inactive md.
 	 * if we return success, caller may lnet_md_unlink() it.
 	 * otherwise caller may only lnet_md_free() it.
 	 */
-	/* This implementation doesn't know how to create START events or
+	/*
+	 * This implementation doesn't know how to create START events or
 	 * disable END events.  Best to LASSERT our caller is compliant so
-	 * we find out quickly...  */
-	/*  TODO - reevaluate what should be here in light of
+	 * we find out quickly...
+	 */
+	/*
+	 * TODO - reevaluate what should be here in light of
 	 * the removal of the start and end events
 	 * maybe there we shouldn't even allow LNET_EQ_NONE!)
-	 * LASSERT (eq == NULL);
+	 * LASSERT(!eq);
 	 */
 	if (!LNetHandleIsInvalid(eq_handle)) {
 		md->md_eq = lnet_handle2eq(&eq_handle);
 
-		if (md->md_eq == NULL)
+		if (!md->md_eq)
 			return -ENOENT;
 
 		(*md->md_eq->eq_refs[cpt])++;
@@ -208,8 +216,8 @@
 	 * and that's all.
 	 */
 	umd->start = lmd->md_start;
-	umd->length = ((lmd->md_options &
-			(LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
+	umd->length = !(lmd->md_options &
+		      (LNET_MD_IOVEC | LNET_MD_KIOV)) ?
 		      lmd->md_length : lmd->md_niov;
 	umd->threshold = lmd->md_threshold;
 	umd->max_size = lmd->md_max_size;
@@ -221,13 +229,13 @@
 static int
 lnet_md_validate(lnet_md_t *umd)
 {
-	if (umd->start == NULL && umd->length != 0) {
+	if (!umd->start && umd->length) {
 		CERROR("MD start pointer can not be NULL with length %u\n",
 		       umd->length);
 		return -EINVAL;
 	}
 
-	if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
+	if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) &&
 	    umd->length > LNET_MAX_IOV) {
 		CERROR("Invalid option: too many fragments %u, %d max\n",
 		       umd->length, LNET_MAX_IOV);
@@ -273,41 +281,42 @@
 	int cpt;
 	int rc;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
-	if (lnet_md_validate(&umd) != 0)
+	if (lnet_md_validate(&umd))
 		return -EINVAL;
 
-	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
+	if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
 		CERROR("Invalid option: no MD_OP set\n");
 		return -EINVAL;
 	}
 
 	md = lnet_md_alloc(&umd);
-	if (md == NULL)
+	if (!md)
 		return -ENOMEM;
 
 	rc = lnet_md_build(md, &umd, unlink);
 	cpt = lnet_cpt_of_cookie(meh.cookie);
 
 	lnet_res_lock(cpt);
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	me = lnet_handle2me(&meh);
-	if (me == NULL)
+	if (!me)
 		rc = -ENOENT;
-	else if (me->me_md != NULL)
+	else if (me->me_md)
 		rc = -EBUSY;
 	else
 		rc = lnet_md_link(md, umd.eq_handle, cpt);
 
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
-	/* attach this MD to portal of ME and check if it matches any
-	 * blocked msgs on this portal */
+	/*
+	 * attach this MD to portal of ME and check if it matches any
+	 * blocked msgs on this portal
+	 */
 	lnet_ptl_attach_md(me, md, &matches, &drops);
 
 	lnet_md2handle(handle, md);
@@ -350,29 +359,28 @@
 	int cpt;
 	int rc;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
-	if (lnet_md_validate(&umd) != 0)
+	if (lnet_md_validate(&umd))
 		return -EINVAL;
 
-	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
+	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
 		CERROR("Invalid option: GET|PUT illegal on active MDs\n");
 		return -EINVAL;
 	}
 
 	md = lnet_md_alloc(&umd);
-	if (md == NULL)
+	if (!md)
 		return -ENOMEM;
 
 	rc = lnet_md_build(md, &umd, unlink);
 
 	cpt = lnet_res_lock_current();
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	rc = lnet_md_link(md, umd.eq_handle, cpt);
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	lnet_md2handle(handle, md);
@@ -425,23 +433,24 @@
 	lnet_libmd_t *md;
 	int cpt;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	cpt = lnet_cpt_of_cookie(mdh.cookie);
 	lnet_res_lock(cpt);
 
 	md = lnet_handle2md(&mdh);
-	if (md == NULL) {
+	if (!md) {
 		lnet_res_unlock(cpt);
 		return -ENOENT;
 	}
 
 	md->md_flags |= LNET_MD_FLAG_ABORTED;
-	/* If the MD is busy, lnet_md_unlink just marks it for deletion, and
+	/*
+	 * If the MD is busy, lnet_md_unlink just marks it for deletion, and
 	 * when the LND is done, the completion event flags that the MD was
-	 * unlinked.  Otherwise, we enqueue an event now... */
-	if (md->md_eq != NULL && md->md_refcount == 0) {
+	 * unlinked.  Otherwise, we enqueue an event now...
+	 */
+	if (md->md_eq && !md->md_refcount) {
 		lnet_build_unlink_event(md, &ev);
 		lnet_eq_enqueue_event(md->md_eq, &ev);
 	}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index 42fc99e..e671aed 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -83,7 +83,6 @@
 	struct lnet_me *me;
 	struct list_head *head;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	if ((int)portal >= the_lnet.ln_nportals)
@@ -91,11 +90,11 @@
 
 	mtable = lnet_mt_of_attach(portal, match_id,
 				   match_bits, ignore_bits, pos);
-	if (mtable == NULL) /* can't match portal type */
+	if (!mtable) /* can't match portal type */
 		return -EPERM;
 
 	me = lnet_me_alloc();
-	if (me == NULL)
+	if (!me)
 		return -ENOMEM;
 
 	lnet_res_lock(mtable->mt_cpt);
@@ -109,7 +108,7 @@
 
 	lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
 			       &me->me_lh);
-	if (ignore_bits != 0)
+	if (ignore_bits)
 		head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
 	else
 		head = lnet_mt_match_head(mtable, match_id, match_bits);
@@ -156,14 +155,13 @@
 	struct lnet_portal *ptl;
 	int cpt;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	if (pos == LNET_INS_LOCAL)
 		return -EPERM;
 
 	new_me = lnet_me_alloc();
-	if (new_me == NULL)
+	if (!new_me)
 		return -ENOMEM;
 
 	cpt = lnet_cpt_of_cookie(current_meh.cookie);
@@ -171,7 +169,7 @@
 	lnet_res_lock(cpt);
 
 	current_me = lnet_handle2me(&current_meh);
-	if (current_me == NULL) {
+	if (!current_me) {
 		lnet_me_free(new_me);
 
 		lnet_res_unlock(cpt);
@@ -233,22 +231,21 @@
 	lnet_event_t ev;
 	int cpt;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	cpt = lnet_cpt_of_cookie(meh.cookie);
 	lnet_res_lock(cpt);
 
 	me = lnet_handle2me(&meh);
-	if (me == NULL) {
+	if (!me) {
 		lnet_res_unlock(cpt);
 		return -ENOENT;
 	}
 
 	md = me->me_md;
-	if (md != NULL) {
+	if (md) {
 		md->md_flags |= LNET_MD_FLAG_ABORTED;
-		if (md->md_eq != NULL && md->md_refcount == 0) {
+		if (md->md_eq && !md->md_refcount) {
 			lnet_build_unlink_event(md, &ev);
 			lnet_eq_enqueue_event(md->md_eq, &ev);
 		}
@@ -267,7 +264,7 @@
 {
 	list_del(&me->me_list);
 
-	if (me->me_md != NULL) {
+	if (me->me_md) {
 		lnet_libmd_t *md = me->me_md;
 
 		/* detach MD from portal of this ME */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index fb8f7be0..7bc3e91 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -42,6 +42,11 @@
 
 #include "../../include/linux/lnet/lib-lnet.h"
 
+/** lnet message has credit and can be submitted to lnd for send/receive */
+#define LNET_CREDIT_OK		0
+/** lnet message is waiting for credit */
+#define LNET_CREDIT_WAIT	1
+
 static int local_nid_dist_zero = 1;
 module_param(local_nid_dist_zero, int, 0444);
 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
@@ -54,13 +59,11 @@
 	struct list_head *next;
 	struct list_head cull;
 
-	LASSERT(the_lnet.ln_init);
-
 	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
-	if (threshold != 0) {
+	if (threshold) {
 		/* Adding a new entry */
 		LIBCFS_ALLOC(tp, sizeof(*tp));
-		if (tp == NULL)
+		if (!tp)
 			return -ENOMEM;
 
 		tp->tp_nid = nid;
@@ -80,7 +83,7 @@
 	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
 		tp = list_entry(el, lnet_test_peer_t, tp_list);
 
-		if (tp->tp_threshold == 0 ||    /* needs culling anyway */
+		if (!tp->tp_threshold ||    /* needs culling anyway */
 		    nid == LNET_NID_ANY ||       /* removing all entries */
 		    tp->tp_nid == nid) {	  /* matched this one */
 			list_del(&tp->tp_list);
@@ -116,12 +119,14 @@
 	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
 		tp = list_entry(el, lnet_test_peer_t, tp_list);
 
-		if (tp->tp_threshold == 0) {
+		if (!tp->tp_threshold) {
 			/* zombie entry */
 			if (outgoing) {
-				/* only cull zombies on outgoing tests,
+				/*
+				 * only cull zombies on outgoing tests,
 				 * since we may be at interrupt priority on
-				 * incoming messages. */
+				 * incoming messages.
+				 */
 				list_del(&tp->tp_list);
 				list_add(&tp->tp_list, &cull);
 			}
@@ -135,7 +140,7 @@
 			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
 				tp->tp_threshold--;
 				if (outgoing &&
-				    tp->tp_threshold == 0) {
+				    !tp->tp_threshold) {
 					/* see above */
 					list_del(&tp->tp_list);
 					list_add(&tp->tp_list, &cull);
@@ -171,13 +176,13 @@
 
 void
 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
-		   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
-		   unsigned int nob)
+		  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
+		  unsigned int nob)
 {
 	/* NB diov, siov are READ-ONLY */
 	unsigned int this_nob;
 
-	if (nob == 0)
+	if (!nob)
 		return;
 
 	/* skip complete frags before 'doffset' */
@@ -206,7 +211,7 @@
 		this_nob = min(this_nob, nob);
 
 		memcpy((char *)diov->iov_base + doffset,
-			(char *)siov->iov_base + soffset, this_nob);
+		       (char *)siov->iov_base + soffset, this_nob);
 		nob -= this_nob;
 
 		if (diov->iov_len > doffset + this_nob) {
@@ -230,16 +235,18 @@
 
 int
 lnet_extract_iov(int dst_niov, struct kvec *dst,
-		  int src_niov, struct kvec *src,
-		  unsigned int offset, unsigned int len)
+		 int src_niov, struct kvec *src,
+		 unsigned int offset, unsigned int len)
 {
-	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
+	/*
+	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
 	 * for exactly 'len' bytes, and return the number of entries.
-	 * NB not destructive to 'src' */
+	 * NB not destructive to 'src'
+	 */
 	unsigned int frag_len;
 	unsigned int niov;
 
-	if (len == 0)			   /* no data => */
+	if (!len)			   /* no data => */
 		return 0;		     /* no frags */
 
 	LASSERT(src_niov > 0);
@@ -297,7 +304,7 @@
 	char *daddr = NULL;
 	char *saddr = NULL;
 
-	if (nob == 0)
+	if (!nob)
 		return;
 
 	LASSERT(!in_interrupt());
@@ -325,17 +332,18 @@
 			       siov->kiov_len - soffset);
 		this_nob = min(this_nob, nob);
 
-		if (daddr == NULL)
+		if (!daddr)
 			daddr = ((char *)kmap(diov->kiov_page)) +
 				diov->kiov_offset + doffset;
-		if (saddr == NULL)
+		if (!saddr)
 			saddr = ((char *)kmap(siov->kiov_page)) +
 				siov->kiov_offset + soffset;
 
-		/* Vanishing risk of kmap deadlock when mapping 2 pages.
+		/*
+		 * Vanishing risk of kmap deadlock when mapping 2 pages.
 		 * However in practice at least one of the kiovs will be mapped
-		 * kernel pages and the map/unmap will be NOOPs */
-
+		 * kernel pages and the map/unmap will be NOOPs
+		 */
 		memcpy(daddr, saddr, this_nob);
 		nob -= this_nob;
 
@@ -362,9 +370,9 @@
 		}
 	} while (nob > 0);
 
-	if (daddr != NULL)
+	if (daddr)
 		kunmap(diov->kiov_page);
-	if (saddr != NULL)
+	if (saddr)
 		kunmap(siov->kiov_page);
 }
 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
@@ -378,7 +386,7 @@
 	unsigned int this_nob;
 	char *addr = NULL;
 
-	if (nob == 0)
+	if (!nob)
 		return;
 
 	LASSERT(!in_interrupt());
@@ -406,7 +414,7 @@
 			       (__kernel_size_t) kiov->kiov_len - kiovoffset);
 		this_nob = min(this_nob, nob);
 
-		if (addr == NULL)
+		if (!addr)
 			addr = ((char *)kmap(kiov->kiov_page)) +
 				kiov->kiov_offset + kiovoffset;
 
@@ -434,7 +442,7 @@
 
 	} while (nob > 0);
 
-	if (addr != NULL)
+	if (addr)
 		kunmap(kiov->kiov_page);
 }
 EXPORT_SYMBOL(lnet_copy_kiov2iov);
@@ -449,7 +457,7 @@
 	unsigned int this_nob;
 	char *addr = NULL;
 
-	if (nob == 0)
+	if (!nob)
 		return;
 
 	LASSERT(!in_interrupt());
@@ -477,7 +485,7 @@
 			       iov->iov_len - iovoffset);
 		this_nob = min(this_nob, nob);
 
-		if (addr == NULL)
+		if (!addr)
 			addr = ((char *)kmap(kiov->kiov_page)) +
 				kiov->kiov_offset + kiovoffset;
 
@@ -504,23 +512,25 @@
 		}
 	} while (nob > 0);
 
-	if (addr != NULL)
+	if (addr)
 		kunmap(kiov->kiov_page);
 }
 EXPORT_SYMBOL(lnet_copy_iov2kiov);
 
 int
 lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
-		   int src_niov, lnet_kiov_t *src,
-		   unsigned int offset, unsigned int len)
+		  int src_niov, lnet_kiov_t *src,
+		  unsigned int offset, unsigned int len)
 {
-	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
+	/*
+	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
 	 * for exactly 'len' bytes, and return the number of entries.
-	 * NB not destructive to 'src' */
+	 * NB not destructive to 'src'
+	 */
 	unsigned int frag_len;
 	unsigned int niov;
 
-	if (len == 0)			   /* no data => */
+	if (!len)			   /* no data => */
 		return 0;		     /* no frags */
 
 	LASSERT(src_niov > 0);
@@ -543,7 +553,7 @@
 		if (len <= frag_len) {
 			dst->kiov_len = len;
 			LASSERT(dst->kiov_offset + dst->kiov_len
-					     <= PAGE_CACHE_SIZE);
+					<= PAGE_CACHE_SIZE);
 			return niov;
 		}
 
@@ -570,9 +580,9 @@
 	int rc;
 
 	LASSERT(!in_interrupt());
-	LASSERT(mlen == 0 || msg != NULL);
+	LASSERT(!mlen || msg);
 
-	if (msg != NULL) {
+	if (msg) {
 		LASSERT(msg->msg_receiving);
 		LASSERT(!msg->msg_sending);
 		LASSERT(rlen == msg->msg_len);
@@ -582,18 +592,18 @@
 
 		msg->msg_receiving = 0;
 
-		if (mlen != 0) {
+		if (mlen) {
 			niov = msg->msg_niov;
 			iov  = msg->msg_iov;
 			kiov = msg->msg_kiov;
 
 			LASSERT(niov > 0);
-			LASSERT((iov == NULL) != (kiov == NULL));
+			LASSERT(!iov != !kiov);
 		}
 	}
 
-	rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
-				    niov, iov, kiov, offset, mlen, rlen);
+	rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed,
+				  niov, iov, kiov, offset, mlen, rlen);
 	if (rc < 0)
 		lnet_finalize(ni, msg, rc);
 }
@@ -605,13 +615,13 @@
 
 	LASSERT(msg->msg_len > 0);
 	LASSERT(!msg->msg_routing);
-	LASSERT(md != NULL);
-	LASSERT(msg->msg_niov == 0);
-	LASSERT(msg->msg_iov == NULL);
-	LASSERT(msg->msg_kiov == NULL);
+	LASSERT(md);
+	LASSERT(!msg->msg_niov);
+	LASSERT(!msg->msg_iov);
+	LASSERT(!msg->msg_kiov);
 
 	msg->msg_niov = md->md_niov;
-	if ((md->md_options & LNET_MD_KIOV) != 0)
+	if (md->md_options & LNET_MD_KIOV)
 		msg->msg_kiov = md->md_iov.kiov;
 	else
 		msg->msg_iov = md->md_iov.iov;
@@ -626,7 +636,7 @@
 	msg->msg_len = len;
 	msg->msg_offset = offset;
 
-	if (len != 0)
+	if (len)
 		lnet_setpayloadbuffer(msg);
 
 	memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
@@ -646,9 +656,9 @@
 
 	LASSERT(!in_interrupt());
 	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
-		 (msg->msg_txcredit && msg->msg_peertxcredit));
+		(msg->msg_txcredit && msg->msg_peertxcredit));
 
-	rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
+	rc = ni->ni_lnd->lnd_send(ni, priv, msg);
 	if (rc < 0)
 		lnet_finalize(ni, msg, rc);
 }
@@ -661,12 +671,12 @@
 	LASSERT(!msg->msg_sending);
 	LASSERT(msg->msg_receiving);
 	LASSERT(!msg->msg_rx_ready_delay);
-	LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);
+	LASSERT(ni->ni_lnd->lnd_eager_recv);
 
 	msg->msg_rx_ready_delay = 1;
-	rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
-					  &msg->msg_private);
-	if (rc != 0) {
+	rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
+					&msg->msg_private);
+	if (rc) {
 		CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
 		       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
 		       libcfs_id2str(msg->msg_target), rc);
@@ -683,15 +693,15 @@
 	unsigned long last_alive = 0;
 
 	LASSERT(lnet_peer_aliveness_enabled(lp));
-	LASSERT(ni->ni_lnd->lnd_query != NULL);
+	LASSERT(ni->ni_lnd->lnd_query);
 
 	lnet_net_unlock(lp->lp_cpt);
-	(ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
+	ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
 	lnet_net_lock(lp->lp_cpt);
 
 	lp->lp_last_query = cfs_time_current();
 
-	if (last_alive != 0) /* NI has updated timestamp */
+	if (last_alive) /* NI has updated timestamp */
 		lp->lp_last_alive = last_alive;
 }
 
@@ -720,14 +730,16 @@
 	 * case, and moreover lp_last_alive at peer creation is assumed.
 	 */
 	if (alive && !lp->lp_alive &&
-	    !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
+	    !(lnet_isrouter(lp) && !lp->lp_alive_count))
 		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
 
 	return alive;
 }
 
-/* NB: returns 1 when alive, 0 when dead, negative when error;
- *     may drop the lnet_net_lock */
+/*
+ * NB: returns 1 when alive, 0 when dead, negative when error;
+ *     may drop the lnet_net_lock
+ */
 static int
 lnet_peer_alive_locked(lnet_peer_t *lp)
 {
@@ -739,9 +751,11 @@
 	if (lnet_peer_is_alive(lp, now))
 		return 1;
 
-	/* Peer appears dead, but we should avoid frequent NI queries (at
-	 * most once per lnet_queryinterval seconds). */
-	if (lp->lp_last_query != 0) {
+	/*
+	 * Peer appears dead, but we should avoid frequent NI queries (at
+	 * most once per lnet_queryinterval seconds).
+	 */
+	if (lp->lp_last_query) {
 		static const int lnet_queryinterval = 1;
 
 		unsigned long next_query =
@@ -775,10 +789,10 @@
  *	  lnet_send() is going to lnet_net_unlock immediately after this, so
  *	  it sets do_send FALSE and I don't do the unlock/send/lock bit.
  *
- * \retval 0 If \a msg sent or OK to send.
- * \retval EAGAIN If \a msg blocked for credit.
- * \retval EHOSTUNREACH If the next hop of the message appears dead.
- * \retval ECANCELED If the MD of the message has been unlinked.
+ * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
+ * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
+ * \retval -EHOSTUNREACH If the next hop of the message appears dead.
+ * \retval -ECANCELED If the MD of the message has been unlinked.
  */
 static int
 lnet_post_send_locked(lnet_msg_t *msg, int do_send)
@@ -794,8 +808,8 @@
 	LASSERT(msg->msg_tx_committed);
 
 	/* NB 'lp' is always the next hop */
-	if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
-	    lnet_peer_alive_locked(lp) == 0) {
+	if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
+	    !lnet_peer_alive_locked(lp)) {
 		the_lnet.ln_counters[cpt]->drop_count++;
 		the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
 		lnet_net_unlock(cpt);
@@ -806,11 +820,11 @@
 			lnet_finalize(ni, msg, -EHOSTUNREACH);
 
 		lnet_net_lock(cpt);
-		return EHOSTUNREACH;
+		return -EHOSTUNREACH;
 	}
 
-	if (msg->msg_md != NULL &&
-	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
+	if (msg->msg_md &&
+	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
 		lnet_net_unlock(cpt);
 
 		CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
@@ -819,12 +833,12 @@
 			lnet_finalize(ni, msg, -ECANCELED);
 
 		lnet_net_lock(cpt);
-		return ECANCELED;
+		return -ECANCELED;
 	}
 
 	if (!msg->msg_peertxcredit) {
 		LASSERT((lp->lp_txcredits < 0) ==
-			 !list_empty(&lp->lp_txq));
+			!list_empty(&lp->lp_txq));
 
 		msg->msg_peertxcredit = 1;
 		lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
@@ -836,7 +850,7 @@
 		if (lp->lp_txcredits < 0) {
 			msg->msg_tx_delayed = 1;
 			list_add_tail(&msg->msg_list, &lp->lp_txq);
-			return EAGAIN;
+			return LNET_CREDIT_WAIT;
 		}
 	}
 
@@ -853,7 +867,7 @@
 		if (tq->tq_credits < 0) {
 			msg->msg_tx_delayed = 1;
 			list_add_tail(&msg->msg_list, &tq->tq_delayed);
-			return EAGAIN;
+			return LNET_CREDIT_WAIT;
 		}
 	}
 
@@ -862,7 +876,7 @@
 		lnet_ni_send(ni, msg);
 		lnet_net_lock(cpt);
 	}
-	return 0;
+	return LNET_CREDIT_OK;
 }
 
 static lnet_rtrbufpool_t *
@@ -888,16 +902,19 @@
 static int
 lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
 {
-	/* lnet_parse is going to lnet_net_unlock immediately after this, so it
-	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.  I
-	 * return EAGAIN if msg blocked and 0 if received or OK to receive */
+	/*
+	 * lnet_parse is going to lnet_net_unlock immediately after this, so it
+	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.
+	 * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
+	 * received or OK to receive
+	 */
 	lnet_peer_t *lp = msg->msg_rxpeer;
 	lnet_rtrbufpool_t *rbp;
 	lnet_rtrbuf_t *rb;
 
-	LASSERT(msg->msg_iov == NULL);
-	LASSERT(msg->msg_kiov == NULL);
-	LASSERT(msg->msg_niov == 0);
+	LASSERT(!msg->msg_iov);
+	LASSERT(!msg->msg_kiov);
+	LASSERT(!msg->msg_niov);
 	LASSERT(msg->msg_routing);
 	LASSERT(msg->msg_receiving);
 	LASSERT(!msg->msg_sending);
@@ -907,7 +924,7 @@
 
 	if (!msg->msg_peerrtrcredit) {
 		LASSERT((lp->lp_rtrcredits < 0) ==
-			 !list_empty(&lp->lp_rtrq));
+			!list_empty(&lp->lp_rtrq));
 
 		msg->msg_peerrtrcredit = 1;
 		lp->lp_rtrcredits--;
@@ -919,16 +936,13 @@
 			LASSERT(msg->msg_rx_ready_delay);
 			msg->msg_rx_delayed = 1;
 			list_add_tail(&msg->msg_list, &lp->lp_rtrq);
-			return EAGAIN;
+			return LNET_CREDIT_WAIT;
 		}
 	}
 
 	rbp = lnet_msg2bufpool(msg);
 
 	if (!msg->msg_rtrcredit) {
-		LASSERT((rbp->rbp_credits < 0) ==
-			 !list_empty(&rbp->rbp_msgs));
-
 		msg->msg_rtrcredit = 1;
 		rbp->rbp_credits--;
 		if (rbp->rbp_credits < rbp->rbp_mincredits)
@@ -939,7 +953,7 @@
 			LASSERT(msg->msg_rx_ready_delay);
 			msg->msg_rx_delayed = 1;
 			list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
-			return EAGAIN;
+			return LNET_CREDIT_WAIT;
 		}
 	}
 
@@ -958,7 +972,7 @@
 			     0, msg->msg_len, msg->msg_len);
 		lnet_net_lock(cpt);
 	}
-	return 0;
+	return LNET_CREDIT_OK;
 }
 
 void
@@ -980,7 +994,7 @@
 		tq->tq_credits++;
 		if (tq->tq_credits <= 0) {
 			msg2 = list_entry(tq->tq_delayed.next,
-					      lnet_msg_t, msg_list);
+					  lnet_msg_t, msg_list);
 			list_del(&msg2->msg_list);
 
 			LASSERT(msg2->msg_txpeer->lp_ni == ni);
@@ -1003,7 +1017,7 @@
 		txpeer->lp_txcredits++;
 		if (txpeer->lp_txcredits <= 0) {
 			msg2 = list_entry(txpeer->lp_txq.next,
-					      lnet_msg_t, msg_list);
+					  lnet_msg_t, msg_list);
 			list_del(&msg2->msg_list);
 
 			LASSERT(msg2->msg_txpeer == txpeer);
@@ -1013,13 +1027,50 @@
 		}
 	}
 
-	if (txpeer != NULL) {
+	if (txpeer) {
 		msg->msg_txpeer = NULL;
 		lnet_peer_decref_locked(txpeer);
 	}
 }
 
 void
+lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
+{
+	lnet_msg_t *msg;
+
+	if (list_empty(&rbp->rbp_msgs))
+		return;
+	msg = list_entry(rbp->rbp_msgs.next,
+			 lnet_msg_t, msg_list);
+	list_del(&msg->msg_list);
+
+	(void)lnet_post_routed_recv_locked(msg, 1);
+}
+
+void
+lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
+{
+	struct list_head drop;
+	lnet_msg_t *msg;
+	lnet_msg_t *tmp;
+
+	INIT_LIST_HEAD(&drop);
+
+	list_splice_init(list, &drop);
+
+	lnet_net_unlock(cpt);
+
+	list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
+		lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
+			     0, 0, 0, msg->msg_hdr.payload_length);
+		list_del_init(&msg->msg_list);
+		lnet_finalize(NULL, msg, -ECANCELED);
+	}
+
+	lnet_net_lock(cpt);
+}
+
+void
 lnet_return_rx_credits_locked(lnet_msg_t *msg)
 {
 	lnet_peer_t *rxpeer = msg->msg_rxpeer;
@@ -1030,34 +1081,51 @@
 		lnet_rtrbuf_t *rb;
 		lnet_rtrbufpool_t *rbp;
 
-		/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
+		/*
+		 * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
 		 * there until it gets one allocated, or aborts the wait
-		 * itself */
-		LASSERT(msg->msg_kiov != NULL);
+		 * itself
+		 */
+		LASSERT(msg->msg_kiov);
 
 		rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
 		rbp = rb->rb_pool;
-		LASSERT(rbp == lnet_msg2bufpool(msg));
 
 		msg->msg_kiov = NULL;
 		msg->msg_rtrcredit = 0;
 
-		LASSERT((rbp->rbp_credits < 0) ==
-			!list_empty(&rbp->rbp_msgs));
+		LASSERT(rbp == lnet_msg2bufpool(msg));
+
 		LASSERT((rbp->rbp_credits > 0) ==
 			!list_empty(&rbp->rbp_bufs));
 
-		list_add(&rb->rb_list, &rbp->rbp_bufs);
-		rbp->rbp_credits++;
-		if (rbp->rbp_credits <= 0) {
-			msg2 = list_entry(rbp->rbp_msgs.next,
-					      lnet_msg_t, msg_list);
-			list_del(&msg2->msg_list);
+		/*
+		 * If routing is now turned off, we just drop this buffer and
+		 * don't bother trying to return credits.
+		 */
+		if (!the_lnet.ln_routing) {
+			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+			goto routing_off;
+		}
 
-			(void) lnet_post_routed_recv_locked(msg2, 1);
+		/*
+		 * It is possible that a user has lowered the desired number of
+		 * buffers in this pool.  Make sure we never put back
+		 * more buffers than the stated number.
+		 */
+		if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
+			/* Discard this buffer so we don't have too many. */
+			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+			rbp->rbp_nbuffers--;
+		} else {
+			list_add(&rb->rb_list, &rbp->rbp_bufs);
+			rbp->rbp_credits++;
+			if (rbp->rbp_credits <= 0)
+				lnet_schedule_blocked_locked(rbp);
 		}
 	}
 
+routing_off:
 	if (msg->msg_peerrtrcredit) {
 		/* give back peer router credits */
 		msg->msg_peerrtrcredit = 0;
@@ -1066,15 +1134,22 @@
 			!list_empty(&rxpeer->lp_rtrq));
 
 		rxpeer->lp_rtrcredits++;
-		if (rxpeer->lp_rtrcredits <= 0) {
+		/*
+		 * drop all messages which are queued to be routed on that
+		 * peer.
+		 */
+		if (!the_lnet.ln_routing) {
+			lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
+						     msg->msg_rx_cpt);
+		} else if (rxpeer->lp_rtrcredits <= 0) {
 			msg2 = list_entry(rxpeer->lp_rtrq.next,
-					      lnet_msg_t, msg_list);
+					  lnet_msg_t, msg_list);
 			list_del(&msg2->msg_list);
 
 			(void) lnet_post_routed_recv_locked(msg2, 1);
 		}
 	}
-	if (rxpeer != NULL) {
+	if (rxpeer) {
 		msg->msg_rxpeer = NULL;
 		lnet_peer_decref_locked(rxpeer);
 	}
@@ -1120,59 +1195,62 @@
 lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
 {
 	lnet_remotenet_t *rnet;
-	lnet_route_t *rtr;
-	lnet_route_t *rtr_best;
-	lnet_route_t *rtr_last;
+	lnet_route_t *route;
+	lnet_route_t *best_route;
+	lnet_route_t *last_route;
 	struct lnet_peer *lp_best;
 	struct lnet_peer *lp;
 	int rc;
 
-	/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
-	 * rtr_nid nid, otherwise find the best gateway I can use */
-
+	/*
+	 * If @rtr_nid is not LNET_NID_ANY, return the gateway with
+	 * rtr_nid nid, otherwise find the best gateway I can use
+	 */
 	rnet = lnet_find_net_locked(LNET_NIDNET(target));
-	if (rnet == NULL)
+	if (!rnet)
 		return NULL;
 
 	lp_best = NULL;
-	rtr_best = rtr_last = NULL;
-	list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
-		lp = rtr->lr_gateway;
+	best_route = NULL;
+	last_route = NULL;
+	list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+		lp = route->lr_gateway;
 
-		if (!lp->lp_alive || /* gateway is down */
-		    ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
-		     rtr->lr_downis != 0)) /* NI to target is down */
+		if (!lnet_is_route_alive(route))
 			continue;
 
-		if (ni != NULL && lp->lp_ni != ni)
+		if (ni && lp->lp_ni != ni)
 			continue;
 
 		if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
 			return lp;
 
-		if (lp_best == NULL) {
-			rtr_best = rtr_last = rtr;
+		if (!lp_best) {
+			best_route = route;
+			last_route = route;
 			lp_best = lp;
 			continue;
 		}
 
 		/* no protection on below fields, but it's harmless */
-		if (rtr_last->lr_seq - rtr->lr_seq < 0)
-			rtr_last = rtr;
+		if (last_route->lr_seq - route->lr_seq < 0)
+			last_route = route;
 
-		rc = lnet_compare_routes(rtr, rtr_best);
+		rc = lnet_compare_routes(route, best_route);
 		if (rc < 0)
 			continue;
 
-		rtr_best = rtr;
+		best_route = route;
 		lp_best = lp;
 	}
 
-	/* set sequence number on the best router to the latest sequence + 1
+	/*
+	 * set sequence number on the best router to the latest sequence + 1
 	 * so we can round-robin all routers, it's race and inaccurate but
-	 * harmless and functional  */
-	if (rtr_best != NULL)
-		rtr_best->lr_seq = rtr_last->lr_seq + 1;
+	 * harmless and functional
+	 */
+	if (best_route)
+		best_route->lr_seq = last_route->lr_seq + 1;
 	return lp_best;
 }
 
@@ -1187,11 +1265,13 @@
 	int cpt2;
 	int rc;
 
-	/* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
+	/*
+	 * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
 	 * but we might want to use pre-determined router for ACK/REPLY
-	 * in the future */
-	/* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
-	LASSERT(msg->msg_txpeer == NULL);
+	 * in the future
+	 */
+	/* NB: ni is non-NULL if the interface is pre-determined (ACK/REPLY) */
+	LASSERT(!msg->msg_txpeer);
 	LASSERT(!msg->msg_sending);
 	LASSERT(!msg->msg_target_is_router);
 	LASSERT(!msg->msg_receiving);
@@ -1212,7 +1292,7 @@
 		src_ni = NULL;
 	} else {
 		src_ni = lnet_nid2ni_locked(src_nid, cpt);
-		if (src_ni == NULL) {
+		if (!src_ni) {
 			lnet_net_unlock(cpt);
 			LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
 				      libcfs_nid2str(dst_nid),
@@ -1225,8 +1305,8 @@
 	/* Is this for someone on a local network? */
 	local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);
 
-	if (local_ni != NULL) {
-		if (src_ni == NULL) {
+	if (local_ni) {
+		if (!src_ni) {
 			src_ni = local_ni;
 			src_nid = src_ni->ni_nid;
 		} else if (src_ni == local_ni) {
@@ -1261,7 +1341,7 @@
 		rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
 		/* lp has ref on src_ni; lose mine */
 		lnet_ni_decref_locked(src_ni, cpt);
-		if (rc != 0) {
+		if (rc) {
 			lnet_net_unlock(cpt);
 			LCONSOLE_WARN("Error %d finding peer %s\n", rc,
 				      libcfs_nid2str(dst_nid));
@@ -1272,8 +1352,8 @@
 	} else {
 		/* sending to a remote network */
 		lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
-		if (lp == NULL) {
-			if (src_ni != NULL)
+		if (!lp) {
+			if (src_ni)
 				lnet_ni_decref_locked(src_ni, cpt);
 			lnet_net_unlock(cpt);
 
@@ -1283,14 +1363,16 @@
 			return -EHOSTUNREACH;
 		}
 
-		/* rtr_nid is LNET_NID_ANY or NID of pre-determined router,
+		/*
+		 * rtr_nid is LNET_NID_ANY or NID of pre-determined router,
 		 * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
 		 * pre-determined router, this can happen if router table
-		 * was changed when we release the lock */
+		 * was changed when we release the lock
+		 */
 		if (rtr_nid != lp->lp_nid) {
 			cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
 			if (cpt2 != cpt) {
-				if (src_ni != NULL)
+				if (src_ni)
 					lnet_ni_decref_locked(src_ni, cpt);
 				lnet_net_unlock(cpt);
 
@@ -1304,7 +1386,7 @@
 		       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
 		       lnet_msgtyp2str(msg->msg_type), msg->msg_len);
 
-		if (src_ni == NULL) {
+		if (!src_ni) {
 			src_ni = lp->lp_ni;
 			src_nid = src_ni->ni_nid;
 		} else {
@@ -1324,27 +1406,27 @@
 
 		msg->msg_target_is_router = 1;
 		msg->msg_target.nid = lp->lp_nid;
-		msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
+		msg->msg_target.pid = LNET_PID_LUSTRE;
 	}
 
 	/* 'lp' is our best choice of peer */
 
 	LASSERT(!msg->msg_peertxcredit);
 	LASSERT(!msg->msg_txcredit);
-	LASSERT(msg->msg_txpeer == NULL);
+	LASSERT(!msg->msg_txpeer);
 
 	msg->msg_txpeer = lp;		   /* msg takes my ref on lp */
 
 	rc = lnet_post_send_locked(msg, 0);
 	lnet_net_unlock(cpt);
 
-	if (rc == EHOSTUNREACH || rc == ECANCELED)
-		return -rc;
+	if (rc < 0)
+		return rc;
 
-	if (rc == 0)
+	if (rc == LNET_CREDIT_OK)
 		lnet_ni_send(src_ni, msg);
 
-	return 0; /* rc == 0 or EAGAIN */
+	return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
 }
 
 static void
@@ -1363,15 +1445,17 @@
 {
 	lnet_hdr_t *hdr = &msg->msg_hdr;
 
-	if (msg->msg_wanted != 0)
+	if (msg->msg_wanted)
 		lnet_setpayloadbuffer(msg);
 
 	lnet_build_msg_event(msg, LNET_EVENT_PUT);
 
-	/* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
-	 * it back into the ACK during lnet_finalize() */
-	msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
-			(msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
+	/*
+	 * Must I ACK?  If so I'll grab the ack_wmd out of the header and put
+	 * it back into the ACK during lnet_finalize()
+	 */
+	msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
+		       !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);
 
 	lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
 		     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
@@ -1397,7 +1481,7 @@
 	info.mi_roffset	= hdr->msg.put.offset;
 	info.mi_mbits	= hdr->msg.put.match_bits;
 
-	msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;
+	msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;
 
  again:
 	rc = lnet_ptl_match_md(&info, msg);
@@ -1414,7 +1498,7 @@
 			return 0;
 
 		rc = lnet_ni_eager_recv(ni, msg);
-		if (rc == 0)
+		if (!rc)
 			goto again;
 		/* fall through */
 
@@ -1510,13 +1594,13 @@
 
 	/* NB handles only looked up by creator (no flips) */
 	md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
-	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+	if (!md || !md->md_threshold || md->md_me) {
 		CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
 			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
-			(md == NULL) ? "invalid" : "inactive",
+			!md ? "invalid" : "inactive",
 			hdr->msg.reply.dst_wmd.wh_interface_cookie,
 			hdr->msg.reply.dst_wmd.wh_object_cookie);
-		if (md != NULL && md->md_me != NULL)
+		if (md && md->md_me)
 			CERROR("REPLY MD also attached to portal %d\n",
 			       md->md_me->me_portal);
 
@@ -1524,13 +1608,13 @@
 		return ENOENT;		  /* +ve: OK but no match */
 	}
 
-	LASSERT(md->md_offset == 0);
+	LASSERT(!md->md_offset);
 
 	rlength = hdr->payload_length;
 	mlength = min_t(uint, rlength, md->md_length);
 
 	if (mlength < rlength &&
-	    (md->md_options & LNET_MD_TRUNCATE) == 0) {
+	    !(md->md_options & LNET_MD_TRUNCATE)) {
 		CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
 			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
 			rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
@@ -1545,7 +1629,7 @@
 
 	lnet_msg_attach_md(msg, md, 0, mlength);
 
-	if (mlength != 0)
+	if (mlength)
 		lnet_setpayloadbuffer(msg);
 
 	lnet_res_unlock(cpt);
@@ -1576,15 +1660,15 @@
 
 	/* NB handles only looked up by creator (no flips) */
 	md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
-	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+	if (!md || !md->md_threshold || md->md_me) {
 		/* Don't moan; this is expected */
 		CDEBUG(D_NET,
 		       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
 		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
-		       (md == NULL) ? "invalid" : "inactive",
+		       !md ? "invalid" : "inactive",
 		       hdr->msg.ack.dst_wmd.wh_interface_cookie,
 		       hdr->msg.ack.dst_wmd.wh_object_cookie);
-		if (md != NULL && md->md_me != NULL)
+		if (md && md->md_me)
 			CERROR("Source MD also attached to portal %d\n",
 			       md->md_me->me_portal);
 
@@ -1606,14 +1690,22 @@
 	return 0;
 }
 
+/**
+ * \retval LNET_CREDIT_OK	If \a msg is forwarded
+ * \retval LNET_CREDIT_WAIT	If \a msg is blocked for lack of a buffer
+ * \retval -ve			error code
+ */
 static int
 lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
 {
 	int rc = 0;
 
+	if (!the_lnet.ln_routing)
+		return -ECANCELED;
+
 	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
 	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
-		if (ni->ni_lnd->lnd_eager_recv == NULL) {
+		if (!ni->ni_lnd->lnd_eager_recv) {
 			msg->msg_rx_ready_delay = 1;
 		} else {
 			lnet_net_unlock(msg->msg_rx_cpt);
@@ -1622,7 +1714,7 @@
 		}
 	}
 
-	if (rc == 0)
+	if (!rc)
 		rc = lnet_post_routed_recv_locked(msg, 0);
 	return rc;
 }
@@ -1702,7 +1794,6 @@
 		      hdr->msg.reply.dst_wmd.wh_object_cookie,
 		      hdr->payload_length);
 	}
-
 }
 
 int
@@ -1765,20 +1856,20 @@
 
 	if (the_lnet.ln_routing &&
 	    ni->ni_last_alive != ktime_get_real_seconds()) {
-		lnet_ni_lock(ni);
-
 		/* NB: so far here is the only place to set NI status to "up */
+		lnet_ni_lock(ni);
 		ni->ni_last_alive = ktime_get_real_seconds();
-		if (ni->ni_status != NULL &&
+		if (ni->ni_status &&
 		    ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
 			ni->ni_status->ns_status = LNET_NI_STATUS_UP;
 		lnet_ni_unlock(ni);
 	}
 
-	/* Regard a bad destination NID as a protocol error.  Senders should
+	/*
+	 * Regard a bad destination NID as a protocol error.  Senders should
 	 * know what they're doing; if they don't they're misconfigured, buggy
-	 * or malicious so we chop them off at the knees :) */
-
+	 * or malicious so we chop them off at the knees :)
+	 */
 	if (!for_me) {
 		if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
 			/* should have gone direct */
@@ -1790,8 +1881,10 @@
 		}
 
 		if (lnet_islocalnid(dest_nid)) {
-			/* dest is another local NI; sender should have used
-			 * this node's NID on its own network */
+			/*
+			 * dest is another local NI; sender should have used
+			 * this node's NID on its own network
+			 */
 			CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
 			       libcfs_nid2str(from_nid),
 			       libcfs_nid2str(src_nid),
@@ -1816,9 +1909,10 @@
 		}
 	}
 
-	/* Message looks OK; we're not going to return an error, so we MUST
-	 * call back lnd_recv() come what may... */
-
+	/*
+	 * Message looks OK; we're not going to return an error, so we MUST
+	 * call back lnd_recv() come what may...
+	 */
 	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
 	    fail_peer(src_nid, 0)) {	     /* shall we now? */
 		CERROR("%s, src %s: Dropping %s to simulate failure\n",
@@ -1828,7 +1922,7 @@
 	}
 
 	msg = lnet_msg_alloc();
-	if (msg == NULL) {
+	if (!msg) {
 		CERROR("%s, src %s: Dropping %s (out of memory)\n",
 		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
 		       lnet_msgtyp2str(type));
@@ -1838,11 +1932,11 @@
 	/* msg zeroed in lnet_msg_alloc;
 	 * i.e. flags all clear, pointers NULL etc
 	 */
-
 	msg->msg_type = type;
 	msg->msg_private = private;
 	msg->msg_receiving = 1;
-	msg->msg_len = msg->msg_wanted = payload_length;
+	msg->msg_wanted = payload_length;
+	msg->msg_len = payload_length;
 	msg->msg_offset = 0;
 	msg->msg_hdr = *hdr;
 	/* for building message event */
@@ -1864,7 +1958,7 @@
 
 	lnet_net_lock(cpt);
 	rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
-	if (rc != 0) {
+	if (rc) {
 		lnet_net_unlock(cpt);
 		CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
 		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
@@ -1894,7 +1988,8 @@
 
 		if (rc < 0)
 			goto free_drop;
-		if (rc == 0) {
+
+		if (rc == LNET_CREDIT_OK) {
 			lnet_ni_recv(ni, msg->msg_private, msg, 0,
 				     0, payload_length, payload_length);
 		}
@@ -1922,13 +2017,13 @@
 		goto free_drop;  /* prevent an unused label if !kernel */
 	}
 
-	if (rc == 0)
+	if (!rc)
 		return 0;
 
 	LASSERT(rc == ENOENT);
 
  free_drop:
-	LASSERT(msg->msg_md == NULL);
+	LASSERT(!msg->msg_md);
 	lnet_finalize(ni, msg, rc);
 
  drop:
@@ -1950,9 +2045,9 @@
 		id.nid = msg->msg_hdr.src_nid;
 		id.pid = msg->msg_hdr.src_pid;
 
-		LASSERT(msg->msg_md == NULL);
+		LASSERT(!msg->msg_md);
 		LASSERT(msg->msg_rx_delayed);
-		LASSERT(msg->msg_rxpeer != NULL);
+		LASSERT(msg->msg_rxpeer);
 		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
 
 		CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
@@ -1962,10 +2057,11 @@
 		      msg->msg_hdr.msg.put.offset,
 		      msg->msg_hdr.payload_length, reason);
 
-		/* NB I can't drop msg's ref on msg_rxpeer until after I've
+		/*
+		 * NB I can't drop msg's ref on msg_rxpeer until after I've
 		 * called lnet_drop_message(), so I just hang onto msg as well
-		 * until that's done */
-
+		 * until that's done
+		 */
 		lnet_drop_message(msg->msg_rxpeer->lp_ni,
 				  msg->msg_rxpeer->lp_cpt,
 				  msg->msg_private, msg->msg_len);
@@ -1988,15 +2084,16 @@
 		msg = list_entry(head->next, lnet_msg_t, msg_list);
 		list_del(&msg->msg_list);
 
-		/* md won't disappear under me, since each msg
-		 * holds a ref on it */
-
+		/*
+		 * md won't disappear under me, since each msg
+		 * holds a ref on it
+		 */
 		id.nid = msg->msg_hdr.src_nid;
 		id.pid = msg->msg_hdr.src_pid;
 
 		LASSERT(msg->msg_rx_delayed);
-		LASSERT(msg->msg_md != NULL);
-		LASSERT(msg->msg_rxpeer != NULL);
+		LASSERT(msg->msg_md);
+		LASSERT(msg->msg_rxpeer);
 		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
 
 		CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
@@ -2064,7 +2161,6 @@
 	int cpt;
 	int rc;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
@@ -2075,7 +2171,7 @@
 	}
 
 	msg = lnet_msg_alloc();
-	if (msg == NULL) {
+	if (!msg) {
 		CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
 		       libcfs_id2str(target));
 		return -ENOMEM;
@@ -2086,11 +2182,11 @@
 	lnet_res_lock(cpt);
 
 	md = lnet_handle2md(&mdh);
-	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+	if (!md || !md->md_threshold || md->md_me) {
 		CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
 		       match_bits, portal, libcfs_id2str(target),
-		       md == NULL ? -1 : md->md_threshold);
-		if (md != NULL && md->md_me != NULL)
+		       !md ? -1 : md->md_threshold);
+		if (md && md->md_me)
 			CERROR("Source MD also attached to portal %d\n",
 			       md->md_me->me_portal);
 		lnet_res_unlock(cpt);
@@ -2128,9 +2224,9 @@
 	lnet_build_msg_event(msg, LNET_EVENT_SEND);
 
 	rc = lnet_send(self, msg, LNET_NID_ANY);
-	if (rc != 0) {
+	if (rc) {
 		CNETERR("Error sending PUT to %s: %d\n",
-		       libcfs_id2str(target), rc);
+			libcfs_id2str(target), rc);
 		lnet_finalize(NULL, msg, rc);
 	}
 
@@ -2142,13 +2238,14 @@
 lnet_msg_t *
 lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
 {
-	/* The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
+	/*
+	 * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
 	 * returns a msg for the LND to pass to lnet_finalize() when the sink
 	 * data has been received.
 	 *
 	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
-	 * lnet_finalize() is called on it, so the LND must call this first */
-
+	 * lnet_finalize() is called on it, so the LND must call this first
+	 */
 	struct lnet_msg *msg = lnet_msg_alloc();
 	struct lnet_libmd *getmd = getmsg->msg_md;
 	lnet_process_id_t peer_id = getmsg->msg_target;
@@ -2157,26 +2254,26 @@
 	LASSERT(!getmsg->msg_target_is_router);
 	LASSERT(!getmsg->msg_routing);
 
+	if (!msg) {
+		CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
+		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
+		goto drop;
+	}
+
 	cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
 	lnet_res_lock(cpt);
 
 	LASSERT(getmd->md_refcount > 0);
 
-	if (msg == NULL) {
-		CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
-			libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
-		goto drop;
-	}
-
-	if (getmd->md_threshold == 0) {
+	if (!getmd->md_threshold) {
 		CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
-			libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
-			getmd);
+		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
+		       getmd);
 		lnet_res_unlock(cpt);
 		goto drop;
 	}
 
-	LASSERT(getmd->md_offset == 0);
+	LASSERT(!getmd->md_offset);
 
 	CDEBUG(D_NET, "%s: Reply from %s md %p\n",
 	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
@@ -2209,7 +2306,7 @@
 	the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
 	lnet_net_unlock(cpt);
 
-	if (msg != NULL)
+	if (msg)
 		lnet_msg_free(msg);
 
 	return NULL;
@@ -2219,14 +2316,18 @@
 void
 lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
 {
-	/* Set the REPLY length, now the RDMA that elides the REPLY message has
-	 * completed and I know it. */
-	LASSERT(reply != NULL);
+	/*
+	 * Set the REPLY length, now the RDMA that elides the REPLY message has
+	 * completed and I know it.
+	 */
+	LASSERT(reply);
 	LASSERT(reply->msg_type == LNET_MSG_GET);
 	LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
 
-	/* NB I trusted my peer to RDMA.  If she tells me she's written beyond
-	 * the end of my buffer, I might as well be dead. */
+	/*
+	 * NB I trusted my peer to RDMA.  If she tells me she's written beyond
+	 * the end of my buffer, I might as well be dead.
+	 */
 	LASSERT(len <= reply->msg_ev.mlength);
 
 	reply->msg_ev.mlength = len;
@@ -2264,7 +2365,6 @@
 	int cpt;
 	int rc;
 
-	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
@@ -2275,7 +2375,7 @@
 	}
 
 	msg = lnet_msg_alloc();
-	if (msg == NULL) {
+	if (!msg) {
 		CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
 		       libcfs_id2str(target));
 		return -ENOMEM;
@@ -2285,11 +2385,11 @@
 	lnet_res_lock(cpt);
 
 	md = lnet_handle2md(&mdh);
-	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+	if (!md || !md->md_threshold || md->md_me) {
 		CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
 		       match_bits, portal, libcfs_id2str(target),
-		       md == NULL ? -1 : md->md_threshold);
-		if (md != NULL && md->md_me != NULL)
+		       !md ? -1 : md->md_threshold);
+		if (md && md->md_me)
 			CERROR("REPLY MD also attached to portal %d\n",
 			       md->md_me->me_portal);
 
@@ -2323,7 +2423,7 @@
 	rc = lnet_send(self, msg, LNET_NID_ANY);
 	if (rc < 0) {
 		CNETERR("Error sending GET to %s: %d\n",
-		       libcfs_id2str(target), rc);
+			libcfs_id2str(target), rc);
 		lnet_finalize(NULL, msg, rc);
 	}
 
@@ -2358,12 +2458,12 @@
 	__u32 order = 2;
 	struct list_head *rn_list;
 
-	/* if !local_nid_dist_zero, I don't return a distance of 0 ever
+	/*
+	 * if !local_nid_dist_zero, I don't return a distance of 0 ever
 	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
 	 * keep order 0 free for 0@lo and order 1 free for a local NID
-	 * match */
-
-	LASSERT(the_lnet.ln_init);
+	 * match
+	 */
 	LASSERT(the_lnet.ln_refcount > 0);
 
 	cpt = lnet_net_lock_current();
@@ -2372,9 +2472,9 @@
 		ni = list_entry(e, lnet_ni_t, ni_list);
 
 		if (ni->ni_nid == dstnid) {
-			if (srcnidp != NULL)
+			if (srcnidp)
 				*srcnidp = dstnid;
-			if (orderp != NULL) {
+			if (orderp) {
 				if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
 					*orderp = 0;
 				else
@@ -2386,9 +2486,9 @@
 		}
 
 		if (LNET_NIDNET(ni->ni_nid) == dstnet) {
-			if (srcnidp != NULL)
+			if (srcnidp)
 				*srcnidp = ni->ni_nid;
-			if (orderp != NULL)
+			if (orderp)
 				*orderp = order;
 			lnet_net_unlock(cpt);
 			return 1;
@@ -2408,17 +2508,17 @@
 			LASSERT(!list_empty(&rnet->lrn_routes));
 
 			list_for_each_entry(route, &rnet->lrn_routes,
-						lr_list) {
-				if (shortest == NULL ||
+					    lr_list) {
+				if (!shortest ||
 				    route->lr_hops < shortest->lr_hops)
 					shortest = route;
 			}
 
-			LASSERT(shortest != NULL);
+			LASSERT(shortest);
 			hops = shortest->lr_hops;
-			if (srcnidp != NULL)
+			if (srcnidp)
 				*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
-			if (orderp != NULL)
+			if (orderp)
 				*orderp = order;
 			lnet_net_unlock(cpt);
 			return hops + 1;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 43977e8d..c372390 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -74,7 +74,6 @@
 		ev->initiator.nid = LNET_NID_ANY;
 		ev->initiator.pid = the_lnet.ln_pid;
 		ev->sender        = LNET_NID_ANY;
-
 	} else {
 		/* event for passive message */
 		ev->target.pid    = hdr->dest_pid;
@@ -173,7 +172,7 @@
 	lnet_event_t *ev = &msg->msg_ev;
 
 	LASSERT(msg->msg_tx_committed);
-	if (status != 0)
+	if (status)
 		goto out;
 
 	counters = the_lnet.ln_counters[msg->msg_tx_cpt];
@@ -181,7 +180,7 @@
 	default: /* routed message */
 		LASSERT(msg->msg_routing);
 		LASSERT(msg->msg_rx_committed);
-		LASSERT(ev->type == 0);
+		LASSERT(!ev->type);
 
 		counters->route_length += msg->msg_len;
 		counters->route_count++;
@@ -203,8 +202,10 @@
 
 	case LNET_EVENT_GET:
 		LASSERT(msg->msg_rx_committed);
-		/* overwritten while sending reply, we should never be
-		 * here for optimized GET */
+		/*
+		 * overwritten while sending reply, we should never be
+		 * here for optimized GET
+		 */
 		LASSERT(msg->msg_type == LNET_MSG_REPLY);
 		msg->msg_type = LNET_MSG_GET; /* fix type */
 		break;
@@ -225,13 +226,13 @@
 	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
 	LASSERT(msg->msg_rx_committed);
 
-	if (status != 0)
+	if (status)
 		goto out;
 
 	counters = the_lnet.ln_counters[msg->msg_rx_cpt];
 	switch (ev->type) {
 	default:
-		LASSERT(ev->type == 0);
+		LASSERT(!ev->type);
 		LASSERT(msg->msg_routing);
 		goto out;
 
@@ -240,10 +241,12 @@
 		break;
 
 	case LNET_EVENT_GET:
-		/* type is "REPLY" if it's an optimized GET on passive side,
+		/*
+		 * type is "REPLY" if it's an optimized GET on passive side,
 		 * because optimized GET will never be committed for sending,
 		 * so message type wouldn't be changed back to "GET" by
-		 * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
+		 * lnet_msg_decommit_tx(), see details in lnet_parse_get()
+		 */
 		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
 			msg->msg_type == LNET_MSG_GET);
 		counters->send_length += msg->msg_wanted;
@@ -254,8 +257,10 @@
 		break;
 
 	case LNET_EVENT_REPLY:
-		/* type is "GET" if it's an optimized GET on active side,
-		 * see details in lnet_create_reply_msg() */
+		/*
+		 * type is "GET" if it's an optimized GET on active side,
+		 * see details in lnet_create_reply_msg()
+		 */
 		LASSERT(msg->msg_type == LNET_MSG_GET ||
 			msg->msg_type == LNET_MSG_REPLY);
 		break;
@@ -309,10 +314,12 @@
 		   unsigned int offset, unsigned int mlen)
 {
 	/* NB: @offset and @len are only useful for receiving */
-	/* Here, we attach the MD on lnet_msg and mark it busy and
+	/*
+	 * Here, we attach the MD on lnet_msg and mark it busy and
 	 * decrementing its threshold. Come what may, the lnet_msg "owns"
 	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
-	 * signals completion. */
+	 * signals completion.
+	 */
 	LASSERT(!msg->msg_routing);
 
 	msg->msg_md = md;
@@ -343,7 +350,7 @@
 	LASSERT(md->md_refcount >= 0);
 
 	unlink = lnet_md_unlinkable(md);
-	if (md->md_eq != NULL) {
+	if (md->md_eq) {
 		msg->msg_ev.status   = status;
 		msg->msg_ev.unlinked = unlink;
 		lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
@@ -364,7 +371,7 @@
 
 	LASSERT(msg->msg_onactivelist);
 
-	if (status == 0 && msg->msg_ack) {
+	if (!status && msg->msg_ack) {
 		/* Only send an ACK if the PUT completed successfully */
 
 		lnet_msg_decommit(msg, cpt, 0);
@@ -383,8 +390,10 @@
 		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
 		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
 
-		/* NB: we probably want to use NID of msg::msg_from as 3rd
-		 * parameter (router NID) if it's routed message */
+		/*
+		 * NB: we probably want to use NID of msg::msg_from as 3rd
+		 * parameter (router NID) if it's routed message
+		 */
 		rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);
 
 		lnet_net_lock(cpt);
@@ -401,7 +410,7 @@
 		 */
 		return rc;
 
-	} else if (status == 0 &&	/* OK so far */
+	} else if (!status &&	/* OK so far */
 		   (msg->msg_routing && !msg->msg_sending)) {
 		/* not forwarded */
 		LASSERT(!msg->msg_receiving);	/* called back recv already */
@@ -442,7 +451,7 @@
 
 	LASSERT(!in_interrupt());
 
-	if (msg == NULL)
+	if (!msg)
 		return;
 #if 0
 	CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
@@ -458,12 +467,12 @@
 	       msg->msg_rtrcredit ? "F" : "",
 	       msg->msg_peerrtrcredit ? "f" : "",
 	       msg->msg_onactivelist ? "!" : "",
-	       msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
-	       msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
+	       !msg->msg_txpeer ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
+	       !msg->msg_rxpeer ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
 #endif
 	msg->msg_ev.status = status;
 
-	if (msg->msg_md != NULL) {
+	if (msg->msg_md) {
 		cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
 
 		lnet_res_lock(cpt);
@@ -491,15 +500,16 @@
 	container = the_lnet.ln_msg_containers[cpt];
 	list_add_tail(&msg->msg_list, &container->msc_finalizing);
 
-	/* Recursion breaker.  Don't complete the message here if I am (or
-	 * enough other threads are) already completing messages */
-
+	/*
+	 * Recursion breaker.  Don't complete the message here if I am (or
+	 * enough other threads are) already completing messages
+	 */
 	my_slot = -1;
 	for (i = 0; i < container->msc_nfinalizers; i++) {
 		if (container->msc_finalizers[i] == current)
 			break;
 
-		if (my_slot < 0 && container->msc_finalizers[i] == NULL)
+		if (my_slot < 0 && !container->msc_finalizers[i])
 			my_slot = i;
 	}
 
@@ -512,21 +522,23 @@
 
 	while (!list_empty(&container->msc_finalizing)) {
 		msg = list_entry(container->msc_finalizing.next,
-				     lnet_msg_t, msg_list);
+				 lnet_msg_t, msg_list);
 
 		list_del(&msg->msg_list);
 
-		/* NB drops and regains the lnet lock if it actually does
-		 * anything, so my finalizing friends can chomp along too */
+		/*
+		 * NB drops and regains the lnet lock if it actually does
+		 * anything, so my finalizing friends can chomp along too
+		 */
 		rc = lnet_complete_msg_locked(msg, cpt);
-		if (rc != 0)
+		if (rc)
 			break;
 	}
 
 	container->msc_finalizers[my_slot] = NULL;
 	lnet_net_unlock(cpt);
 
-	if (rc != 0)
+	if (rc)
 		goto again;
 }
 EXPORT_SYMBOL(lnet_finalize);
@@ -536,12 +548,12 @@
 {
 	int count = 0;
 
-	if (container->msc_init == 0)
+	if (!container->msc_init)
 		return;
 
 	while (!list_empty(&container->msc_active)) {
 		lnet_msg_t *msg = list_entry(container->msc_active.next,
-						 lnet_msg_t, msg_activelist);
+					     lnet_msg_t, msg_activelist);
 
 		LASSERT(msg->msg_onactivelist);
 		msg->msg_onactivelist = 0;
@@ -553,41 +565,23 @@
 	if (count > 0)
 		CERROR("%d active msg on exit\n", count);
 
-	if (container->msc_finalizers != NULL) {
+	if (container->msc_finalizers) {
 		LIBCFS_FREE(container->msc_finalizers,
 			    container->msc_nfinalizers *
 			    sizeof(*container->msc_finalizers));
 		container->msc_finalizers = NULL;
 	}
-#ifdef LNET_USE_LIB_FREELIST
-	lnet_freelist_fini(&container->msc_freelist);
-#endif
 	container->msc_init = 0;
 }
 
 int
 lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
 {
-	int rc;
-
 	container->msc_init = 1;
 
 	INIT_LIST_HEAD(&container->msc_active);
 	INIT_LIST_HEAD(&container->msc_finalizing);
 
-#ifdef LNET_USE_LIB_FREELIST
-	memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));
-
-	rc = lnet_freelist_init(&container->msc_freelist,
-				LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
-	if (rc != 0) {
-		CERROR("Failed to init freelist for message container\n");
-		lnet_msg_container_cleanup(container);
-		return rc;
-	}
-#else
-	rc = 0;
-#endif
 	/* number of CPUs */
 	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
 
@@ -595,13 +589,13 @@
 			 container->msc_nfinalizers *
 			 sizeof(*container->msc_finalizers));
 
-	if (container->msc_finalizers == NULL) {
+	if (!container->msc_finalizers) {
 		CERROR("Failed to allocate message finalizers\n");
 		lnet_msg_container_cleanup(container);
 		return -ENOMEM;
 	}
 
-	return rc;
+	return 0;
 }
 
 void
@@ -610,7 +604,7 @@
 	struct lnet_msg_container *container;
 	int i;
 
-	if (the_lnet.ln_msg_containers == NULL)
+	if (!the_lnet.ln_msg_containers)
 		return;
 
 	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
@@ -630,14 +624,14 @@
 	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
 						      sizeof(*container));
 
-	if (the_lnet.ln_msg_containers == NULL) {
+	if (!the_lnet.ln_msg_containers) {
 		CERROR("Failed to allocate cpu-partition data for network\n");
 		return -ENOMEM;
 	}
 
 	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
 		rc = lnet_msg_container_setup(container, i);
-		if (rc != 0) {
+		if (rc) {
 			lnet_msg_containers_destroy();
 			return rc;
 		}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index bd7b071..0281c6a1 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
@@ -50,7 +45,7 @@
 	struct lnet_portal *ptl = the_lnet.ln_portals[index];
 	int unique;
 
-	unique = ignore_bits == 0 &&
+	unique = !ignore_bits &&
 		 match_id.nid != LNET_NID_ANY &&
 		 match_id.pid != LNET_PID_ANY;
 
@@ -139,8 +134,10 @@
 lnet_try_match_md(lnet_libmd_t *md,
 		  struct lnet_match_info *info, struct lnet_msg *msg)
 {
-	/* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
-	 * lnet_match_blocked_msg() relies on this to avoid races */
+	/*
+	 * ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
+	 * lnet_match_blocked_msg() relies on this to avoid races
+	 */
 	unsigned int offset;
 	unsigned int mlength;
 	lnet_me_t *me = md->md_me;
@@ -150,7 +147,7 @@
 		return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
 
 	/* mismatched MD op */
-	if ((md->md_options & info->mi_opc) == 0)
+	if (!(md->md_options & info->mi_opc))
 		return LNET_MATCHMD_NONE;
 
 	/* mismatched ME nid/pid? */
@@ -163,17 +160,17 @@
 		return LNET_MATCHMD_NONE;
 
 	/* mismatched ME matchbits? */
-	if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
+	if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits)
 		return LNET_MATCHMD_NONE;
 
 	/* Hurrah! This _is_ a match; check it out... */
 
-	if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
+	if (!(md->md_options & LNET_MD_MANAGE_REMOTE))
 		offset = md->md_offset;
 	else
 		offset = info->mi_roffset;
 
-	if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
+	if (md->md_options & LNET_MD_MAX_SIZE) {
 		mlength = md->md_max_size;
 		LASSERT(md->md_offset + mlength <= md->md_length);
 	} else {
@@ -182,7 +179,7 @@
 
 	if (info->mi_rlength <= mlength) {	/* fits in allowed space */
 		mlength = info->mi_rlength;
-	} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
+	} else if (!(md->md_options & LNET_MD_TRUNCATE)) {
 		/* this packet _really_ is too big */
 		CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
 		       libcfs_id2str(info->mi_id), info->mi_mbits,
@@ -203,10 +200,12 @@
 	if (!lnet_md_exhausted(md))
 		return LNET_MATCHMD_OK;
 
-	/* Auto-unlink NOW, so the ME gets unlinked if required.
+	/*
+	 * Auto-unlink NOW, so the ME gets unlinked if required.
 	 * We bumped md->md_refcount above so the MD just gets flagged
-	 * for unlink when it is finalized. */
-	if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
+	 * for unlink when it is finalized.
+	 */
+	if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK)
 		lnet_md_unlink(md);
 
 	return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
@@ -239,7 +238,7 @@
 	ptl = the_lnet.ln_portals[index];
 
 	mtable = lnet_match2mt(ptl, id, mbits);
-	if (mtable != NULL) /* unique portal or only one match-table */
+	if (mtable) /* unique portal or only one match-table */
 		return mtable;
 
 	/* it's a wildcard portal */
@@ -248,8 +247,10 @@
 		return NULL;
 	case LNET_INS_BEFORE:
 	case LNET_INS_AFTER:
-		/* posted by no affinity thread, always hash to specific
-		 * match-table to avoid buffer stealing which is heavy */
+		/*
+		 * posted by no affinity thread, always hash to specific
+		 * match-table to avoid buffer stealing which is heavy
+		 */
 		return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
 	case LNET_INS_LOCAL:
 		/* posted by cpu-affinity thread */
@@ -274,7 +275,7 @@
 	LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
 
 	mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
-	if (mtable != NULL)
+	if (mtable)
 		return mtable;
 
 	/* it's a wildcard portal */
@@ -298,10 +299,12 @@
 		/* is there any active entry for this portal? */
 		nmaps = ptl->ptl_mt_nmaps;
 		/* map to an active mtable to avoid heavy "stealing" */
-		if (nmaps != 0) {
-			/* NB: there is possibility that ptl_mt_maps is being
+		if (nmaps) {
+			/*
+			 * NB: there is possibility that ptl_mt_maps is being
 			 * changed because we are not under protection of
-			 * lnet_ptl_lock, but it shouldn't hurt anything */
+			 * lnet_ptl_lock, but it shouldn't hurt anything
+			 */
 			cpt = ptl->ptl_mt_maps[rotor % nmaps];
 		}
 	}
@@ -331,7 +334,7 @@
 	bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
 	pos &= (1 << LNET_MT_BITS_U64) - 1;
 
-	return ((*bmap) & (1ULL << pos)) != 0;
+	return (*bmap & (1ULL << pos));
 }
 
 static void
@@ -391,18 +394,20 @@
 
 	list_for_each_entry_safe(me, tmp, head, me_list) {
 		/* ME attached but MD not attached yet */
-		if (me->me_md == NULL)
+		if (!me->me_md)
 			continue;
 
 		LASSERT(me == me->me_md->md_me);
 
 		rc = lnet_try_match_md(me->me_md, info, msg);
-		if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
+		if (!(rc & LNET_MATCHMD_EXHAUSTED))
 			exhausted = 0; /* mlist is not empty */
 
-		if ((rc & LNET_MATCHMD_FINISH) != 0) {
-			/* don't return EXHAUSTED bit because we don't know
-			 * whether the mlist is empty or not */
+		if (rc & LNET_MATCHMD_FINISH) {
+			/*
+			 * don't return EXHAUSTED bit because we don't know
+			 * whether the mlist is empty or not
+			 */
 			return rc & ~LNET_MATCHMD_EXHAUSTED;
 		}
 	}
@@ -413,7 +418,7 @@
 			exhausted = 0;
 	}
 
-	if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
+	if (!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
 		head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
 		goto again; /* re-check MEs w/o ignore-bits */
 	}
@@ -430,8 +435,10 @@
 {
 	int rc;
 
-	/* message arrived before any buffer posting on this portal,
-	 * simply delay or drop this message */
+	/*
+	 * message arrived before any buffer posting on this portal,
+	 * simply delay or drop this message
+	 */
 	if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
 		return 0;
 
@@ -446,7 +453,7 @@
 		if (msg->msg_rx_ready_delay) {
 			msg->msg_rx_delayed = 1;
 			list_add_tail(&msg->msg_list,
-					  &ptl->ptl_msg_delayed);
+				      &ptl->ptl_msg_delayed);
 		}
 		rc = LNET_MATCHMD_NONE;
 	} else {
@@ -465,9 +472,11 @@
 	int rc = 0;
 	int i;
 
-	/* steal buffer from other CPTs, and delay it if nothing to steal,
+	/*
+	 * steal buffer from other CPTs, and delay it if nothing to steal,
 	 * this function is more expensive than a regular match, but we
-	 * don't expect it can happen a lot */
+	 * don't expect it can happen a lot
+	 */
 	LASSERT(lnet_ptl_is_wildcard(ptl));
 
 	for (i = 0; i < LNET_CPT_NUMBER; i++) {
@@ -476,37 +485,39 @@
 
 		cpt = (first + i) % LNET_CPT_NUMBER;
 		mtable = ptl->ptl_mtables[cpt];
-		if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
+		if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
 			continue;
 
 		lnet_res_lock(cpt);
 		lnet_ptl_lock(ptl);
 
-		if (i == 0) { /* the first try, attach on stealing list */
+		if (!i) { /* the first try, attach on stealing list */
 			list_add_tail(&msg->msg_list,
-					  &ptl->ptl_msg_stealing);
+				      &ptl->ptl_msg_stealing);
 		}
 
 		if (!list_empty(&msg->msg_list)) { /* on stealing list */
 			rc = lnet_mt_match_md(mtable, info, msg);
 
-			if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
+			if ((rc & LNET_MATCHMD_EXHAUSTED) &&
 			    mtable->mt_enabled)
 				lnet_ptl_disable_mt(ptl, cpt);
 
-			if ((rc & LNET_MATCHMD_FINISH) != 0)
+			if (rc & LNET_MATCHMD_FINISH)
 				list_del_init(&msg->msg_list);
 
 		} else {
-			/* could be matched by lnet_ptl_attach_md()
-			 * which is called by another thread */
-			rc = msg->msg_md == NULL ?
+			/*
+			 * could be matched by lnet_ptl_attach_md()
+			 * which is called by another thread
+			 */
+			rc = !msg->msg_md ?
 			     LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
 		}
 
 		if (!list_empty(&msg->msg_list) && /* not matched yet */
 		    (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
-		     ptl->ptl_mt_nmaps == 0 ||   /* no active CPT */
+		     !ptl->ptl_mt_nmaps ||   /* no active CPT */
 		     (ptl->ptl_mt_nmaps == 1 &&  /* the only active CPT */
 		      ptl->ptl_mt_maps[0] == cpt))) {
 			/* nothing to steal, delay or drop */
@@ -515,7 +526,7 @@
 			if (lnet_ptl_is_lazy(ptl)) {
 				msg->msg_rx_delayed = 1;
 				list_add_tail(&msg->msg_list,
-						  &ptl->ptl_msg_delayed);
+					      &ptl->ptl_msg_delayed);
 				rc = LNET_MATCHMD_NONE;
 			} else {
 				rc = LNET_MATCHMD_DROP;
@@ -525,7 +536,7 @@
 		lnet_ptl_unlock(ptl);
 		lnet_res_unlock(cpt);
 
-		if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
+		if ((rc & LNET_MATCHMD_FINISH) || msg->msg_rx_delayed)
 			break;
 	}
 
@@ -551,7 +562,7 @@
 
 	ptl = the_lnet.ln_portals[info->mi_portal];
 	rc = lnet_ptl_match_early(ptl, msg);
-	if (rc != 0) /* matched or delayed early message */
+	if (rc) /* matched or delayed early message */
 		return rc;
 
 	mtable = lnet_mt_of_match(info, msg);
@@ -563,13 +574,13 @@
 	}
 
 	rc = lnet_mt_match_md(mtable, info, msg);
-	if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
+	if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) {
 		lnet_ptl_lock(ptl);
 		lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
 		lnet_ptl_unlock(ptl);
 	}
 
-	if ((rc & LNET_MATCHMD_FINISH) != 0)	/* matched or dropping */
+	if (rc & LNET_MATCHMD_FINISH)	/* matched or dropping */
 		goto out1;
 
 	if (!msg->msg_rx_ready_delay)
@@ -630,7 +641,7 @@
 	int exhausted = 0;
 	int cpt;
 
-	LASSERT(md->md_refcount == 0); /* a brand new MD */
+	LASSERT(!md->md_refcount); /* a brand new MD */
 
 	me->me_md = md;
 	md->md_me = me;
@@ -664,15 +675,15 @@
 
 		rc = lnet_try_match_md(md, &info, msg);
 
-		exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
-		if ((rc & LNET_MATCHMD_NONE) != 0) {
+		exhausted = (rc & LNET_MATCHMD_EXHAUSTED);
+		if (rc & LNET_MATCHMD_NONE) {
 			if (exhausted)
 				break;
 			continue;
 		}
 
 		/* Hurrah! This _is_ a match */
-		LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
+		LASSERT(rc & LNET_MATCHMD_FINISH);
 		list_del_init(&msg->msg_list);
 
 		if (head == &ptl->ptl_msg_stealing) {
@@ -682,7 +693,7 @@
 			continue;
 		}
 
-		if ((rc & LNET_MATCHMD_OK) != 0) {
+		if (rc & LNET_MATCHMD_OK) {
 			list_add_tail(&msg->msg_list, matches);
 
 			CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
@@ -717,7 +728,7 @@
 	struct lnet_match_table	*mtable;
 	int i;
 
-	if (ptl->ptl_mtables == NULL) /* uninitialized portal */
+	if (!ptl->ptl_mtables) /* uninitialized portal */
 		return;
 
 	LASSERT(list_empty(&ptl->ptl_msg_delayed));
@@ -727,7 +738,7 @@
 		lnet_me_t *me;
 		int j;
 
-		if (mtable->mt_mhash == NULL) /* uninitialized match-table */
+		if (!mtable->mt_mhash) /* uninitialized match-table */
 			continue;
 
 		mhash = mtable->mt_mhash;
@@ -735,7 +746,7 @@
 		for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
 			while (!list_empty(&mhash[j])) {
 				me = list_entry(mhash[j].next,
-						    lnet_me_t, me_list);
+						lnet_me_t, me_list);
 				CERROR("Active ME %p on exit\n", me);
 				list_del(&me->me_list);
 				lnet_me_free(me);
@@ -759,7 +770,7 @@
 
 	ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
 					    sizeof(struct lnet_match_table));
-	if (ptl->ptl_mtables == NULL) {
+	if (!ptl->ptl_mtables) {
 		CERROR("Failed to create match table for portal %d\n", index);
 		return -ENOMEM;
 	}
@@ -772,7 +783,7 @@
 		/* the extra entry is for MEs with ignore bits */
 		LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
 				 sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
-		if (mhash == NULL) {
+		if (!mhash) {
 			CERROR("Failed to create match hash for portal %d\n",
 			       index);
 			goto failed;
@@ -800,7 +811,7 @@
 {
 	int i;
 
-	if (the_lnet.ln_portals == NULL)
+	if (!the_lnet.ln_portals)
 		return;
 
 	for (i = 0; i < the_lnet.ln_nportals; i++)
@@ -820,7 +831,7 @@
 
 	the_lnet.ln_nportals = MAX_PORTALS;
 	the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size);
-	if (the_lnet.ln_portals == NULL) {
+	if (!the_lnet.ln_portals) {
 		CERROR("Failed to allocate portals table\n");
 		return -ENOMEM;
 	}
@@ -886,17 +897,8 @@
 }
 EXPORT_SYMBOL(LNetSetLazyPortal);
 
-/**
- * Turn off the lazy portal attribute. Delayed requests on the portal,
- * if any, will be all dropped when this function returns.
- *
- * \param portal Index of the portal to disable the lazy attribute on.
- *
- * \retval 0       On success.
- * \retval -EINVAL If \a portal is not a valid index.
- */
 int
-LNetClearLazyPortal(int portal)
+lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason)
 {
 	struct lnet_portal *ptl;
 	LIST_HEAD(zombies);
@@ -915,21 +917,48 @@
 		return 0;
 	}
 
-	if (the_lnet.ln_shutdown)
-		CWARN("Active lazy portal %d on exit\n", portal);
-	else
-		CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
+	if (ni) {
+		struct lnet_msg *msg, *tmp;
 
-	/* grab all the blocked messages atomically */
-	list_splice_init(&ptl->ptl_msg_delayed, &zombies);
+		/* grab all messages which are on the NI passed in */
+		list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed,
+					 msg_list) {
+			if (msg->msg_rxpeer->lp_ni == ni)
+				list_move(&msg->msg_list, &zombies);
+		}
+	} else {
+		if (the_lnet.ln_shutdown)
+			CWARN("Active lazy portal %d on exit\n", portal);
+		else
+			CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
 
-	lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
+		/* grab all the blocked messages atomically */
+		list_splice_init(&ptl->ptl_msg_delayed, &zombies);
+
+		lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
+	}
 
 	lnet_ptl_unlock(ptl);
 	lnet_res_unlock(LNET_LOCK_EX);
 
-	lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr");
+	lnet_drop_delayed_msg_list(&zombies, reason);
 
 	return 0;
 }
+
+/**
+ * Turn off the lazy portal attribute. Delayed requests on the portal,
+ * if any, will be all dropped when this function returns.
+ *
+ * \param portal Index of the portal to disable the lazy attribute on.
+ *
+ * \retval 0       On success.
+ * \retval -EINVAL If \a portal is not a valid index.
+ */
+int
+LNetClearLazyPortal(int portal)
+{
+	return lnet_clear_lazy_portal(NULL, portal,
+				      "Clearing lazy portal attr");
+}
 EXPORT_SYMBOL(LNetClearLazyPortal);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 589ecc8..88905d5 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -64,7 +64,7 @@
 	int rc;
 
 	rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create socket: %d\n", rc);
 		return rc;
 	}
@@ -101,12 +101,12 @@
 
 	strcpy(ifr.ifr_name, name);
 	rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't get flags for interface %s\n", name);
 		return rc;
 	}
 
-	if ((ifr.ifr_flags & IFF_UP) == 0) {
+	if (!(ifr.ifr_flags & IFF_UP)) {
 		CDEBUG(D_NET, "Interface %s down\n", name);
 		*up = 0;
 		*ip = *mask = 0;
@@ -117,7 +117,7 @@
 	strcpy(ifr.ifr_name, name);
 	ifr.ifr_addr.sa_family = AF_INET;
 	rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't get IP address for interface %s\n", name);
 		return rc;
 	}
@@ -128,7 +128,7 @@
 	strcpy(ifr.ifr_name, name);
 	ifr.ifr_addr.sa_family = AF_INET;
 	rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't get netmask for interface %s\n", name);
 		return rc;
 	}
@@ -159,13 +159,13 @@
 	for (;;) {
 		if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
 			toobig = 1;
-			nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+			nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
 			CWARN("Too many interfaces: only enumerating first %d\n",
 			      nalloc);
 		}
 
 		LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
-		if (ifr == NULL) {
+		if (!ifr) {
 			CERROR("ENOMEM enumerating up to %d interfaces\n",
 			       nalloc);
 			rc = -ENOMEM;
@@ -181,9 +181,9 @@
 			goto out1;
 		}
 
-		LASSERT(rc == 0);
+		LASSERT(!rc);
 
-		nfound = ifc.ifc_len/sizeof(*ifr);
+		nfound = ifc.ifc_len / sizeof(*ifr);
 		LASSERT(nfound <= nalloc);
 
 		if (nfound < nalloc || toobig)
@@ -193,11 +193,11 @@
 		nalloc *= 2;
 	}
 
-	if (nfound == 0)
+	if (!nfound)
 		goto out1;
 
 	LIBCFS_ALLOC(names, nfound * sizeof(*names));
-	if (names == NULL) {
+	if (!names) {
 		rc = -ENOMEM;
 		goto out1;
 	}
@@ -213,7 +213,7 @@
 		}
 
 		LIBCFS_ALLOC(names[i], IFNAMSIZ);
-		if (names[i] == NULL) {
+		if (!names[i]) {
 			rc = -ENOMEM;
 			goto out2;
 		}
@@ -242,7 +242,7 @@
 
 	LASSERT(n > 0);
 
-	for (i = 0; i < n && names[i] != NULL; i++)
+	for (i = 0; i < n && names[i]; i++)
 		LIBCFS_FREE(names[i], IFNAMSIZ);
 
 	LIBCFS_FREE(names, n * sizeof(*names));
@@ -258,19 +258,20 @@
 	struct timeval tv;
 
 	LASSERT(nob > 0);
-	/* Caller may pass a zero timeout if she thinks the socket buffer is
-	 * empty enough to take the whole message immediately */
-
+	/*
+	 * Caller may pass a zero timeout if she thinks the socket buffer is
+	 * empty enough to take the whole message immediately
+	 */
 	for (;;) {
 		struct kvec  iov = {
 			.iov_base = buffer,
 			.iov_len  = nob
 		};
 		struct msghdr msg = {
-			.msg_flags      = (timeout == 0) ? MSG_DONTWAIT : 0
+			.msg_flags      = !timeout ? MSG_DONTWAIT : 0
 		};
 
-		if (timeout != 0) {
+		if (timeout) {
 			/* Set send timeout to remaining time */
 			tv = (struct timeval) {
 				.tv_sec = ticks / HZ,
@@ -278,7 +279,7 @@
 			};
 			rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
 					       (char *)&tv, sizeof(tv));
-			if (rc != 0) {
+			if (rc) {
 				CERROR("Can't set socket send timeout %ld.%06d: %d\n",
 				       (long)tv.tv_sec, (int)tv.tv_usec, rc);
 				return rc;
@@ -295,7 +296,7 @@
 		if (rc < 0)
 			return rc;
 
-		if (rc == 0) {
+		if (!rc) {
 			CERROR("Unexpected zero rc\n");
 			return -ECONNABORTED;
 		}
@@ -337,7 +338,7 @@
 		};
 		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
 				       (char *)&tv, sizeof(tv));
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
 			       (long)tv.tv_sec, (int)tv.tv_usec, rc);
 			return rc;
@@ -350,13 +351,13 @@
 		if (rc < 0)
 			return rc;
 
-		if (rc == 0)
+		if (!rc)
 			return -ECONNRESET;
 
 		buffer = ((char *)buffer) + rc;
 		nob -= rc;
 
-		if (nob == 0)
+		if (!nob)
 			return 0;
 
 		if (ticks <= 0)
@@ -379,7 +380,7 @@
 
 	rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
 	*sockp = sock;
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create socket: %d\n", rc);
 		return rc;
 	}
@@ -387,16 +388,16 @@
 	option = 1;
 	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
 			       (char *)&option, sizeof(option));
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
 		goto failed;
 	}
 
-	if (local_ip != 0 || local_port != 0) {
+	if (local_ip || local_port) {
 		memset(&locaddr, 0, sizeof(locaddr));
 		locaddr.sin_family = AF_INET;
 		locaddr.sin_port = htons(local_port);
-		locaddr.sin_addr.s_addr = (local_ip == 0) ?
+		locaddr.sin_addr.s_addr = !local_ip ?
 					  INADDR_ANY : htonl(local_ip);
 
 		rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
@@ -406,7 +407,7 @@
 			*fatal = 0;
 			goto failed;
 		}
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Error trying to bind to port %d: %d\n",
 			       local_port, rc);
 			goto failed;
@@ -425,22 +426,22 @@
 	int option;
 	int rc;
 
-	if (txbufsize != 0) {
+	if (txbufsize) {
 		option = txbufsize;
 		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
 				       (char *)&option, sizeof(option));
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't set send buffer %d: %d\n",
 			       option, rc);
 			return rc;
 		}
 	}
 
-	if (rxbufsize != 0) {
+	if (rxbufsize) {
 		option = rxbufsize;
 		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
-				      (char *)&option, sizeof(option));
-		if (rc != 0) {
+				       (char *)&option, sizeof(option));
+		if (rc) {
 			CERROR("Can't set receive buffer %d: %d\n",
 			       option, rc);
 			return rc;
@@ -461,16 +462,16 @@
 		rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len);
 	else
 		rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Error %d getting sock %s IP/port\n",
 		       rc, remote ? "peer" : "local");
 		return rc;
 	}
 
-	if (ip != NULL)
+	if (ip)
 		*ip = ntohl(sin.sin_addr.s_addr);
 
-	if (port != NULL)
+	if (port)
 		*port = ntohs(sin.sin_port);
 
 	return 0;
@@ -480,10 +481,10 @@
 int
 lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize)
 {
-	if (txbufsize != NULL)
+	if (txbufsize)
 		*txbufsize = sock->sk->sk_sndbuf;
 
-	if (rxbufsize != NULL)
+	if (rxbufsize)
 		*rxbufsize = sock->sk->sk_rcvbuf;
 
 	return 0;
@@ -498,7 +499,7 @@
 	int rc;
 
 	rc = lnet_sock_create(sockp, &fatal, local_ip, local_port);
-	if (rc != 0) {
+	if (rc) {
 		if (!fatal)
 			CERROR("Can't create socket: port %d already in use\n",
 			       local_port);
@@ -506,14 +507,13 @@
 	}
 
 	rc = kernel_listen(*sockp, backlog);
-	if (rc == 0)
+	if (!rc)
 		return 0;
 
 	CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
 	sock_release(*sockp);
 	return rc;
 }
-EXPORT_SYMBOL(lnet_sock_listen);
 
 int
 lnet_sock_accept(struct socket **newsockp, struct socket *sock)
@@ -524,8 +524,10 @@
 
 	init_waitqueue_entry(&wait, current);
 
-	/* XXX this should add a ref to sock->ops->owner, if
-	 * TCP could be a module */
+	/*
+	 * XXX this should add a ref to sock->ops->owner, if
+	 * TCP could be a module
+	 */
 	rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock);
 	if (rc) {
 		CERROR("Can't allocate socket\n");
@@ -545,7 +547,7 @@
 		rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
 	}
 
-	if (rc != 0)
+	if (rc)
 		goto failed;
 
 	*newsockp = newsock;
@@ -555,7 +557,6 @@
 	sock_release(newsock);
 	return rc;
 }
-EXPORT_SYMBOL(lnet_sock_accept);
 
 int
 lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
@@ -565,7 +566,7 @@
 	int rc;
 
 	rc = lnet_sock_create(sockp, fatal, local_ip, local_port);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	memset(&srvaddr, 0, sizeof(srvaddr));
@@ -575,13 +576,15 @@
 
 	rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr,
 			    sizeof(srvaddr), 0);
-	if (rc == 0)
+	if (!rc)
 		return 0;
 
-	/* EADDRNOTAVAIL probably means we're already connected to the same
+	/*
+	 * EADDRNOTAVAIL probably means we're already connected to the same
 	 * peer/port on the same local port on a differently typed
 	 * connection.  Let our caller retry with a different local
-	 * port... */
+	 * port...
+	 */
 	*fatal = !(rc == -EADDRNOTAVAIL);
 
 	CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET,
@@ -591,4 +594,3 @@
 	sock_release(*sockp);
 	return rc;
 }
-EXPORT_SYMBOL(lnet_sock_connect);
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 2a137f4..468eda6 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -46,15 +46,15 @@
 
 static int
 lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
-	    int delayed, unsigned int niov,
-	    struct kvec *iov, lnet_kiov_t *kiov,
-	    unsigned int offset, unsigned int mlen, unsigned int rlen)
+	   int delayed, unsigned int niov,
+	   struct kvec *iov, lnet_kiov_t *kiov,
+	   unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
 	lnet_msg_t *sendmsg = private;
 
-	if (lntmsg != NULL) {		   /* not discarding */
-		if (sendmsg->msg_iov != NULL) {
-			if (iov != NULL)
+	if (lntmsg) {		   /* not discarding */
+		if (sendmsg->msg_iov) {
+			if (iov)
 				lnet_copy_iov2iov(niov, iov, offset,
 						  sendmsg->msg_niov,
 						  sendmsg->msg_iov,
@@ -65,7 +65,7 @@
 						   sendmsg->msg_iov,
 						   sendmsg->msg_offset, mlen);
 		} else {
-			if (iov != NULL)
+			if (iov)
 				lnet_copy_kiov2iov(niov, iov, offset,
 						   sendmsg->msg_niov,
 						   sendmsg->msg_kiov,
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index c93c007..e12fe37 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -36,6 +36,7 @@
 
 #define DEBUG_SUBSYSTEM S_LNET
 #include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
 
 static int config_on_load;
 module_param(config_on_load, int, 0444);
@@ -52,13 +53,21 @@
 	mutex_lock(&lnet_config_mutex);
 
 	if (!the_lnet.ln_niinit_self) {
-		rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+		rc = try_module_get(THIS_MODULE);
+
+		if (rc != 1)
+			goto out;
+
+		rc = LNetNIInit(LNET_PID_LUSTRE);
 		if (rc >= 0) {
 			the_lnet.ln_niinit_self = 1;
 			rc = 0;
+		} else {
+			module_put(THIS_MODULE);
 		}
 	}
 
+out:
 	mutex_unlock(&lnet_config_mutex);
 	return rc;
 }
@@ -73,6 +82,7 @@
 	if (the_lnet.ln_niinit_self) {
 		the_lnet.ln_niinit_self = 0;
 		LNetNIFini();
+		module_put(THIS_MODULE);
 	}
 
 	mutex_lock(&the_lnet.ln_api_mutex);
@@ -80,28 +90,93 @@
 	mutex_unlock(&the_lnet.ln_api_mutex);
 
 	mutex_unlock(&lnet_config_mutex);
-	return (refcount == 0) ? 0 : -EBUSY;
+	return !refcount ? 0 : -EBUSY;
 }
 
 static int
-lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
+lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
+{
+	struct lnet_ioctl_config_data *conf =
+		(struct lnet_ioctl_config_data *)hdr;
+	int rc;
+
+	if (conf->cfg_hdr.ioc_len < sizeof(*conf))
+		return -EINVAL;
+
+	mutex_lock(&lnet_config_mutex);
+	if (!the_lnet.ln_niinit_self) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+	rc = lnet_dyn_add_ni(LNET_PID_LUSTRE,
+			     conf->cfg_config_u.cfg_net.net_intf,
+			     conf->cfg_config_u.cfg_net.net_peer_timeout,
+			     conf->cfg_config_u.cfg_net.net_peer_tx_credits,
+			     conf->cfg_config_u.cfg_net.net_peer_rtr_credits,
+			     conf->cfg_config_u.cfg_net.net_max_tx_credits);
+out_unlock:
+	mutex_unlock(&lnet_config_mutex);
+
+	return rc;
+}
+
+static int
+lnet_dyn_unconfigure(struct libcfs_ioctl_hdr *hdr)
+{
+	struct lnet_ioctl_config_data *conf =
+		(struct lnet_ioctl_config_data *)hdr;
+	int rc;
+
+	if (conf->cfg_hdr.ioc_len < sizeof(*conf))
+		return -EINVAL;
+
+	mutex_lock(&lnet_config_mutex);
+	if (!the_lnet.ln_niinit_self) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+	rc = lnet_dyn_del_ni(conf->cfg_net);
+out_unlock:
+	mutex_unlock(&lnet_config_mutex);
+
+	return rc;
+}
+
+static int
+lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
 {
 	int rc;
 
 	switch (cmd) {
-	case IOC_LIBCFS_CONFIGURE:
+	case IOC_LIBCFS_CONFIGURE: {
+		struct libcfs_ioctl_data *data =
+			(struct libcfs_ioctl_data *)hdr;
+
+		if (data->ioc_hdr.ioc_len < sizeof(*data))
+			return -EINVAL;
+
+		the_lnet.ln_nis_from_mod_params = data->ioc_flags;
 		return lnet_configure(NULL);
+	}
 
 	case IOC_LIBCFS_UNCONFIGURE:
 		return lnet_unconfigure();
 
+	case IOC_LIBCFS_ADD_NET:
+		return lnet_dyn_configure(hdr);
+
+	case IOC_LIBCFS_DEL_NET:
+		return lnet_dyn_unconfigure(hdr);
+
 	default:
-		/* Passing LNET_PID_ANY only gives me a ref if the net is up
+		/*
+		 * Passing LNET_PID_ANY only gives me a ref if the net is up
 		 * already; I'll need it to ensure the net can't go down while
-		 * I'm called into it */
+		 * I'm called into it
+		 */
 		rc = LNetNIInit(LNET_PID_ANY);
 		if (rc >= 0) {
-			rc = LNetCtl(cmd, data);
+			rc = LNetCtl(cmd, hdr);
 			LNetNIFini();
 		}
 		return rc;
@@ -118,17 +193,19 @@
 	mutex_init(&lnet_config_mutex);
 
 	rc = lnet_init();
-	if (rc != 0) {
+	if (rc) {
 		CERROR("lnet_init: error %d\n", rc);
 		return rc;
 	}
 
 	rc = libcfs_register_ioctl(&lnet_ioctl_handler);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
 	if (config_on_load) {
-		/* Have to schedule a separate thread to avoid deadlocking
-		 * in modload */
+		/*
+		 * Have to schedule a separate thread to avoid deadlocking
+		 * in modload
+		 */
 		(void) kthread_run(lnet_configure, NULL, "lnet_initd");
 	}
 
@@ -141,7 +218,7 @@
 	int rc;
 
 	rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
 	lnet_fini();
 }
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index 80f585a..00010f3 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -170,7 +170,7 @@
 	}
 
 	LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
-	if (addrrange == NULL)
+	if (!addrrange)
 		return -ENOMEM;
 	list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
 	INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
@@ -203,16 +203,18 @@
 		return NULL;
 
 	nf = libcfs_namenum2netstrfns(src->ls_str);
-	if (nf == NULL)
+	if (!nf)
 		return NULL;
 	endlen = src->ls_len - strlen(nf->nf_name);
-	if (endlen == 0)
+	if (!endlen)
 		/* network name only, e.g. "elan" or "tcp" */
 		netnum = 0;
 	else {
-		/* e.g. "elan25" or "tcp23", refuse to parse if
+		/*
+		 * e.g. "elan25" or "tcp23", refuse to parse if
 		 * network name is not appended with decimal or
-		 * hexadecimal number */
+		 * hexadecimal number
+		 */
 		if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name),
 				       endlen, &netnum, 0, MAX_NUMERIC_VALUE))
 			return NULL;
@@ -227,7 +229,7 @@
 	}
 
 	LIBCFS_ALLOC(nr, sizeof(struct nidrange));
-	if (nr == NULL)
+	if (!nr)
 		return NULL;
 	list_add_tail(&nr->nr_link, nidlist);
 	INIT_LIST_HEAD(&nr->nr_addrranges);
@@ -253,17 +255,17 @@
 	struct nidrange *nr;
 
 	tmp = *src;
-	if (cfs_gettok(src, '@', &addrrange) == 0)
+	if (!cfs_gettok(src, '@', &addrrange))
 		goto failed;
 
-	if (cfs_gettok(src, '@', &net) == 0 || src->ls_str != NULL)
+	if (!cfs_gettok(src, '@', &net) || src->ls_str)
 		goto failed;
 
 	nr = add_nidrange(&net, nidlist);
-	if (nr == NULL)
+	if (!nr)
 		goto failed;
 
-	if (parse_addrange(&addrrange, nr) != 0)
+	if (parse_addrange(&addrrange, nr))
 		goto failed;
 
 	return 1;
@@ -342,12 +344,12 @@
 	INIT_LIST_HEAD(nidlist);
 	while (src.ls_str) {
 		rc = cfs_gettok(&src, ' ', &res);
-		if (rc == 0) {
+		if (!rc) {
 			cfs_free_nidlist(nidlist);
 			return 0;
 		}
 		rc = parse_nidrange(&res, nidlist);
-		if (rc == 0) {
+		if (!rc) {
 			cfs_free_nidlist(nidlist);
 			return 0;
 		}
@@ -378,7 +380,7 @@
 			return 1;
 		list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
 			if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
-						       &ar->ar_numaddr_ranges))
+							    &ar->ar_numaddr_ranges))
 				return 1;
 	}
 	return 0;
@@ -395,7 +397,7 @@
 {
 	struct netstrfns *nf = nr->nr_netstrfns;
 
-	if (nr->nr_netnum == 0)
+	if (!nr->nr_netnum)
 		return scnprintf(buffer, count, "@%s", nf->nf_name);
 	else
 		return scnprintf(buffer, count, "@%s%u",
@@ -417,7 +419,7 @@
 	struct netstrfns *nf = nr->nr_netstrfns;
 
 	list_for_each_entry(ar, addrranges, ar_link) {
-		if (i != 0)
+		if (i)
 			i += scnprintf(buffer + i, count - i, " ");
 		i += nf->nf_print_addrlist(buffer + i, count - i,
 					   &ar->ar_numaddr_ranges);
@@ -442,10 +444,10 @@
 		return 0;
 
 	list_for_each_entry(nr, nidlist, nr_link) {
-		if (i != 0)
+		if (i)
 			i += scnprintf(buffer + i, count - i, " ");
 
-		if (nr->nr_all != 0) {
+		if (nr->nr_all) {
 			LASSERT(list_empty(&nr->nr_addrranges));
 			i += scnprintf(buffer + i, count - i, "*");
 			i += cfs_print_network(buffer + i, count - i, nr);
@@ -487,13 +489,13 @@
 	tmp_ip_addr = ((min_ip[0] << 24) | (min_ip[1] << 16) |
 		       (min_ip[2] << 8) | min_ip[3]);
 
-	if (min_nid != NULL)
+	if (min_nid)
 		*min_nid = tmp_ip_addr;
 
 	tmp_ip_addr = ((max_ip[0] << 24) | (max_ip[1] << 16) |
 		       (max_ip[2] << 8) | max_ip[3]);
 
-	if (max_nid != NULL)
+	if (max_nid)
 		*max_nid = tmp_ip_addr;
 }
 
@@ -515,16 +517,16 @@
 
 	list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) {
 		list_for_each_entry(re, &el->el_exprs, re_link) {
-			if (re->re_lo < min_addr || min_addr == 0)
+			if (re->re_lo < min_addr || !min_addr)
 				min_addr = re->re_lo;
 			if (re->re_hi > max_addr)
 				max_addr = re->re_hi;
 		}
 	}
 
-	if (min_nid != NULL)
+	if (min_nid)
 		*min_nid = min_addr;
-	if (max_nid != NULL)
+	if (max_nid)
 		*max_nid = max_addr;
 }
 
@@ -546,17 +548,17 @@
 
 	list_for_each_entry(nr, nidlist, nr_link) {
 		nf = nr->nr_netstrfns;
-		if (lndname == NULL)
+		if (!lndname)
 			lndname = nf->nf_name;
 		if (netnum == -1)
 			netnum = nr->nr_netnum;
 
-		if (strcmp(lndname, nf->nf_name) != 0 ||
+		if (strcmp(lndname, nf->nf_name) ||
 		    netnum != nr->nr_netnum)
 			return false;
 	}
 
-	if (nf == NULL)
+	if (!nf)
 		return false;
 
 	if (!nf->nf_is_contiguous(nidlist))
@@ -590,7 +592,7 @@
 		list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
 			cfs_num_ar_min_max(ar, &current_start_nid,
 					   &current_end_nid);
-			if (last_end_nid != 0 &&
+			if (last_end_nid &&
 			    (current_start_nid - last_end_nid != 1))
 				return false;
 			last_end_nid = current_end_nid;
@@ -600,7 +602,7 @@
 						    re_link) {
 					if (re->re_stride > 1)
 						return false;
-					else if (last_hi != 0 &&
+					else if (last_hi &&
 						 re->re_hi - last_hi != 1)
 						return false;
 					last_hi = re->re_hi;
@@ -640,7 +642,7 @@
 			last_diff = 0;
 			cfs_ip_ar_min_max(ar, &current_start_nid,
 					  &current_end_nid);
-			if (last_end_nid != 0 &&
+			if (last_end_nid &&
 			    (current_start_nid - last_end_nid != 1))
 				return false;
 			last_end_nid = current_end_nid;
@@ -724,7 +726,7 @@
 		list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
 			cfs_num_ar_min_max(ar, &tmp_min_addr,
 					   &tmp_max_addr);
-			if (tmp_min_addr < min_addr || min_addr == 0)
+			if (tmp_min_addr < min_addr || !min_addr)
 				min_addr = tmp_min_addr;
 			if (tmp_max_addr > max_addr)
 				max_addr = tmp_min_addr;
@@ -756,16 +758,16 @@
 		list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
 			cfs_ip_ar_min_max(ar, &tmp_min_ip_addr,
 					  &tmp_max_ip_addr);
-			if (tmp_min_ip_addr < min_ip_addr || min_ip_addr == 0)
+			if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr)
 				min_ip_addr = tmp_min_ip_addr;
 			if (tmp_max_ip_addr > max_ip_addr)
 				max_ip_addr = tmp_max_ip_addr;
 		}
 	}
 
-	if (min_nid != NULL)
+	if (min_nid)
 		*min_nid = min_ip_addr;
-	if (max_nid != NULL)
+	if (max_nid)
 		*max_nid = max_ip_addr;
 }
 
@@ -784,12 +786,14 @@
 		 (addr >> 8) & 0xff, addr & 0xff);
 }
 
-/* CAVEAT EMPTOR XscanfX
+/*
+ * CAVEAT EMPTOR XscanfX
  * I use "%n" at the end of a sscanf format to detect trailing junk.  However
  * sscanf may return immediately if it sees the terminating '0' in a string, so
  * I initialise the %n variable to the expected length.  If sscanf sets it;
  * fine, if it doesn't, then the scan ended at the end of the string, which is
- * fine too :) */
+ * fine too :)
+ */
 static int
 libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
 {
@@ -802,9 +806,9 @@
 	/* numeric IP? */
 	if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
 	    n == nob &&
-	    (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
-	    (c & ~0xff) == 0 && (d & ~0xff) == 0) {
-		*addr = ((a<<24)|(b<<16)|(c<<8)|d);
+	    !(a & ~0xff) && !(b & ~0xff) &&
+	    !(c & ~0xff) && !(d & ~0xff)) {
+		*addr = ((a << 24) | (b << 16) | (c << 8) | d);
 		return 1;
 	}
 
@@ -824,7 +828,7 @@
 	src.ls_len = len;
 	i = 0;
 
-	while (src.ls_str != NULL) {
+	while (src.ls_str) {
 		struct cfs_lstr res;
 
 		if (!cfs_gettok(&src, '.', &res)) {
@@ -833,7 +837,7 @@
 		}
 
 		rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
-		if (rc != 0)
+		if (rc)
 			goto out;
 
 		list_add_tail(&el->el_link, list);
@@ -858,7 +862,7 @@
 
 	list_for_each_entry(el, list, el_link) {
 		LASSERT(j++ < 4);
-		if (i != 0)
+		if (i)
 			i += scnprintf(buffer + i, count - i, ".");
 		i += cfs_expr_list_print(buffer + i, count - i, el);
 	}
@@ -928,7 +932,7 @@
 	int	rc;
 
 	rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
-	if (rc == 0)
+	if (!rc)
 		list_add_tail(&el->el_link, list);
 
 	return rc;
@@ -1060,7 +1064,7 @@
 int
 libcfs_isknown_lnd(__u32 lnd)
 {
-	return libcfs_lnd2netstrfns(lnd) != NULL;
+	return !!libcfs_lnd2netstrfns(lnd);
 }
 EXPORT_SYMBOL(libcfs_isknown_lnd);
 
@@ -1069,7 +1073,7 @@
 {
 	struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
 
-	return (nf == NULL) ? NULL : nf->nf_modname;
+	return nf ? nf->nf_modname : NULL;
 }
 EXPORT_SYMBOL(libcfs_lnd2modname);
 
@@ -1078,7 +1082,7 @@
 {
 	struct netstrfns *nf = libcfs_name2netstrfns(str);
 
-	if (nf != NULL)
+	if (nf)
 		return nf->nf_type;
 
 	return -1;
@@ -1091,7 +1095,7 @@
 	struct netstrfns *nf;
 
 	nf = libcfs_lnd2netstrfns(lnd);
-	if (nf == NULL)
+	if (!nf)
 		snprintf(buf, buf_size, "?%u?", lnd);
 	else
 		snprintf(buf, buf_size, "%s", nf->nf_name);
@@ -1108,9 +1112,9 @@
 	struct netstrfns *nf;
 
 	nf = libcfs_lnd2netstrfns(lnd);
-	if (nf == NULL)
+	if (!nf)
 		snprintf(buf, buf_size, "<%u:%u>", lnd, nnum);
-	else if (nnum == 0)
+	else if (!nnum)
 		snprintf(buf, buf_size, "%s", nf->nf_name);
 	else
 		snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum);
@@ -1135,14 +1139,14 @@
 	}
 
 	nf = libcfs_lnd2netstrfns(lnd);
-	if (nf == NULL)
+	if (!nf) {
 		snprintf(buf, buf_size, "%x@<%u:%u>", addr, lnd, nnum);
-	else {
+	} else {
 		size_t addr_len;
 
 		nf->nf_addr2str(addr, buf, buf_size);
 		addr_len = strlen(buf);
-		if (nnum == 0)
+		if (!nnum)
 			snprintf(buf + addr_len, buf_size - addr_len, "@%s",
 				 nf->nf_name);
 		else
@@ -1195,7 +1199,7 @@
 {
 	__u32  net;
 
-	if (libcfs_str2net_internal(str, &net) != NULL)
+	if (libcfs_str2net_internal(str, &net))
 		return net;
 
 	return LNET_NIDNET(LNET_NID_ANY);
@@ -1210,15 +1214,15 @@
 	__u32 net;
 	__u32 addr;
 
-	if (sep != NULL) {
+	if (sep) {
 		nf = libcfs_str2net_internal(sep + 1, &net);
-		if (nf == NULL)
+		if (!nf)
 			return LNET_NID_ANY;
 	} else {
 		sep = str + strlen(str);
 		net = LNET_MKNET(SOCKLND, 0);
 		nf = libcfs_lnd2netstrfns(SOCKLND);
-		LASSERT(nf != NULL);
+		LASSERT(nf);
 	}
 
 	if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
@@ -1240,8 +1244,8 @@
 	}
 
 	snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
-		 ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "",
-		 (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
+		 id.pid & LNET_PID_USERFLAG ? "U" : "",
+		 id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid));
 	return str;
 }
 EXPORT_SYMBOL(libcfs_id2str);
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 1fceed3..19c80c9 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -39,6 +39,7 @@
 #define DEBUG_SUBSYSTEM S_LNET
 
 #include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
 
 int
 lnet_peer_tables_create(void)
@@ -50,7 +51,7 @@
 
 	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
 						   sizeof(*ptable));
-	if (the_lnet.ln_peer_tables == NULL) {
+	if (!the_lnet.ln_peer_tables) {
 		CERROR("Failed to allocate cpu-partition peer tables\n");
 		return -ENOMEM;
 	}
@@ -60,7 +61,7 @@
 
 		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
 				 LNET_PEER_HASH_SIZE * sizeof(*hash));
-		if (hash == NULL) {
+		if (!hash) {
 			CERROR("Failed to create peer hash table\n");
 			lnet_peer_tables_destroy();
 			return -ENOMEM;
@@ -82,12 +83,12 @@
 	int i;
 	int j;
 
-	if (the_lnet.ln_peer_tables == NULL)
+	if (!the_lnet.ln_peer_tables)
 		return;
 
 	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
 		hash = ptable->pt_hash;
-		if (hash == NULL) /* not initialized */
+		if (!hash) /* not initialized */
 			break;
 
 		LASSERT(list_empty(&ptable->pt_deathrow));
@@ -103,62 +104,116 @@
 	the_lnet.ln_peer_tables = NULL;
 }
 
+static void
+lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
+{
+	int i;
+	lnet_peer_t *lp;
+	lnet_peer_t *tmp;
+
+	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+					 lp_hashlist) {
+			if (ni && ni != lp->lp_ni)
+				continue;
+			list_del_init(&lp->lp_hashlist);
+			/* Lose hash table's ref */
+			ptable->pt_zombies++;
+			lnet_peer_decref_locked(lp);
+		}
+	}
+}
+
+static void
+lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
+				     int cpt_locked)
+{
+	int i;
+
+	for (i = 3; ptable->pt_zombies; i++) {
+		lnet_net_unlock(cpt_locked);
+
+		if (is_power_of_2(i)) {
+			CDEBUG(D_WARNING,
+			       "Waiting for %d zombies on peer table\n",
+			       ptable->pt_zombies);
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(1) >> 1);
+		lnet_net_lock(cpt_locked);
+	}
+}
+
+static void
+lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
+				int cpt_locked)
+{
+	lnet_peer_t *lp;
+	lnet_peer_t *tmp;
+	lnet_nid_t lp_nid;
+	int i;
+
+	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+					 lp_hashlist) {
+			if (ni != lp->lp_ni)
+				continue;
+
+			if (!lp->lp_rtr_refcount)
+				continue;
+
+			lp_nid = lp->lp_nid;
+
+			lnet_net_unlock(cpt_locked);
+			lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
+			lnet_net_lock(cpt_locked);
+		}
+	}
+}
+
 void
-lnet_peer_tables_cleanup(void)
+lnet_peer_tables_cleanup(lnet_ni_t *ni)
 {
 	struct lnet_peer_table *ptable;
+	struct list_head deathrow;
+	lnet_peer_t *lp;
 	int i;
-	int j;
 
-	LASSERT(the_lnet.ln_shutdown);	/* i.e. no new peers */
+	INIT_LIST_HEAD(&deathrow);
 
+	LASSERT(the_lnet.ln_shutdown || ni);
+	/*
+	 * If just deleting the peers for a NI, get rid of any routes these
+	 * peers are gateways for.
+	 */
 	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
 		lnet_net_lock(i);
-
-		for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
-			struct list_head *peers = &ptable->pt_hash[j];
-
-			while (!list_empty(peers)) {
-				lnet_peer_t *lp = list_entry(peers->next,
-								 lnet_peer_t,
-								 lp_hashlist);
-				list_del_init(&lp->lp_hashlist);
-				/* lose hash table's ref */
-				lnet_peer_decref_locked(lp);
-			}
-		}
-
+		lnet_peer_table_del_rtrs_locked(ni, ptable, i);
 		lnet_net_unlock(i);
 	}
 
+	/*
+	 * Start the process of moving the applicable peers to
+	 * deathrow.
+	 */
 	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
-		LIST_HEAD(deathrow);
-		lnet_peer_t *lp;
-
 		lnet_net_lock(i);
-
-		for (j = 3; ptable->pt_number != 0; j++) {
-			lnet_net_unlock(i);
-
-			if ((j & (j - 1)) == 0) {
-				CDEBUG(D_WARNING,
-				       "Waiting for %d peers on peer table\n",
-				       ptable->pt_number);
-			}
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1) / 2);
-			lnet_net_lock(i);
-		}
-		list_splice_init(&ptable->pt_deathrow, &deathrow);
-
+		lnet_peer_table_cleanup_locked(ni, ptable);
 		lnet_net_unlock(i);
+	}
 
-		while (!list_empty(&deathrow)) {
-			lp = list_entry(deathrow.next,
-					    lnet_peer_t, lp_hashlist);
-			list_del(&lp->lp_hashlist);
-			LIBCFS_FREE(lp, sizeof(*lp));
-		}
+	/* Cleanup all entries on deathrow. */
+	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+		lnet_net_lock(i);
+		lnet_peer_table_deathrow_wait_locked(ptable, i);
+		list_splice_init(&ptable->pt_deathrow, &deathrow);
+		lnet_net_unlock(i);
+	}
+
+	while (!list_empty(&deathrow)) {
+		lp = list_entry(deathrow.next, lnet_peer_t, lp_hashlist);
+		list_del(&lp->lp_hashlist);
+		LIBCFS_FREE(lp, sizeof(*lp));
 	}
 }
 
@@ -167,11 +222,11 @@
 {
 	struct lnet_peer_table *ptable;
 
-	LASSERT(lp->lp_refcount == 0);
-	LASSERT(lp->lp_rtr_refcount == 0);
+	LASSERT(!lp->lp_refcount);
+	LASSERT(!lp->lp_rtr_refcount);
 	LASSERT(list_empty(&lp->lp_txq));
 	LASSERT(list_empty(&lp->lp_hashlist));
-	LASSERT(lp->lp_txqnob == 0);
+	LASSERT(!lp->lp_txqnob);
 
 	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
 	LASSERT(ptable->pt_number > 0);
@@ -181,6 +236,8 @@
 	lp->lp_ni = NULL;
 
 	list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+	LASSERT(ptable->pt_zombies > 0);
+	ptable->pt_zombies--;
 }
 
 lnet_peer_t *
@@ -220,14 +277,14 @@
 
 	ptable = the_lnet.ln_peer_tables[cpt2];
 	lp = lnet_find_peer_locked(ptable, nid);
-	if (lp != NULL) {
+	if (lp) {
 		*lpp = lp;
 		return 0;
 	}
 
 	if (!list_empty(&ptable->pt_deathrow)) {
 		lp = list_entry(ptable->pt_deathrow.next,
-				    lnet_peer_t, lp_hashlist);
+				lnet_peer_t, lp_hashlist);
 		list_del(&lp->lp_hashlist);
 	}
 
@@ -238,12 +295,12 @@
 	ptable->pt_number++;
 	lnet_net_unlock(cpt);
 
-	if (lp != NULL)
+	if (lp)
 		memset(lp, 0, sizeof(*lp));
 	else
 		LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
 
-	if (lp == NULL) {
+	if (!lp) {
 		rc = -ENOMEM;
 		lnet_net_lock(cpt);
 		goto out;
@@ -276,30 +333,30 @@
 	}
 
 	lp2 = lnet_find_peer_locked(ptable, nid);
-	if (lp2 != NULL) {
+	if (lp2) {
 		*lpp = lp2;
 		goto out;
 	}
 
 	lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
-	if (lp->lp_ni == NULL) {
+	if (!lp->lp_ni) {
 		rc = -EHOSTUNREACH;
 		goto out;
 	}
 
-	lp->lp_txcredits     =
-	lp->lp_mintxcredits  = lp->lp_ni->ni_peertxcredits;
-	lp->lp_rtrcredits    =
+	lp->lp_txcredits = lp->lp_ni->ni_peertxcredits;
+	lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
+	lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
 	lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
 
 	list_add_tail(&lp->lp_hashlist,
-			  &ptable->pt_hash[lnet_nid2peerhash(nid)]);
+		      &ptable->pt_hash[lnet_nid2peerhash(nid)]);
 	ptable->pt_version++;
 	*lpp = lp;
 
 	return 0;
 out:
-	if (lp != NULL)
+	if (lp)
 		list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
 	ptable->pt_number--;
 	return rc;
@@ -317,7 +374,7 @@
 	lnet_net_lock(cpt);
 
 	rc = lnet_nid2peer_locked(&lp, nid, cpt);
-	if (rc != 0) {
+	if (rc) {
 		lnet_net_unlock(cpt);
 		CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
 		return;
@@ -336,3 +393,65 @@
 
 	lnet_net_unlock(cpt);
 }
+
+int
+lnet_get_peer_info(__u32 peer_index, __u64 *nid,
+		   char aliveness[LNET_MAX_STR_LEN],
+		   __u32 *cpt_iter, __u32 *refcount,
+		   __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+		   __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
+		   __u32 *peer_tx_qnob)
+{
+	struct lnet_peer_table *peer_table;
+	lnet_peer_t *lp;
+	bool found = false;
+	int lncpt, j;
+
+	/* get the number of CPTs */
+	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
+
+	/*
+	 * if the cpt number to be examined is >= the number of cpts in
+	 * the system, then indicate that there are no more cpts to examine
+	 */
+	if (*cpt_iter >= lncpt)
+		return -ENOENT;
+
+	/* get the current table */
+	peer_table = the_lnet.ln_peer_tables[*cpt_iter];
+	/* if the ptable is NULL then there are no more cpts to examine */
+	if (!peer_table)
+		return -ENOENT;
+
+	lnet_net_lock(*cpt_iter);
+
+	for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
+		struct list_head *peers = &peer_table->pt_hash[j];
+
+		list_for_each_entry(lp, peers, lp_hashlist) {
+			if (peer_index-- > 0)
+				continue;
+
+			snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
+			if (lnet_isrouter(lp) ||
+			    lnet_peer_aliveness_enabled(lp))
+				snprintf(aliveness, LNET_MAX_STR_LEN,
+					 lp->lp_alive ? "up" : "down");
+
+			*nid = lp->lp_nid;
+			*refcount = lp->lp_refcount;
+			*ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits;
+			*peer_tx_credits = lp->lp_txcredits;
+			*peer_rtr_credits = lp->lp_rtrcredits;
+			*peer_min_rtr_credits = lp->lp_mintxcredits;
+			*peer_tx_qnob = lp->lp_txqnob;
+
+			found = true;
+		}
+	}
+	lnet_net_unlock(*cpt_iter);
+
+	*cpt_iter = lncpt;
+
+	return found ? 0 : -ENOENT;
+}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index f5faa41..5e8b0ba 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -15,10 +15,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
@@ -28,8 +24,11 @@
 #define LNET_NRB_TINY		(LNET_NRB_TINY_MIN * 4)
 #define LNET_NRB_SMALL_MIN	4096	/* min value for each CPT */
 #define LNET_NRB_SMALL		(LNET_NRB_SMALL_MIN * 4)
+#define LNET_NRB_SMALL_PAGES	1
 #define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
 #define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
+#define LNET_NRB_LARGE_PAGES   ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
+				 PAGE_CACHE_SHIFT)
 
 static char *forwarding = "";
 module_param(forwarding, charp, 0444);
@@ -61,8 +60,10 @@
 	if (peer_buffer_credits > 0)
 		return peer_buffer_credits;
 
-	/* As an approximation, allow this peer the same number of router
-	 * buffers as it is allowed outstanding sends */
+	/*
+	 * As an approximation, allow this peer the same number of router
+	 * buffers as it is allowed outstanding sends
+	 */
 	return ni->ni_peertxcredits;
 }
 
@@ -107,7 +108,7 @@
 	lp->lp_timestamp = when;		/* update timestamp */
 	lp->lp_ping_deadline = 0;	       /* disable ping timeout */
 
-	if (lp->lp_alive_count != 0 &&	  /* got old news */
+	if (lp->lp_alive_count &&	  /* got old news */
 	    (!lp->lp_alive) == (!alive)) {      /* new date for old news */
 		CDEBUG(D_NET, "Old news\n");
 		return;
@@ -131,11 +132,12 @@
 	int alive;
 	int notifylnd;
 
-	/* Notify only in 1 thread at any time to ensure ordered notification.
+	/*
+	 * Notify only in 1 thread at any time to ensure ordered notification.
 	 * NB individual events can be missed; the only guarantee is that you
-	 * always get the most recent news */
-
-	if (lp->lp_notifying || ni == NULL)
+	 * always get the most recent news
+	 */
+	if (lp->lp_notifying || !ni)
 		return;
 
 	lp->lp_notifying = 1;
@@ -147,13 +149,14 @@
 		lp->lp_notifylnd = 0;
 		lp->lp_notify    = 0;
 
-		if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
+		if (notifylnd && ni->ni_lnd->lnd_notify) {
 			lnet_net_unlock(lp->lp_cpt);
 
-			/* A new notification could happen now; I'll handle it
-			 * when control returns to me */
-
-			(ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
+			/*
+			 * A new notification could happen now; I'll handle it
+			 * when control returns to me
+			 */
+			ni->ni_lnd->lnd_notify(ni, lp->lp_nid, alive);
 
 			lnet_net_lock(lp->lp_cpt);
 		}
@@ -176,7 +179,7 @@
 		/* a simple insertion sort */
 		list_for_each_prev(pos, &the_lnet.ln_routers) {
 			lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
-							  lp_rtr_list);
+						      lp_rtr_list);
 
 			if (rtr->lp_nid < lp->lp_nid)
 				break;
@@ -197,12 +200,12 @@
 
 	/* lnet_net_lock must be exclusively locked */
 	lp->lp_rtr_refcount--;
-	if (lp->lp_rtr_refcount == 0) {
+	if (!lp->lp_rtr_refcount) {
 		LASSERT(list_empty(&lp->lp_routes));
 
-		if (lp->lp_rcd != NULL) {
+		if (lp->lp_rcd) {
 			list_add(&lp->lp_rcd->rcd_list,
-				     &the_lnet.ln_rcd_deathrow);
+				 &the_lnet.ln_rcd_deathrow);
 			lp->lp_rcd = NULL;
 		}
 
@@ -245,8 +248,10 @@
 
 	cfs_get_random_bytes(seed, sizeof(seed));
 
-	/* Nodes with small feet have little entropy
-	 * the NID for this node gives the most entropy in the low bits */
+	/*
+	 * Nodes with small feet have little entropy;
+	 * the NID for this node gives the most entropy in the low bits.
+	 */
 	list_for_each(tmp, &the_lnet.ln_nis) {
 		ni = list_entry(tmp, lnet_ni_t, ni_list);
 		lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
@@ -277,7 +282,7 @@
 	/* len+1 positions to add a new entry, also prevents division by 0 */
 	offset = cfs_rand() % (len + 1);
 	list_for_each(e, &rnet->lrn_routes) {
-		if (offset == 0)
+		if (!offset)
 			break;
 		offset--;
 	}
@@ -312,17 +317,17 @@
 		return -EINVAL;
 
 	if (lnet_islocalnet(net))	       /* it's a local network */
-		return 0;		       /* ignore the route entry */
+		return -EEXIST;
 
 	/* Assume net, route, all new */
 	LIBCFS_ALLOC(route, sizeof(*route));
 	LIBCFS_ALLOC(rnet, sizeof(*rnet));
-	if (route == NULL || rnet == NULL) {
+	if (!route || !rnet) {
 		CERROR("Out of memory creating route %s %d %s\n",
 		       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
-		if (route != NULL)
+		if (route)
 			LIBCFS_FREE(route, sizeof(*route));
-		if (rnet != NULL)
+		if (rnet)
 			LIBCFS_FREE(rnet, sizeof(*rnet));
 		return -ENOMEM;
 	}
@@ -336,25 +341,24 @@
 	lnet_net_lock(LNET_LOCK_EX);
 
 	rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
-	if (rc != 0) {
+	if (rc) {
 		lnet_net_unlock(LNET_LOCK_EX);
 
 		LIBCFS_FREE(route, sizeof(*route));
 		LIBCFS_FREE(rnet, sizeof(*rnet));
 
 		if (rc == -EHOSTUNREACH) /* gateway is not on a local net */
-			return 0;	/* ignore the route entry */
+			return rc;	/* ignore the route entry */
 		CERROR("Error %d creating route %s %d %s\n", rc,
 		       libcfs_net2str(net), hops,
 		       libcfs_nid2str(gateway));
-
 		return rc;
 	}
 
 	LASSERT(!the_lnet.ln_shutdown);
 
 	rnet2 = lnet_find_net_locked(net);
-	if (rnet2 == NULL) {
+	if (!rnet2) {
 		/* new network */
 		list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
 		rnet2 = rnet;
@@ -382,8 +386,8 @@
 		lnet_net_unlock(LNET_LOCK_EX);
 
 		/* XXX Assume alive */
-		if (ni->ni_lnd->lnd_notify != NULL)
-			(ni->ni_lnd->lnd_notify)(ni, gateway, 1);
+		if (ni->ni_lnd->lnd_notify)
+			ni->ni_lnd->lnd_notify(ni, gateway, 1);
 
 		lnet_net_lock(LNET_LOCK_EX);
 	}
@@ -391,14 +395,20 @@
 	/* -1 for notify or !add_route */
 	lnet_peer_decref_locked(route->lr_gateway);
 	lnet_net_unlock(LNET_LOCK_EX);
+	rc = 0;
 
-	if (!add_route)
+	if (!add_route) {
+		rc = -EEXIST;
 		LIBCFS_FREE(route, sizeof(*route));
+	}
 
 	if (rnet != rnet2)
 		LIBCFS_FREE(rnet, sizeof(*rnet));
 
-	return 0;
+	/* indicate to startup the router checker if configured */
+	wake_up(&the_lnet.ln_rc_waitq);
+
+	return rc;
 }
 
 int
@@ -426,10 +436,9 @@
 				lnet_nid_t nid2;
 				int net;
 
-				route = list_entry(e2, lnet_route_t,
-						       lr_list);
+				route = list_entry(e2, lnet_route_t, lr_list);
 
-				if (route2 == NULL) {
+				if (!route2) {
 					route2 = route;
 					continue;
 				}
@@ -472,9 +481,10 @@
 	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
 	       libcfs_net2str(net), libcfs_nid2str(gw_nid));
 
-	/* NB Caller may specify either all routes via the given gateway
-	 * or a specific route entry actual NIDs) */
-
+	/*
+	 * NB Caller may specify either all routes via the given gateway
+	 * or a specific route entry actual NIDs)
+	 */
 	lnet_net_lock(LNET_LOCK_EX);
 	if (net == LNET_NIDNET(LNET_NID_ANY))
 		rn_list = &the_lnet.ln_remote_nets_hash[0];
@@ -486,7 +496,7 @@
 		rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
 
 		if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
-			net == rnet->lrn_net))
+		      net == rnet->lrn_net))
 			continue;
 
 		list_for_each(e2, &rnet->lrn_routes) {
@@ -513,7 +523,7 @@
 
 			LIBCFS_FREE(route, sizeof(*route));
 
-			if (rnet != NULL)
+			if (rnet)
 				LIBCFS_FREE(rnet, sizeof(*rnet));
 
 			rc = 0;
@@ -538,6 +548,38 @@
 	lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
 }
 
+int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
+{
+	int i, rc = -ENOENT, j;
+
+	if (!the_lnet.ln_rtrpools)
+		return rc;
+
+	for (i = 0; i < LNET_NRBPOOLS; i++) {
+		lnet_rtrbufpool_t *rbp;
+
+		lnet_net_lock(LNET_LOCK_EX);
+		cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
+			if (i++ != idx)
+				continue;
+
+			pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages;
+			pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers;
+			pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits;
+			pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits;
+			rc = 0;
+			break;
+		}
+		lnet_net_unlock(LNET_LOCK_EX);
+	}
+
+	lnet_net_lock(LNET_LOCK_EX);
+	pool_cfg->pl_routing = the_lnet.ln_routing;
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	return rc;
+}
+
 int
 lnet_get_route(int idx, __u32 *net, __u32 *hops,
 	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
@@ -558,15 +600,14 @@
 			rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
 
 			list_for_each(e2, &rnet->lrn_routes) {
-				route = list_entry(e2, lnet_route_t,
-						       lr_list);
+				route = list_entry(e2, lnet_route_t, lr_list);
 
-				if (idx-- == 0) {
+				if (!idx--) {
 					*net      = rnet->lrn_net;
 					*hops     = route->lr_hops;
 					*priority = route->lr_priority;
 					*gateway  = route->lr_gateway->lp_nid;
-					*alive    = route->lr_gateway->lp_alive;
+					*alive = lnet_is_route_alive(route);
 					lnet_net_unlock(cpt);
 					return 0;
 				}
@@ -604,7 +645,7 @@
 {
 	lnet_ping_info_t *info = rcd->rcd_pinginfo;
 	struct lnet_peer *gw = rcd->rcd_gateway;
-	lnet_route_t *rtr;
+	lnet_route_t *rte;
 
 	if (!gw->lp_alive)
 		return;
@@ -621,21 +662,25 @@
 	}
 
 	gw->lp_ping_feats = info->pi_features;
-	if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
+	if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) {
 		CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
 		       libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
 		return; /* nothing I can understand */
 	}
 
-	if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
+	if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS))
 		return; /* can't carry NI status info */
 
-	list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
-		int ptl_status = LNET_NI_STATUS_INVALID;
+	list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
 		int down = 0;
 		int up = 0;
 		int i;
 
+		if (gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) {
+			rte->lr_downis = 1;
+			continue;
+		}
+
 		for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
 			lnet_ni_status_t *stat = &info->pi_ni[i];
 			lnet_nid_t nid = stat->ns_nid;
@@ -651,22 +696,15 @@
 				continue;
 
 			if (stat->ns_status == LNET_NI_STATUS_DOWN) {
-				if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
-					down++;
-				else if (ptl_status != LNET_NI_STATUS_UP)
-					ptl_status = LNET_NI_STATUS_DOWN;
+				down++;
 				continue;
 			}
 
 			if (stat->ns_status == LNET_NI_STATUS_UP) {
-				if (LNET_NIDNET(nid) == rtr->lr_net) {
+				if (LNET_NIDNET(nid) == rte->lr_net) {
 					up = 1;
 					break;
 				}
-				/* ptl NIs are considered down only when
-				 * they're all down */
-				if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
-					ptl_status = LNET_NI_STATUS_UP;
 				continue;
 			}
 
@@ -677,10 +715,10 @@
 		}
 
 		if (up) { /* ignore downed NIs if NI for dest network is up */
-			rtr->lr_downis = 0;
+			rte->lr_downis = 0;
 			continue;
 		}
-		rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
+		rte->lr_downis = down;
 	}
 }
 
@@ -690,7 +728,7 @@
 	lnet_rc_data_t *rcd = event->md.user_ptr;
 	struct lnet_peer *lp;
 
-	LASSERT(rcd != NULL);
+	LASSERT(rcd);
 
 	if (event->unlinked) {
 		LNetInvalidateHandle(&rcd->rcd_mdh);
@@ -701,11 +739,13 @@
 		event->type == LNET_EVENT_REPLY);
 
 	lp = rcd->rcd_gateway;
-	LASSERT(lp != NULL);
+	LASSERT(lp);
 
-	 /* NB: it's called with holding lnet_res_lock, we have a few
-	  * places need to hold both locks at the same time, please take
-	  * care of lock ordering */
+	/*
+	 * NB: this is called while holding lnet_res_lock; a few places
+	 * need to hold both locks at the same time, so please take
+	 * care of lock ordering
+	 */
 	lnet_net_lock(lp->lp_cpt);
 	if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
 		/* ignore if no longer a router or rcd is replaced */
@@ -714,23 +754,26 @@
 
 	if (event->type == LNET_EVENT_SEND) {
 		lp->lp_ping_notsent = 0;
-		if (event->status == 0)
+		if (!event->status)
 			goto out;
 	}
 
 	/* LNET_EVENT_REPLY */
-	/* A successful REPLY means the router is up.  If _any_ comms
+	/*
+	 * A successful REPLY means the router is up.  If _any_ comms
 	 * to the router fail I assume it's down (this will happen if
 	 * we ping alive routers to try to detect router death before
-	 * apps get burned). */
+	 * apps get burned).
+	 */
+	lnet_notify_locked(lp, 1, !event->status, cfs_time_current());
 
-	lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
-	/* The router checker will wake up very shortly and do the
+	/*
+	 * The router checker will wake up very shortly and do the
 	 * actual notification.
 	 * XXX If 'lp' stops being a router before then, it will still
-	 * have the notification pending!!! */
-
-	if (avoid_asym_router_failure && event->status == 0)
+	 * have the notification pending!!!
+	 */
+	if (avoid_asym_router_failure && !event->status)
 		lnet_parse_rc_info(rcd);
 
  out:
@@ -753,7 +796,7 @@
 		list_for_each(entry, &the_lnet.ln_routers) {
 			rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
 
-			if (rtr->lp_alive_count == 0) {
+			if (!rtr->lp_alive_count) {
 				all_known = 0;
 				break;
 			}
@@ -774,7 +817,7 @@
 {
 	lnet_route_t *rte;
 
-	if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
+	if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) {
 		list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
 			if (rte->lr_net == net) {
 				rte->lr_downis = 0;
@@ -811,13 +854,15 @@
 			continue;
 		}
 
-		LASSERT(ni->ni_status != NULL);
+		LASSERT(ni->ni_status);
 
 		if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
 			CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
 			       libcfs_nid2str(ni->ni_nid), timeout);
-			/* NB: so far, this is the only place to set
-			 * NI status to "down" */
+			/*
+			 * NB: so far, this is the only place to set
+			 * NI status to "down"
+			 */
 			ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
 		}
 		lnet_ni_unlock(ni);
@@ -831,7 +876,7 @@
 	/* detached from network */
 	LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
 
-	if (rcd->rcd_gateway != NULL) {
+	if (rcd->rcd_gateway) {
 		int cpt = rcd->rcd_gateway->lp_cpt;
 
 		lnet_net_lock(cpt);
@@ -839,7 +884,7 @@
 		lnet_net_unlock(cpt);
 	}
 
-	if (rcd->rcd_pinginfo != NULL)
+	if (rcd->rcd_pinginfo)
 		LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
 
 	LIBCFS_FREE(rcd, sizeof(*rcd));
@@ -856,14 +901,14 @@
 	lnet_net_unlock(gateway->lp_cpt);
 
 	LIBCFS_ALLOC(rcd, sizeof(*rcd));
-	if (rcd == NULL)
+	if (!rcd)
 		goto out;
 
 	LNetInvalidateHandle(&rcd->rcd_mdh);
 	INIT_LIST_HEAD(&rcd->rcd_list);
 
 	LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
-	if (pi == NULL)
+	if (!pi)
 		goto out;
 
 	for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
@@ -885,11 +930,11 @@
 		CERROR("Can't bind MD: %d\n", rc);
 		goto out;
 	}
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
 	lnet_net_lock(gateway->lp_cpt);
 	/* router table changed or someone has created rcd for this gateway */
-	if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
+	if (!lnet_isrouter(gateway) || gateway->lp_rcd) {
 		lnet_net_unlock(gateway->lp_cpt);
 		goto out;
 	}
@@ -902,10 +947,10 @@
 	return rcd;
 
  out:
-	if (rcd != NULL) {
+	if (rcd) {
 		if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
 			rc = LNetMDUnlink(rcd->rcd_mdh);
-			LASSERT(rc == 0);
+			LASSERT(!rc);
 		}
 		lnet_destroy_rc_data(rcd);
 	}
@@ -936,7 +981,7 @@
 
 	lnet_peer_addref_locked(rtr);
 
-	if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
+	if (rtr->lp_ping_deadline && /* ping timed out? */
 	    cfs_time_after(now, rtr->lp_ping_deadline))
 		lnet_notify_locked(rtr, 1, 0, now);
 
@@ -950,10 +995,10 @@
 		return;
 	}
 
-	rcd = rtr->lp_rcd != NULL ?
+	rcd = rtr->lp_rcd ?
 	      rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
 
-	if (rcd == NULL)
+	if (!rcd)
 		return;
 
 	secs = lnet_router_check_interval(rtr);
@@ -964,7 +1009,7 @@
 	       rtr->lp_ping_deadline, rtr->lp_ping_notsent,
 	       rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
 
-	if (secs != 0 && !rtr->lp_ping_notsent &&
+	if (secs && !rtr->lp_ping_notsent &&
 	    cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
 					     cfs_time_seconds(secs)))) {
 		int rc;
@@ -972,7 +1017,7 @@
 		lnet_handle_md_t mdh;
 
 		id.nid = rtr->lp_nid;
-		id.pid = LUSTRE_SRV_LNET_PID;
+		id.pid = LNET_PID_LUSTRE;
 		CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
 
 		rtr->lp_ping_notsent   = 1;
@@ -980,7 +1025,7 @@
 
 		mdh = rcd->rcd_mdh;
 
-		if (rtr->lp_ping_deadline == 0) {
+		if (!rtr->lp_ping_deadline) {
 			rtr->lp_ping_deadline =
 				cfs_time_shift(router_ping_timeout);
 		}
@@ -991,7 +1036,7 @@
 			     LNET_PROTO_PING_MATCHBITS, 0);
 
 		lnet_net_lock(rtr->lp_cpt);
-		if (rc != 0)
+		if (rc)
 			rtr->lp_ping_notsent = 0; /* no event pending */
 	}
 
@@ -1001,8 +1046,9 @@
 int
 lnet_router_checker_start(void)
 {
+	struct task_struct *task;
 	int rc;
-	int eqsz;
+	int eqsz = 0;
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
@@ -1012,39 +1058,33 @@
 		return -EINVAL;
 	}
 
-	if (!the_lnet.ln_routing &&
-	    live_router_check_interval <= 0 &&
-	    dead_router_check_interval <= 0)
-		return 0;
-
 	sema_init(&the_lnet.ln_rc_signal, 0);
-	/* EQ size doesn't matter; the callback is guaranteed to get every
-	 * event */
-	eqsz = 0;
-	rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
-			 &the_lnet.ln_rc_eqh);
-	if (rc != 0) {
+
+	rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
+	if (rc) {
 		CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
 		return -ENOMEM;
 	}
 
 	the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
-	rc = PTR_ERR(kthread_run(lnet_router_checker,
-				 NULL, "router_checker"));
-	if (IS_ERR_VALUE(rc)) {
+	task = kthread_run(lnet_router_checker, NULL, "router_checker");
+	if (IS_ERR(task)) {
+		rc = PTR_ERR(task);
 		CERROR("Can't start router checker thread: %d\n", rc);
 		/* block until event callback signals exit */
 		down(&the_lnet.ln_rc_signal);
 		rc = LNetEQFree(the_lnet.ln_rc_eqh);
-		LASSERT(rc == 0);
+		LASSERT(!rc);
 		the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
 		return -ENOMEM;
 	}
 
 	if (check_routers_before_use) {
-		/* Note that a helpful side-effect of pinging all known routers
+		/*
+		 * Note that a helpful side-effect of pinging all known routers
 		 * at startup is that it makes them drop stale connections they
-		 * may have to a previous instance of me. */
+		 * may have to a previous instance of me.
+		 */
 		lnet_wait_known_routerstate();
 	}
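
The hunk above also fixes a real error-handling bug, not just style: the old code ran the return value of kthread_run() through PTR_ERR() unconditionally and tested it with IS_ERR_VALUE(), which misclassifies a valid task pointer and discards the pointer in either case. The replacement keeps the struct task_struct pointer and only extracts an errno once IS_ERR() confirms there is one. A minimal sketch of the idiom; start_worker() and the thread name are invented placeholders:

#include <linux/err.h>
#include <linux/kthread.h>

static int start_worker(int (*fn)(void *), void *data)
{
	struct task_struct *task;

	task = kthread_run(fn, data, "my_worker");
	if (IS_ERR(task)) {
		/* kthread_run() hands back ERR_PTR(-errno) on failure */
		return PTR_ERR(task);
	}

	/* success: keep 'task' around if kthread_stop() will be needed later */
	return 0;
}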
 
@@ -1061,13 +1101,15 @@
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 	the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
+	/* wakeup the RC thread if it's sleeping */
+	wake_up(&the_lnet.ln_rc_waitq);
 
 	/* block until event callback signals exit */
 	down(&the_lnet.ln_rc_signal);
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
 	rc = LNetEQFree(the_lnet.ln_rc_eqh);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 }
 
 static void
@@ -1091,13 +1133,13 @@
 	if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
 		/* router checker is stopping, prune all */
 		list_for_each_entry(lp, &the_lnet.ln_routers,
-					lp_rtr_list) {
-			if (lp->lp_rcd == NULL)
+				    lp_rtr_list) {
+			if (!lp->lp_rcd)
 				continue;
 
 			LASSERT(list_empty(&lp->lp_rcd->rcd_list));
 			list_add(&lp->lp_rcd->rcd_list,
-				     &the_lnet.ln_rcd_deathrow);
+				 &the_lnet.ln_rcd_deathrow);
 			lp->lp_rcd = NULL;
 		}
 	}
@@ -1119,7 +1161,7 @@
 	/* release all zombie RCDs */
 	while (!list_empty(&the_lnet.ln_rcd_zombie)) {
 		list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
-					     rcd_list) {
+					 rcd_list) {
 			if (LNetHandleIsInvalid(rcd->rcd_mdh))
 				list_move(&rcd->rcd_list, &head);
 		}
@@ -1131,7 +1173,7 @@
 
 		while (!list_empty(&head)) {
 			rcd = list_entry(head.next,
-					     lnet_rc_data_t, rcd_list);
+					 lnet_rc_data_t, rcd_list);
 			list_del_init(&rcd->rcd_list);
 			lnet_destroy_rc_data(rcd);
 		}
@@ -1151,6 +1193,33 @@
 	lnet_net_unlock(LNET_LOCK_EX);
 }
 
+/*
+ * This function is called to check if the RC should block indefinitely.
+ * It's called from lnet_router_checker() as well as being passed to
+ * wait_event_interruptible() to avoid the lost wake_up problem.
+ *
+ * When it's called from wait_event_interruptible() it is necessary to
+ * also not sleep if the rc state is not running to avoid a deadlock
+ * when the system is shutting down
+ */
+static inline bool
+lnet_router_checker_active(void)
+{
+	if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING)
+		return true;
+
+	/*
+	 * Router Checker thread needs to run when routing is enabled in
+	 * order to call lnet_update_ni_status_locked()
+	 */
+	if (the_lnet.ln_routing)
+		return true;
+
+	return !list_empty(&the_lnet.ln_routers) &&
+		(live_router_check_interval > 0 ||
+		 dead_router_check_interval > 0);
+}
+
 static int
 lnet_router_checker(void *arg)
 {
@@ -1159,8 +1228,6 @@
 
 	cfs_block_allsigs();
 
-	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-
 	while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
 		__u64 version;
 		int cpt;
@@ -1199,15 +1266,25 @@
 
 		lnet_prune_rc_data(0); /* don't wait for UNLINK */
 
-		/* Call schedule_timeout() here always adds 1 to load average
+		/*
+		 * Call schedule_timeout() here always adds 1 to load average
 		 * because kernel counts # active tasks as nr_running
-		 * + nr_uninterruptible. */
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		 * + nr_uninterruptible.
+		 */
+		/*
+		 * if there are any routes then wakeup every second.  If
+		 * there are no routes then sleep indefinitely until woken
+		 * up by a user adding a route
+		 */
+		if (!lnet_router_checker_active())
+			wait_event_interruptible(the_lnet.ln_rc_waitq,
+						 lnet_router_checker_active());
+		else
+			wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
+							 false,
+							 cfs_time_seconds(1));
 	}
 
-	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
-
 	lnet_prune_rc_data(1); /* wait for UNLINK */
 
 	the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
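
Taken together, lnet_router_checker_active(), the wake_up() added to lnet_router_checker_stop() and the wait loop above are the standard lost-wakeup-safe pattern: the sleep condition is re-evaluated by wait_event_interruptible() after the waiter is queued, so a wake_up() racing with the check cannot be missed, and the stop path explicitly wakes the thread so it observes the state change. When there is periodic work, the timeout variant with a constant-false condition serves purely as an interruptible one-second sleep. A stripped-down sketch of the same shape; all names are illustrative rather than the LNet ones, and the flags would need proper locking or READ_ONCE/WRITE_ONCE treatment in real code:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(worker_waitq);
static bool worker_stopping;
static bool worker_has_work;

static bool worker_should_wake(void)
{
	/* never sleep indefinitely once shutdown has been requested */
	return worker_stopping || worker_has_work;
}

static int worker_thread(void *arg)
{
	while (!worker_stopping) {
		/* ... one pass of work ... */

		if (!worker_should_wake()) {
			wait_event_interruptible(worker_waitq,
						 worker_should_wake());
		} else {
			/* condition is always false: just a 1s interruptible sleep */
			wait_event_interruptible_timeout(worker_waitq, false,
							 HZ);
		}
	}
	return 0;
}

static void worker_stop(void)
{
	worker_stopping = true;
	wake_up(&worker_waitq);		/* mirrors the wake_up() added above */
}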
@@ -1216,7 +1293,7 @@
 	return 0;
 }
 
-static void
+void
 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
 {
 	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
@@ -1237,7 +1314,7 @@
 	int i;
 
 	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
-	if (rb == NULL)
+	if (!rb)
 		return NULL;
 
 	rb->rb_pool = rbp;
@@ -1246,7 +1323,7 @@
 		page = alloc_pages_node(
 				cfs_cpt_spread_node(lnet_cpt_table(), cpt),
 				GFP_KERNEL | __GFP_ZERO, 0);
-		if (page == NULL) {
+		if (!page) {
 			while (--i >= 0)
 				__free_page(rb->rb_kiov[i].kiov_page);
 
@@ -1263,66 +1340,119 @@
 }
 
 static void
-lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
+lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
 {
 	int npages = rbp->rbp_npages;
-	int nbuffers = 0;
+	struct list_head tmp;
 	lnet_rtrbuf_t *rb;
 
-	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
+	if (!rbp->rbp_nbuffers) /* not initialized or already freed */
 		return;
 
-	LASSERT(list_empty(&rbp->rbp_msgs));
-	LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers);
+	INIT_LIST_HEAD(&tmp);
 
-	while (!list_empty(&rbp->rbp_bufs)) {
-		LASSERT(rbp->rbp_credits > 0);
+	lnet_net_lock(cpt);
+	lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
+	list_splice_init(&rbp->rbp_bufs, &tmp);
+	rbp->rbp_req_nbuffers = 0;
+	rbp->rbp_nbuffers = 0;
+	rbp->rbp_credits = 0;
+	rbp->rbp_mincredits = 0;
+	lnet_net_unlock(cpt);
 
-		rb = list_entry(rbp->rbp_bufs.next,
-				    lnet_rtrbuf_t, rb_list);
+	/* Free buffers on the free list. */
+	while (!list_empty(&tmp)) {
+		rb = list_entry(tmp.next, lnet_rtrbuf_t, rb_list);
 		list_del(&rb->rb_list);
 		lnet_destroy_rtrbuf(rb, npages);
-		nbuffers++;
 	}
-
-	LASSERT(rbp->rbp_nbuffers == nbuffers);
-	LASSERT(rbp->rbp_credits == nbuffers);
-
-	rbp->rbp_nbuffers = rbp->rbp_credits = 0;
 }
 
 static int
-lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
+lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
 {
+	struct list_head rb_list;
 	lnet_rtrbuf_t *rb;
-	int i;
+	int num_rb;
+	int num_buffers = 0;
+	int old_req_nbufs;
+	int npages = rbp->rbp_npages;
 
-	if (rbp->rbp_nbuffers != 0) {
-		LASSERT(rbp->rbp_nbuffers == nbufs);
+	lnet_net_lock(cpt);
+	/*
+	 * If we are called for less buffers than already in the pool, we
+	 * just lower the req_nbuffers number and excess buffers will be
+	 * thrown away as they are returned to the free list.  Credits
+	 * then get adjusted as well.
+	 * If we already have enough buffers allocated to serve the
+	 * increase requested, then we can treat that the same way as we
+	 * do the decrease.
+	 */
+	num_rb = nbufs - rbp->rbp_nbuffers;
+	if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
+		rbp->rbp_req_nbuffers = nbufs;
+		lnet_net_unlock(cpt);
 		return 0;
 	}
+	/*
+	 * store the older value of rbp_req_nbuffers and then set it to
+	 * the new request to prevent lnet_return_rx_credits_locked() from
+	 * freeing buffers that we need to keep around
+	 */
+	old_req_nbufs = rbp->rbp_req_nbuffers;
+	rbp->rbp_req_nbuffers = nbufs;
+	lnet_net_unlock(cpt);
 
-	for (i = 0; i < nbufs; i++) {
+	INIT_LIST_HEAD(&rb_list);
+
+	/*
+	 * allocate the buffers on a local list first.  If all buffers are
+	 * allocated successfully then join this list to the rbp buffer
+	 * list. If not then free all allocated buffers.
+	 */
+	while (num_rb-- > 0) {
 		rb = lnet_new_rtrbuf(rbp, cpt);
+		if (!rb) {
+			CERROR("Failed to allocate %d route bufs of %d pages\n",
+			       nbufs, npages);
 
-		if (rb == NULL) {
-			CERROR("Failed to allocate %d router bufs of %d pages\n",
-			       nbufs, rbp->rbp_npages);
-			return -ENOMEM;
+			lnet_net_lock(cpt);
+			rbp->rbp_req_nbuffers = old_req_nbufs;
+			lnet_net_unlock(cpt);
+
+			goto failed;
 		}
 
-		rbp->rbp_nbuffers++;
-		rbp->rbp_credits++;
-		rbp->rbp_mincredits++;
-		list_add(&rb->rb_list, &rbp->rbp_bufs);
-
-		/* No allocation "under fire" */
-		/* Otherwise we'd need code to schedule blocked msgs etc */
-		LASSERT(!the_lnet.ln_routing);
+		list_add(&rb->rb_list, &rb_list);
+		num_buffers++;
 	}
 
-	LASSERT(rbp->rbp_credits == nbufs);
+	lnet_net_lock(cpt);
+
+	list_splice_tail(&rb_list, &rbp->rbp_bufs);
+	rbp->rbp_nbuffers += num_buffers;
+	rbp->rbp_credits += num_buffers;
+	rbp->rbp_mincredits = rbp->rbp_credits;
+	/*
+	 * We need to schedule blocked msg using the newly
+	 * added buffers.
+	 */
+	while (!list_empty(&rbp->rbp_bufs) &&
+	       !list_empty(&rbp->rbp_msgs))
+		lnet_schedule_blocked_locked(rbp);
+
+	lnet_net_unlock(cpt);
+
 	return 0;
+
+failed:
+	while (!list_empty(&rb_list)) {
+		rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
+		list_del(&rb->rb_list);
+		lnet_destroy_rtrbuf(rb, npages);
+	}
+
+	return -ENOMEM;
 }
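
The rewritten lnet_rtrpool_free_bufs()/lnet_rtrpool_adjust_bufs() pair relies on two related list idioms: on teardown, detach everything onto a private list while holding the lock and free it after unlocking; on growth, build the new buffers on a private list first and only splice them into the pool (and bump the credits) once every allocation has succeeded, so a mid-way failure leaves the pool untouched. A generic sketch of the grow-side idiom with made-up item and pool types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
};

struct pool {
	spinlock_t lock;
	struct list_head items;
	int nitems;
};

/* Add 'count' items to the pool: either all of them are published or none. */
static int pool_grow(struct pool *p, int count)
{
	LIST_HEAD(staging);
	struct item *it, *tmp;
	int i;

	for (i = 0; i < count; i++) {
		it = kzalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto failed;
		list_add(&it->link, &staging);
	}

	spin_lock(&p->lock);
	list_splice_tail(&staging, &p->items);	/* publish in a single step */
	p->nitems += count;
	spin_unlock(&p->lock);
	return 0;

failed:
	list_for_each_entry_safe(it, tmp, &staging, link) {
		list_del(&it->link);
		kfree(it);
	}
	return -ENOMEM;
}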
 
 static void
@@ -1337,26 +1467,28 @@
 }
 
 void
-lnet_rtrpools_free(void)
+lnet_rtrpools_free(int keep_pools)
 {
 	lnet_rtrbufpool_t *rtrp;
 	int i;
 
-	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
+	if (!the_lnet.ln_rtrpools) /* uninitialized or freed */
 		return;
 
 	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
-		lnet_rtrpool_free_bufs(&rtrp[0]);
-		lnet_rtrpool_free_bufs(&rtrp[1]);
-		lnet_rtrpool_free_bufs(&rtrp[2]);
+		lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
+		lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
+		lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
 	}
 
-	cfs_percpt_free(the_lnet.ln_rtrpools);
-	the_lnet.ln_rtrpools = NULL;
+	if (!keep_pools) {
+		cfs_percpt_free(the_lnet.ln_rtrpools);
+		the_lnet.ln_rtrpools = NULL;
+	}
 }
 
 static int
-lnet_nrb_tiny_calculate(int npages)
+lnet_nrb_tiny_calculate(void)
 {
 	int nrbs = LNET_NRB_TINY;
 
@@ -1375,7 +1507,7 @@
 }
 
 static int
-lnet_nrb_small_calculate(int npages)
+lnet_nrb_small_calculate(void)
 {
 	int nrbs = LNET_NRB_SMALL;
 
@@ -1394,7 +1526,7 @@
 }
 
 static int
-lnet_nrb_large_calculate(int npages)
+lnet_nrb_large_calculate(void)
 {
 	int nrbs = LNET_NRB_LARGE;
 
@@ -1416,16 +1548,12 @@
 lnet_rtrpools_alloc(int im_a_router)
 {
 	lnet_rtrbufpool_t *rtrp;
-	int large_pages;
-	int small_pages = 1;
 	int nrb_tiny;
 	int nrb_small;
 	int nrb_large;
 	int rc;
 	int i;
 
-	large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
 	if (!strcmp(forwarding, "")) {
 		/* not set either way */
 		if (!im_a_router)
@@ -1440,41 +1568,46 @@
 		return -EINVAL;
 	}
 
-	nrb_tiny = lnet_nrb_tiny_calculate(0);
+	nrb_tiny = lnet_nrb_tiny_calculate();
 	if (nrb_tiny < 0)
 		return -EINVAL;
 
-	nrb_small = lnet_nrb_small_calculate(small_pages);
+	nrb_small = lnet_nrb_small_calculate();
 	if (nrb_small < 0)
 		return -EINVAL;
 
-	nrb_large = lnet_nrb_large_calculate(large_pages);
+	nrb_large = lnet_nrb_large_calculate();
 	if (nrb_large < 0)
 		return -EINVAL;
 
 	the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
 						LNET_NRBPOOLS *
 						sizeof(lnet_rtrbufpool_t));
-	if (the_lnet.ln_rtrpools == NULL) {
+	if (!the_lnet.ln_rtrpools) {
 		LCONSOLE_ERROR_MSG(0x10c,
 				   "Failed to initialize router buffe pool\n");
 		return -ENOMEM;
 	}
 
 	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
-		lnet_rtrpool_init(&rtrp[0], 0);
-		rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
-		if (rc != 0)
+		lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
+		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
+					      nrb_tiny, i);
+		if (rc)
 			goto failed;
 
-		lnet_rtrpool_init(&rtrp[1], small_pages);
-		rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
-		if (rc != 0)
+		lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
+				  LNET_NRB_SMALL_PAGES);
+		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
+					      nrb_small, i);
+		if (rc)
 			goto failed;
 
-		lnet_rtrpool_init(&rtrp[2], large_pages);
-		rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
-		if (rc != 0)
+		lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
+				  LNET_NRB_LARGE_PAGES);
+		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
+					      nrb_large, i);
+		if (rc)
 			goto failed;
 	}
 
@@ -1485,10 +1618,118 @@
 	return 0;
 
  failed:
-	lnet_rtrpools_free();
+	lnet_rtrpools_free(0);
 	return rc;
 }
 
+static int
+lnet_rtrpools_adjust_helper(int tiny, int small, int large)
+{
+	int nrb = 0;
+	int rc = 0;
+	int i;
+	lnet_rtrbufpool_t *rtrp;
+
+	/*
+	 * If the provided values for each buffer pool are different than the
+	 * configured values, we need to take action.
+	 */
+	if (tiny >= 0) {
+		tiny_router_buffers = tiny;
+		nrb = lnet_nrb_tiny_calculate();
+		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
+						      nrb, i);
+			if (rc)
+				return rc;
+		}
+	}
+	if (small >= 0) {
+		small_router_buffers = small;
+		nrb = lnet_nrb_small_calculate();
+		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
+						      nrb, i);
+			if (rc)
+				return rc;
+		}
+	}
+	if (large >= 0) {
+		large_router_buffers = large;
+		nrb = lnet_nrb_large_calculate();
+		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
+						      nrb, i);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
+int
+lnet_rtrpools_adjust(int tiny, int small, int large)
+{
+	/*
+	 * this function doesn't revert the changes if adding new buffers
+	 * failed.  It's up to the user space caller to revert the
+	 * changes.
+	 */
+	if (!the_lnet.ln_routing)
+		return 0;
+
+	return lnet_rtrpools_adjust_helper(tiny, small, large);
+}
+
+int
+lnet_rtrpools_enable(void)
+{
+	int rc;
+
+	if (the_lnet.ln_routing)
+		return 0;
+
+	if (!the_lnet.ln_rtrpools)
+		/*
+		 * If routing is turned off, and we have never
+		 * initialized the pools before, just call the
+		 * standard buffer pool allocation routine as
+		 * if we are just configuring this for the first
+		 * time.
+		 */
+		return lnet_rtrpools_alloc(1);
+
+	rc = lnet_rtrpools_adjust_helper(0, 0, 0);
+	if (rc)
+		return rc;
+
+	lnet_net_lock(LNET_LOCK_EX);
+	the_lnet.ln_routing = 1;
+
+	the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	return 0;
+}
+
+void
+lnet_rtrpools_disable(void)
+{
+	if (!the_lnet.ln_routing)
+		return;
+
+	lnet_net_lock(LNET_LOCK_EX);
+	the_lnet.ln_routing = 0;
+	the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+
+	tiny_router_buffers = 0;
+	small_router_buffers = 0;
+	large_router_buffers = 0;
+	lnet_net_unlock(LNET_LOCK_EX);
+	lnet_rtrpools_free(1);
+}
+
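
A side note on the new lnet_rtrpools_adjust()/lnet_rtrpools_enable()/lnet_rtrpools_disable() trio above: because the helper only acts on arguments that are >= 0, a negative value means "leave that pool class unchanged", so a caller can resize one class in isolation. As the comment in lnet_rtrpools_adjust() says, a failed grow is not rolled back here; that is left to user space. Purely illustrative fragment, not taken from the patch:

	/* resize only the small-buffer pools; tiny and large stay as configured */
	int rc = lnet_rtrpools_adjust(-1, 1024, -1);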
 int
 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 {
@@ -1499,28 +1740,28 @@
 	LASSERT(!in_interrupt());
 
 	CDEBUG(D_NET, "%s notifying %s: %s\n",
-		(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
-		libcfs_nid2str(nid),
-		alive ? "up" : "down");
+	       !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
+	       libcfs_nid2str(nid),
+	       alive ? "up" : "down");
 
-	if (ni != NULL &&
+	if (ni &&
 	    LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
 		CWARN("Ignoring notification of %s %s by %s (different net)\n",
-			libcfs_nid2str(nid), alive ? "birth" : "death",
-			libcfs_nid2str(ni->ni_nid));
+		      libcfs_nid2str(nid), alive ? "birth" : "death",
+		      libcfs_nid2str(ni->ni_nid));
 		return -EINVAL;
 	}
 
 	/* can't do predictions... */
 	if (cfs_time_after(when, now)) {
 		CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
-		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+		      !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
 		      libcfs_nid2str(nid), alive ? "up" : "down",
 		      cfs_duration_sec(cfs_time_sub(when, now)));
 		return -EINVAL;
 	}
 
-	if (ni != NULL && !alive &&	     /* LND telling me she's down */
+	if (ni && !alive &&	     /* LND telling me she's down */
 	    !auto_down) {		       /* auto-down disabled */
 		CDEBUG(D_NET, "Auto-down disabled\n");
 		return 0;
@@ -1534,21 +1775,23 @@
 	}
 
 	lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
-	if (lp == NULL) {
+	if (!lp) {
 		/* nid not found */
 		lnet_net_unlock(cpt);
 		CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
 		return 0;
 	}
 
-	/* We can't fully trust LND on reporting exact peer last_alive
+	/*
+	 * We can't fully trust LND on reporting exact peer last_alive
 	 * if he notifies us about dead peer. For example ksocklnd can
 	 * call us with when == _time_when_the_node_was_booted_ if
-	 * no connections were successfully established */
-	if (ni != NULL && !alive && when < lp->lp_last_alive)
+	 * no connections were successfully established
+	 */
+	if (ni && !alive && when < lp->lp_last_alive)
 		when = lp->lp_last_alive;
 
-	lnet_notify_locked(lp, ni == NULL, alive, when);
+	lnet_notify_locked(lp, !ni, alive, when);
 
 	lnet_ni_notify_locked(ni, lp);
 
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 396c7c4..fc643df 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -15,18 +15,16 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Portals; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
 #include "../../include/linux/libcfs/libcfs.h"
 #include "../../include/linux/lnet/lib-lnet.h"
 
-/* This is really lnet_proc.c. You might need to update sanity test 215
- * if any file format is changed. */
+/*
+ * This is really lnet_proc.c. You might need to update sanity test 215
+ * if any file format is changed.
+ */
 
 #define LNET_LOFFT_BITS		(sizeof(loff_t) * 8)
 /*
@@ -76,9 +74,10 @@
 #define LNET_PROC_VERSION(v)	((unsigned int)((v) & LNET_PROC_VER_MASK))
 
 static int proc_call_handler(void *data, int write, loff_t *ppos,
-		void __user *buffer, size_t *lenp,
-		int (*handler)(void *data, int write,
-		loff_t pos, void __user *buffer, int len))
+			     void __user *buffer, size_t *lenp,
+			     int (*handler)(void *data, int write,
+					    loff_t pos, void __user *buffer,
+					    int len))
 {
 	int rc = handler(data, write, *ppos, buffer, *lenp);
 
@@ -111,11 +110,11 @@
 	/* read */
 
 	LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
-	if (ctrs == NULL)
+	if (!ctrs)
 		return -ENOMEM;
 
 	LIBCFS_ALLOC(tmpstr, tmpsiz);
-	if (tmpstr == NULL) {
+	if (!tmpstr) {
 		LIBCFS_FREE(ctrs, sizeof(*ctrs));
 		return -ENOMEM;
 	}
@@ -167,16 +166,16 @@
 
 	LASSERT(!write);
 
-	if (*lenp == 0)
+	if (!*lenp)
 		return 0;
 
 	LIBCFS_ALLOC(tmpstr, tmpsiz);
-	if (tmpstr == NULL)
+	if (!tmpstr)
 		return -ENOMEM;
 
 	s = tmpstr; /* points to current position in tmpstr[] */
 
-	if (*ppos == 0) {
+	if (!*ppos) {
 		s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
 			      the_lnet.ln_routing ? "enabled" : "disabled");
 		LASSERT(tmpstr + tmpsiz - s > 0);
@@ -206,23 +205,22 @@
 			return -ESTALE;
 		}
 
-		for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL;
-		     i++) {
+		for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) {
 			rn_list = &the_lnet.ln_remote_nets_hash[i];
 
 			n = rn_list->next;
 
-			while (n != rn_list && route == NULL) {
+			while (n != rn_list && !route) {
 				rnet = list_entry(n, lnet_remotenet_t,
-						      lrn_list);
+						  lrn_list);
 
 				r = rnet->lrn_routes.next;
 
 				while (r != &rnet->lrn_routes) {
 					lnet_route_t *re =
 						list_entry(r, lnet_route_t,
-							       lr_list);
-					if (skip == 0) {
+							   lr_list);
+					if (!skip) {
 						route = re;
 						break;
 					}
@@ -235,12 +233,12 @@
 			}
 		}
 
-		if (route != NULL) {
+		if (route) {
 			__u32 net = rnet->lrn_net;
 			unsigned int hops = route->lr_hops;
 			unsigned int priority = route->lr_priority;
 			lnet_nid_t nid = route->lr_gateway->lp_nid;
-			int alive = route->lr_gateway->lp_alive;
+			int alive = lnet_is_route_alive(route);
 
 			s += snprintf(s, tmpstr + tmpsiz - s,
 				      "%-8s %4u %8u %7s %s\n",
@@ -259,9 +257,9 @@
 	if (len > *lenp) {    /* linux-supplied buffer is too small */
 		rc = -EINVAL;
 	} else if (len > 0) { /* wrote something */
-		if (copy_to_user(buffer, tmpstr, len))
+		if (copy_to_user(buffer, tmpstr, len)) {
 			rc = -EFAULT;
-		else {
+		} else {
 			off += 1;
 			*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
 		}
@@ -269,7 +267,7 @@
 
 	LIBCFS_FREE(tmpstr, tmpsiz);
 
-	if (rc == 0)
+	if (!rc)
 		*lenp = len;
 
 	return rc;
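
The proc read handlers touched in this file all share one shape: format the whole report into a temporary kernel buffer with bounded snprintf() calls, refuse the request if the caller's buffer is smaller than what was produced, otherwise copy_to_user() the result and report the length back through *lenp. A condensed sketch with hypothetical names; the real handlers additionally encode an iterator/version cookie into *ppos, which is omitted here:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int report_read(char __user *buffer, size_t *lenp)
{
	const int tmpsiz = 256;
	char *tmpstr, *s;
	int len, rc = 0;

	tmpstr = kzalloc(tmpsiz, GFP_KERNEL);
	if (!tmpstr)
		return -ENOMEM;

	s = tmpstr;	/* current position in tmpstr[] */
	s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n", "enabled");

	len = s - tmpstr;		/* bytes actually formatted */
	if (len > *lenp)		/* caller-supplied buffer too small */
		rc = -EINVAL;
	else if (len > 0 && copy_to_user(buffer, tmpstr, len))
		rc = -EFAULT;

	kfree(tmpstr);
	if (!rc)
		*lenp = len;
	return rc;
}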
@@ -291,16 +289,16 @@
 
 	LASSERT(!write);
 
-	if (*lenp == 0)
+	if (!*lenp)
 		return 0;
 
 	LIBCFS_ALLOC(tmpstr, tmpsiz);
-	if (tmpstr == NULL)
+	if (!tmpstr)
 		return -ENOMEM;
 
 	s = tmpstr; /* points to current position in tmpstr[] */
 
-	if (*ppos == 0) {
+	if (!*ppos) {
 		s += snprintf(s, tmpstr + tmpsiz - s,
 			      "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
 			      "ref", "rtr_ref", "alive_cnt", "state",
@@ -330,9 +328,9 @@
 
 		while (r != &the_lnet.ln_routers) {
 			lnet_peer_t *lp = list_entry(r, lnet_peer_t,
-							 lp_rtr_list);
+						     lp_rtr_list);
 
-			if (skip == 0) {
+			if (!skip) {
 				peer = lp;
 				break;
 			}
@@ -341,7 +339,7 @@
 			r = r->next;
 		}
 
-		if (peer != NULL) {
+		if (peer) {
 			lnet_nid_t nid = peer->lp_nid;
 			unsigned long now = cfs_time_current();
 			unsigned long deadline = peer->lp_ping_deadline;
@@ -356,19 +354,21 @@
 			lnet_route_t *rtr;
 
 			if ((peer->lp_ping_feats &
-			     LNET_PING_FEAT_NI_STATUS) != 0) {
+			     LNET_PING_FEAT_NI_STATUS)) {
 				list_for_each_entry(rtr, &peer->lp_routes,
-							lr_gwlist) {
-					/* downis on any route should be the
-					 * number of downis on the gateway */
-					if (rtr->lr_downis != 0) {
+						    lr_gwlist) {
+					/*
+					 * downis on any route should be the
+					 * number of downis on the gateway
+					 */
+					if (rtr->lr_downis) {
 						down_ni = rtr->lr_downis;
 						break;
 					}
 				}
 			}
 
-			if (deadline == 0)
+			if (!deadline)
 				s += snprintf(s, tmpstr + tmpsiz - s,
 					      "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
 					      nrefs, nrtrrefs, alive_cnt,
@@ -394,9 +394,9 @@
 	if (len > *lenp) {    /* linux-supplied buffer is too small */
 		rc = -EINVAL;
 	} else if (len > 0) { /* wrote something */
-		if (copy_to_user(buffer, tmpstr, len))
+		if (copy_to_user(buffer, tmpstr, len)) {
 			rc = -EFAULT;
-		else {
+		} else {
 			off += 1;
 			*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
 		}
@@ -404,7 +404,7 @@
 
 	LIBCFS_FREE(tmpstr, tmpsiz);
 
-	if (rc == 0)
+	if (!rc)
 		*lenp = len;
 
 	return rc;
@@ -427,7 +427,7 @@
 	CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
 	LASSERT(!write);
 
-	if (*lenp == 0)
+	if (!*lenp)
 		return 0;
 
 	if (cpt >= LNET_CPT_NUMBER) {
@@ -436,12 +436,12 @@
 	}
 
 	LIBCFS_ALLOC(tmpstr, tmpsiz);
-	if (tmpstr == NULL)
+	if (!tmpstr)
 		return -ENOMEM;
 
 	s = tmpstr; /* points to current position in tmpstr[] */
 
-	if (*ppos == 0) {
+	if (!*ppos) {
 		s += snprintf(s, tmpstr + tmpsiz - s,
 			      "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
 			      "nid", "refs", "state", "last", "max",
@@ -470,18 +470,20 @@
 		}
 
 		while (hash < LNET_PEER_HASH_SIZE) {
-			if (p == NULL)
+			if (!p)
 				p = ptable->pt_hash[hash].next;
 
 			while (p != &ptable->pt_hash[hash]) {
 				lnet_peer_t *lp = list_entry(p, lnet_peer_t,
-								 lp_hashlist);
-				if (skip == 0) {
+							     lp_hashlist);
+				if (!skip) {
 					peer = lp;
 
-					/* minor optimization: start from idx+1
+					/*
+					 * minor optimization: start from idx+1
 					 * on next iteration if we've just
-					 * drained lp_hashlist */
+					 * drained lp_hashlist
+					 */
 					if (lp->lp_hashlist.next ==
 					    &ptable->pt_hash[hash]) {
 						hoff = 1;
@@ -497,7 +499,7 @@
 				p = lp->lp_hashlist.next;
 			}
 
-			if (peer != NULL)
+			if (peer)
 				break;
 
 			p = NULL;
@@ -505,7 +507,7 @@
 			hash++;
 		}
 
-		if (peer != NULL) {
+		if (peer) {
 			lnet_nid_t nid = peer->lp_nid;
 			int nrefs = peer->lp_refcount;
 			int lastalive = -1;
@@ -553,7 +555,7 @@
 			cpt++;
 			hash = 0;
 			hoff = 1;
-			if (peer == NULL && cpt < LNET_CPT_NUMBER)
+			if (!peer && cpt < LNET_CPT_NUMBER)
 				goto again;
 		}
 	}
@@ -571,7 +573,7 @@
 
 	LIBCFS_FREE(tmpstr, tmpsiz);
 
-	if (rc == 0)
+	if (!rc)
 		*lenp = len;
 
 	return rc;
@@ -593,7 +595,7 @@
 	/* (4 %d) * 4 * LNET_CPT_NUMBER */
 	tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
 	LIBCFS_ALLOC(tmpstr, tmpsiz);
-	if (tmpstr == NULL)
+	if (!tmpstr)
 		return -ENOMEM;
 
 	s = tmpstr; /* points to current position in tmpstr[] */
@@ -603,7 +605,7 @@
 		      "pages", "count", "credits", "min");
 	LASSERT(tmpstr + tmpsiz - s > 0);
 
-	if (the_lnet.ln_rtrpools == NULL)
+	if (!the_lnet.ln_rtrpools)
 		goto out; /* I'm not a router */
 
 	for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
@@ -653,16 +655,16 @@
 
 	LASSERT(!write);
 
-	if (*lenp == 0)
+	if (!*lenp)
 		return 0;
 
 	LIBCFS_ALLOC(tmpstr, tmpsiz);
-	if (tmpstr == NULL)
+	if (!tmpstr)
 		return -ENOMEM;
 
 	s = tmpstr; /* points to current position in tmpstr[] */
 
-	if (*ppos == 0) {
+	if (!*ppos) {
 		s += snprintf(s, tmpstr + tmpsiz - s,
 			      "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
 			      "nid", "status", "alive", "refs", "peer",
@@ -680,7 +682,7 @@
 		while (n != &the_lnet.ln_nis) {
 			lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
 
-			if (skip == 0) {
+			if (!skip) {
 				ni = a_ni;
 				break;
 			}
@@ -689,7 +691,7 @@
 			n = n->next;
 		}
 
-		if (ni != NULL) {
+		if (ni) {
 			struct lnet_tx_queue *tq;
 			char *stat;
 			time64_t now = ktime_get_real_seconds();
@@ -705,15 +707,17 @@
 				last_alive = 0;
 
 			lnet_ni_lock(ni);
-			LASSERT(ni->ni_status != NULL);
+			LASSERT(ni->ni_status);
 			stat = (ni->ni_status->ns_status ==
 				LNET_NI_STATUS_UP) ? "up" : "down";
 			lnet_ni_unlock(ni);
 
-			/* we actually output credits information for
-			 * TX queue of each partition */
+			/*
+			 * we actually output credits information for
+			 * TX queue of each partition
+			 */
 			cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
-				for (j = 0; ni->ni_cpts != NULL &&
+				for (j = 0; ni->ni_cpts &&
 				     j < ni->ni_ncpts; j++) {
 					if (i == ni->ni_cpts[j])
 						break;
@@ -722,18 +726,19 @@
 				if (j == ni->ni_ncpts)
 					continue;
 
-				if (i != 0)
+				if (i)
 					lnet_net_lock(i);
 
 				s += snprintf(s, tmpstr + tmpsiz - s,
-				      "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
-				      libcfs_nid2str(ni->ni_nid), stat,
-				      last_alive, *ni->ni_refs[i],
-				      ni->ni_peertxcredits,
-				      ni->ni_peerrtrcredits,
-				      tq->tq_credits_max,
-				      tq->tq_credits, tq->tq_credits_min);
-				if (i != 0)
+					      "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
+					      libcfs_nid2str(ni->ni_nid), stat,
+					      last_alive, *ni->ni_refs[i],
+					      ni->ni_peertxcredits,
+					      ni->ni_peerrtrcredits,
+					      tq->tq_credits_max,
+					      tq->tq_credits,
+					      tq->tq_credits_min);
+				if (i)
 					lnet_net_unlock(i);
 			}
 			LASSERT(tmpstr + tmpsiz - s > 0);
@@ -755,7 +760,7 @@
 
 	LIBCFS_FREE(tmpstr, tmpsiz);
 
-	if (rc == 0)
+	if (!rc)
 		*lenp = len;
 
 	return rc;
@@ -795,8 +800,6 @@
 	},
 };
 
-extern int portal_rotor;
-
 static int __proc_lnet_portal_rotor(void *data, int write,
 				    loff_t pos, void __user *buffer, int nob)
 {
@@ -807,7 +810,7 @@
 	int i;
 
 	LIBCFS_ALLOC(buf, buf_len);
-	if (buf == NULL)
+	if (!buf)
 		return -ENOMEM;
 
 	if (!write) {
@@ -831,7 +834,7 @@
 			rc = 0;
 		} else {
 			rc = cfs_trace_copyout_string(buffer, nob,
-					buf + pos, "\n");
+						      buf + pos, "\n");
 		}
 		goto out;
 	}
@@ -844,9 +847,9 @@
 
 	rc = -EINVAL;
 	lnet_res_lock(0);
-	for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
-		if (strncasecmp(portal_rotors[i].pr_name, tmp,
-				strlen(portal_rotors[i].pr_name)) == 0) {
+	for (i = 0; portal_rotors[i].pr_name; i++) {
+		if (!strncasecmp(portal_rotors[i].pr_name, tmp,
+				 strlen(portal_rotors[i].pr_name))) {
 			portal_rotor = portal_rotors[i].pr_value;
 			rc = 0;
 			break;
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index 1f04cc1..b71f4b4 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -58,7 +58,7 @@
 
 	list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
 		bulk = tsu->tsu_private;
-		if (bulk == NULL)
+		if (!bulk)
 			continue;
 
 		srpc_free_bulk(bulk);
@@ -77,25 +77,29 @@
 	srpc_bulk_t	 *bulk;
 	sfw_test_unit_t	 *tsu;
 
-	LASSERT(sn != NULL);
+	LASSERT(sn);
 	LASSERT(tsi->tsi_is_client);
 
-	if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+	if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
 		test_bulk_req_t  *breq = &tsi->tsi_u.bulk_v0;
 
 		opc   = breq->blk_opc;
 		flags = breq->blk_flags;
 		npg   = breq->blk_npg;
-		/* NB: this is not going to work for variable page size,
-		 * but we have to keep it for compatibility */
+		/*
+		 * NB: this is not going to work for variable page size,
+		 * but we have to keep it for compatibility
+		 */
 		len   = npg * PAGE_CACHE_SIZE;
 
 	} else {
 		test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
 
-		/* I should never get this step if it's unknown feature
-		 * because make_session will reject unknown feature */
-		LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+		/*
+		 * I should never get this step if it's unknown feature
+		 * because make_session will reject unknown feature
+		 */
+		LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
 
 		opc   = breq->blk_opc;
 		flags = breq->blk_flags;
@@ -116,7 +120,7 @@
 	list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
 		bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
 				       npg, len, opc == LST_BRW_READ);
-		if (bulk == NULL) {
+		if (!bulk) {
 			brw_client_fini(tsi);
 			return -ENOMEM;
 		}
@@ -141,7 +145,7 @@
 
 	ktime_get_ts64(&ts);
 
-	if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0)
+	if (!((ts.tv_nsec / NSEC_PER_USEC) & 1))
 		return 0;
 
 	return brw_inject_errors--;
@@ -153,7 +157,7 @@
 	char *addr = page_address(pg);
 	int   i;
 
-	LASSERT(addr != NULL);
+	LASSERT(addr);
 
 	if (pattern == LST_BRW_CHECK_NONE)
 		return;
@@ -184,7 +188,7 @@
 	__u64  data = 0; /* make compiler happy */
 	int    i;
 
-	LASSERT(addr != NULL);
+	LASSERT(addr);
 
 	if (pattern == LST_BRW_CHECK_NONE)
 		return 0;
@@ -216,7 +220,7 @@
 
 bad_data:
 	CERROR("Bad data in page %p: %#llx, %#llx expected\n",
-		pg, data, magic);
+	       pg, data, magic);
 	return 1;
 }
 
@@ -240,9 +244,9 @@
 
 	for (i = 0; i < bk->bk_niov; i++) {
 		pg = bk->bk_iovs[i].kiov_page;
-		if (brw_check_page(pg, pattern, magic) != 0) {
+		if (brw_check_page(pg, pattern, magic)) {
 			CERROR("Bulk page %p (%d/%d) is corrupted!\n",
-				pg, i, bk->bk_niov);
+			       pg, i, bk->bk_niov);
 			return 1;
 		}
 	}
@@ -252,7 +256,7 @@
 
 static int
 brw_client_prep_rpc(sfw_test_unit_t *tsu,
-		     lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
+		    lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
 {
 	srpc_bulk_t *bulk = tsu->tsu_private;
 	sfw_test_instance_t *tsi = tsu->tsu_instance;
@@ -265,10 +269,10 @@
 	int opc;
 	int rc;
 
-	LASSERT(sn != NULL);
-	LASSERT(bulk != NULL);
+	LASSERT(sn);
+	LASSERT(bulk);
 
-	if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+	if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
 		test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
 
 		opc   = breq->blk_opc;
@@ -279,9 +283,11 @@
 	} else {
 		test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
 
-		/* I should never get this step if it's unknown feature
-		 * because make_session will reject unknown feature */
-		LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+		/*
+		 * I should never get this step if it's unknown feature
+		 * because make_session will reject unknown feature
+		 */
+		LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
 
 		opc   = breq->blk_opc;
 		flags = breq->blk_flags;
@@ -290,7 +296,7 @@
 	}
 
 	rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
@@ -318,11 +324,11 @@
 	srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
 	srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
 
-	LASSERT(sn != NULL);
+	LASSERT(sn);
 
-	if (rpc->crpc_status != 0) {
+	if (rpc->crpc_status) {
 		CERROR("BRW RPC to %s failed with %d\n",
-			libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
+		       libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
 		if (!tsi->tsi_stopping) /* rpc could have been aborted */
 			atomic_inc(&sn->sn_brw_errors);
 		goto out;
@@ -334,10 +340,10 @@
 	}
 
 	CDEBUG(reply->brw_status ? D_WARNING : D_NET,
-		"BRW RPC to %s finished with brw_status: %d\n",
-		libcfs_id2str(rpc->crpc_dest), reply->brw_status);
+	       "BRW RPC to %s finished with brw_status: %d\n",
+	       libcfs_id2str(rpc->crpc_dest), reply->brw_status);
 
-	if (reply->brw_status != 0) {
+	if (reply->brw_status) {
 		atomic_inc(&sn->sn_brw_errors);
 		rpc->crpc_status = -(int)reply->brw_status;
 		goto out;
@@ -346,9 +352,9 @@
 	if (reqst->brw_rw == LST_BRW_WRITE)
 		goto out;
 
-	if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
+	if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) {
 		CERROR("Bulk data from %s is corrupted!\n",
-			libcfs_id2str(rpc->crpc_dest));
+		       libcfs_id2str(rpc->crpc_dest));
 		atomic_inc(&sn->sn_brw_errors);
 		rpc->crpc_status = -EBADMSG;
 	}
@@ -362,17 +368,17 @@
 {
 	srpc_bulk_t *blk = rpc->srpc_bulk;
 
-	if (blk == NULL)
+	if (!blk)
 		return;
 
-	if (rpc->srpc_status != 0)
+	if (rpc->srpc_status)
 		CERROR("Bulk transfer %s %s has failed: %d\n",
-			blk->bk_sink ? "from" : "to",
-			libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
+		       blk->bk_sink ? "from" : "to",
+		       libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
 	else
 		CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
-			blk->bk_niov, blk->bk_sink ? "from" : "to",
-			libcfs_id2str(rpc->srpc_peer));
+		       blk->bk_niov, blk->bk_sink ? "from" : "to",
+		       libcfs_id2str(rpc->srpc_peer));
 
 	sfw_free_pages(rpc);
 }
@@ -385,16 +391,16 @@
 	srpc_brw_reqst_t *reqst;
 	srpc_msg_t *reqstmsg;
 
-	LASSERT(rpc->srpc_bulk != NULL);
-	LASSERT(rpc->srpc_reqstbuf != NULL);
+	LASSERT(rpc->srpc_bulk);
+	LASSERT(rpc->srpc_reqstbuf);
 
 	reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
 	reqst = &reqstmsg->msg_body.brw_reqst;
 
-	if (status != 0) {
+	if (status) {
 		CERROR("BRW bulk %s failed for RPC from %s: %d\n",
-			reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
-			libcfs_id2str(rpc->srpc_peer), status);
+		       reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
+		       libcfs_id2str(rpc->srpc_peer), status);
 		return -EIO;
 	}
 
@@ -404,9 +410,9 @@
 	if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
 		__swab64s(&magic);
 
-	if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
+	if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) {
 		CERROR("Bulk data from %s is corrupted!\n",
-			libcfs_id2str(rpc->srpc_peer));
+		       libcfs_id2str(rpc->srpc_peer));
 		reply->brw_status = EBADMSG;
 	}
 
@@ -448,15 +454,15 @@
 		return 0;
 	}
 
-	if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+	if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
 		replymsg->msg_ses_feats = LST_FEATS_MASK;
 		reply->brw_status = EPROTO;
 		return 0;
 	}
 
-	if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+	if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
 		/* compat with old version */
-		if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) {
+		if (reqst->brw_len & ~CFS_PAGE_MASK) {
 			reply->brw_status = EINVAL;
 			return 0;
 		}
@@ -468,7 +474,7 @@
 
 	replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
 
-	if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) {
+	if (!reqst->brw_len || npg > LNET_MAX_IOV) {
 		reply->brw_status = EINVAL;
 		return 0;
 	}
@@ -476,7 +482,7 @@
 	rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg,
 			     reqst->brw_len,
 			     reqst->brw_rw == LST_BRW_WRITE);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	if (reqst->brw_rw == LST_BRW_READ)
@@ -499,7 +505,6 @@
 srpc_service_t brw_test_service;
 void brw_init_test_service(void)
 {
-
 	brw_test_service.sv_id         = SRPC_SERVICE_BRW;
 	brw_test_service.sv_name       = "brw_test";
 	brw_test_service.sv_handler    = brw_server_handle;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index a534665..90b7771 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -51,20 +51,19 @@
 	char *name;
 	int rc;
 
-	if (args->lstio_ses_idp   == NULL || /* address for output sid */
-	    args->lstio_ses_key   == 0 ||    /* no key is specified */
-	    args->lstio_ses_namep == NULL || /* session name */
+	if (!args->lstio_ses_idp || /* address for output sid */
+	    !args->lstio_ses_key ||    /* no key is specified */
+	    !args->lstio_ses_namep || /* session name */
 	    args->lstio_ses_nmlen <= 0 ||
 	    args->lstio_ses_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_ses_namep,
-			       args->lstio_ses_nmlen)) {
+	if (copy_from_user(name, args->lstio_ses_namep,
+			   args->lstio_ses_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
 		return -EFAULT;
 	}
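
Every handler in conctl.c pulls its name strings in from user space the same way: validate the user-supplied length against LST_NAME_SIZE, allocate length + 1 bytes, copy_from_user(), and NUL-terminate by hand, since copy_from_user() does not do that. A condensed sketch of the shape as a hypothetical helper (the real handlers open-code it each time); NAME_SIZE_MAX stands in for LST_NAME_SIZE:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define NAME_SIZE_MAX	32	/* stand-in for LST_NAME_SIZE */

static char *copy_name_from_user(const char __user *unamep, int nmlen)
{
	char *name;

	if (!unamep || nmlen <= 0 || nmlen > NAME_SIZE_MAX)
		return ERR_PTR(-EINVAL);

	name = kmalloc(nmlen + 1, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(name, unamep, nmlen)) {
		kfree(name);
		return ERR_PTR(-EFAULT);
	}

	name[nmlen] = '\0';	/* the user buffer is not terminated for us */
	return name;
}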
@@ -96,11 +95,11 @@
 {
 	/* no checking of key */
 
-	if (args->lstio_ses_idp    == NULL || /* address for output sid */
-	    args->lstio_ses_keyp   == NULL || /* address for output key */
-	    args->lstio_ses_featp  == NULL || /* address for output features */
-	    args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */
-	    args->lstio_ses_namep  == NULL || /* address for output name */
+	if (!args->lstio_ses_idp || /* address for output sid */
+	    !args->lstio_ses_keyp || /* address for output key */
+	    !args->lstio_ses_featp || /* address for output features */
+	    !args->lstio_ses_ndinfo || /* address for output ndinfo */
+	    !args->lstio_ses_namep || /* address for output name */
 	    args->lstio_ses_nmlen  <= 0 ||
 	    args->lstio_ses_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
@@ -123,21 +122,21 @@
 	if (args->lstio_dbg_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_dbg_resultp == NULL)
+	if (!args->lstio_dbg_resultp)
 		return -EINVAL;
 
-	if (args->lstio_dbg_namep != NULL && /* name of batch/group */
+	if (args->lstio_dbg_namep && /* name of batch/group */
 	    (args->lstio_dbg_nmlen <= 0 ||
 	     args->lstio_dbg_nmlen > LST_NAME_SIZE))
 		return -EINVAL;
 
-	if (args->lstio_dbg_namep != NULL) {
+	if (args->lstio_dbg_namep) {
 		LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1);
-		if (name == NULL)
+		if (!name)
 			return -ENOMEM;
 
 		if (copy_from_user(name, args->lstio_dbg_namep,
-				       args->lstio_dbg_nmlen)) {
+				   args->lstio_dbg_nmlen)) {
 			LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
 
 			return -EFAULT;
@@ -157,7 +156,7 @@
 	case LST_OPC_BATCHSRV:
 		client = 0;
 	case LST_OPC_BATCHCLI:
-		if (name == NULL)
+		if (!name)
 			goto out;
 
 		rc = lstcon_batch_debug(args->lstio_dbg_timeout,
@@ -165,7 +164,7 @@
 		break;
 
 	case LST_OPC_GROUP:
-		if (name == NULL)
+		if (!name)
 			goto out;
 
 		rc = lstcon_group_debug(args->lstio_dbg_timeout,
@@ -174,7 +173,7 @@
 
 	case LST_OPC_NODES:
 		if (args->lstio_dbg_count <= 0 ||
-		    args->lstio_dbg_idsp == NULL)
+		    !args->lstio_dbg_idsp)
 			goto out;
 
 		rc = lstcon_nodes_debug(args->lstio_dbg_timeout,
@@ -188,7 +187,7 @@
 	}
 
 out:
-	if (name != NULL)
+	if (name)
 		LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
 
 	return rc;
@@ -203,18 +202,17 @@
 	if (args->lstio_grp_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_grp_namep == NULL ||
+	if (!args->lstio_grp_namep ||
 	    args->lstio_grp_nmlen <= 0 ||
 	    args->lstio_grp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_grp_namep,
-			       args->lstio_grp_nmlen)) {
+	if (copy_from_user(name, args->lstio_grp_namep,
+			   args->lstio_grp_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_grp_nmlen);
 		return -EFAULT;
 	}
@@ -237,18 +235,17 @@
 	if (args->lstio_grp_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_grp_namep == NULL ||
+	if (!args->lstio_grp_namep ||
 	    args->lstio_grp_nmlen <= 0 ||
 	    args->lstio_grp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_grp_namep,
-			       args->lstio_grp_nmlen)) {
+	if (copy_from_user(name, args->lstio_grp_namep,
+			   args->lstio_grp_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 		return -EFAULT;
 	}
@@ -271,14 +268,14 @@
 	if (args->lstio_grp_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_grp_resultp == NULL ||
-	    args->lstio_grp_namep == NULL ||
+	if (!args->lstio_grp_resultp ||
+	    !args->lstio_grp_namep ||
 	    args->lstio_grp_nmlen <= 0 ||
 	    args->lstio_grp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
 	if (copy_from_user(name,
@@ -301,7 +298,7 @@
 
 	case LST_GROUP_RMND:
 		if (args->lstio_grp_count  <= 0 ||
-		    args->lstio_grp_idsp == NULL) {
+		    !args->lstio_grp_idsp) {
 			rc = -EINVAL;
 			break;
 		}
@@ -330,21 +327,21 @@
 	if (args->lstio_grp_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_grp_idsp == NULL || /* array of ids */
+	if (!args->lstio_grp_idsp || /* array of ids */
 	    args->lstio_grp_count <= 0 ||
-	    args->lstio_grp_resultp == NULL ||
-	    args->lstio_grp_featp == NULL ||
-	    args->lstio_grp_namep == NULL ||
+	    !args->lstio_grp_resultp ||
+	    !args->lstio_grp_featp ||
+	    !args->lstio_grp_namep ||
 	    args->lstio_grp_nmlen <= 0 ||
 	    args->lstio_grp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
 	if (copy_from_user(name, args->lstio_grp_namep,
-			       args->lstio_grp_nmlen)) {
+			   args->lstio_grp_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 
 		return -EFAULT;
@@ -357,7 +354,7 @@
 			      args->lstio_grp_resultp);
 
 	LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-	if (rc == 0 &&
+	if (!rc &&
 	    copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
 		return -EINVAL;
 	}
@@ -372,7 +369,7 @@
 		return -EACCES;
 
 	if (args->lstio_grp_idx   < 0 ||
-	    args->lstio_grp_namep == NULL ||
+	    !args->lstio_grp_namep ||
 	    args->lstio_grp_nmlen <= 0 ||
 	    args->lstio_grp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
@@ -393,24 +390,24 @@
 	if (args->lstio_grp_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_grp_namep == NULL ||
+	if (!args->lstio_grp_namep ||
 	    args->lstio_grp_nmlen <= 0 ||
 	    args->lstio_grp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
-	if (args->lstio_grp_entp  == NULL && /* output: group entry */
-	    args->lstio_grp_dentsp == NULL)  /* output: node entry */
+	if (!args->lstio_grp_entp &&  /* output: group entry */
+	    !args->lstio_grp_dentsp)  /* output: node entry */
 		return -EINVAL;
 
-	if (args->lstio_grp_dentsp != NULL) { /* have node entry */
-		if (args->lstio_grp_idxp == NULL || /* node index */
-		    args->lstio_grp_ndentp == NULL) /* # of node entry */
+	if (args->lstio_grp_dentsp) { /* have node entry */
+		if (!args->lstio_grp_idxp || /* node index */
+		    !args->lstio_grp_ndentp) /* # of node entry */
 			return -EINVAL;
 
 		if (copy_from_user(&ndent, args->lstio_grp_ndentp,
-				       sizeof(ndent)) ||
+				   sizeof(ndent)) ||
 		    copy_from_user(&index, args->lstio_grp_idxp,
-				       sizeof(index)))
+				   sizeof(index)))
 			return -EFAULT;
 
 		if (ndent <= 0 || index < 0)
@@ -418,12 +415,11 @@
 	}
 
 	LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_grp_namep,
-			       args->lstio_grp_nmlen)) {
+	if (copy_from_user(name, args->lstio_grp_namep,
+			   args->lstio_grp_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 		return -EFAULT;
 	}
@@ -435,10 +431,10 @@
 
 	LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 
-	if (rc != 0)
+	if (rc)
 		return rc;
 
-	if (args->lstio_grp_dentsp != NULL &&
+	if (args->lstio_grp_dentsp &&
 	    (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
 	     copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
 		return -EFAULT;
@@ -455,18 +451,17 @@
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_bat_namep == NULL ||
+	if (!args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_bat_namep,
-			       args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}
@@ -489,18 +484,17 @@
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_bat_namep == NULL ||
+	if (!args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_bat_namep,
-			       args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}
@@ -524,19 +518,18 @@
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_bat_resultp == NULL ||
-	    args->lstio_bat_namep == NULL ||
+	if (!args->lstio_bat_resultp ||
+	    !args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_bat_namep,
-			       args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}
@@ -560,8 +553,8 @@
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_bat_resultp == NULL ||
-	    args->lstio_bat_namep == NULL ||
+	if (!args->lstio_bat_resultp ||
+	    !args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
@@ -570,12 +563,11 @@
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_bat_namep,
-			       args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}
@@ -600,7 +592,7 @@
 		return -EACCES;
 
 	if (args->lstio_bat_idx   < 0 ||
-	    args->lstio_bat_namep == NULL ||
+	    !args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
@@ -621,24 +613,24 @@
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_bat_namep == NULL || /* batch name */
+	if (!args->lstio_bat_namep || /* batch name */
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
-	if (args->lstio_bat_entp == NULL && /* output: batch entry */
-	    args->lstio_bat_dentsp == NULL) /* output: node entry */
+	if (!args->lstio_bat_entp && /* output: batch entry */
+	    !args->lstio_bat_dentsp) /* output: node entry */
 		return -EINVAL;
 
-	if (args->lstio_bat_dentsp != NULL) { /* have node entry */
-		if (args->lstio_bat_idxp == NULL || /* node index */
-		    args->lstio_bat_ndentp == NULL) /* # of node entry */
+	if (args->lstio_bat_dentsp) { /* have node entry */
+		if (!args->lstio_bat_idxp || /* node index */
+		    !args->lstio_bat_ndentp) /* # of node entry */
 			return -EINVAL;
 
 		if (copy_from_user(&index, args->lstio_bat_idxp,
-				       sizeof(index)) ||
+				   sizeof(index)) ||
 		    copy_from_user(&ndent, args->lstio_bat_ndentp,
-				       sizeof(ndent)))
+				   sizeof(ndent)))
 			return -EFAULT;
 
 		if (ndent <= 0 || index < 0)
@@ -646,28 +638,27 @@
 	}
 
 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
-	if (copy_from_user(name,
-			       args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}
 
 	name[args->lstio_bat_nmlen] = 0;
 
-	rc = lstcon_batch_info(name,
-			    args->lstio_bat_entp, args->lstio_bat_server,
-			    args->lstio_bat_testidx, &index, &ndent,
-			    args->lstio_bat_dentsp);
+	rc = lstcon_batch_info(name, args->lstio_bat_entp,
+			       args->lstio_bat_server, args->lstio_bat_testidx,
+			       &index, &ndent, args->lstio_bat_dentsp);
 
 	LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 
-	if (rc != 0)
+	if (rc)
 		return rc;
 
-	if (args->lstio_bat_dentsp != NULL &&
+	if (args->lstio_bat_dentsp &&
 	    (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
 	     copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
 		rc = -EFAULT;
@@ -685,28 +676,27 @@
 	if (args->lstio_sta_key != console_session.ses_key)
 		return -EACCES;
 
-	if (args->lstio_sta_resultp == NULL ||
-	    (args->lstio_sta_namep  == NULL &&
-	     args->lstio_sta_idsp   == NULL) ||
+	if (!args->lstio_sta_resultp ||
+	    (!args->lstio_sta_namep && !args->lstio_sta_idsp) ||
 	    args->lstio_sta_nmlen <= 0 ||
 	    args->lstio_sta_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
-	if (args->lstio_sta_idsp != NULL &&
+	if (args->lstio_sta_idsp &&
 	    args->lstio_sta_count <= 0)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;
 
 	if (copy_from_user(name, args->lstio_sta_namep,
-			       args->lstio_sta_nmlen)) {
+			   args->lstio_sta_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
 		return -EFAULT;
 	}
 
-	if (args->lstio_sta_idsp == NULL) {
+	if (!args->lstio_sta_idsp) {
 		rc = lstcon_group_stat(name, args->lstio_sta_timeout,
 				       args->lstio_sta_resultp);
 	} else {
@@ -730,46 +720,46 @@
 	int		ret = 0;
 	int		rc = -ENOMEM;
 
-	if (args->lstio_tes_resultp == NULL ||
-	    args->lstio_tes_retp == NULL ||
-	    args->lstio_tes_bat_name == NULL || /* no specified batch */
+	if (!args->lstio_tes_resultp ||
+	    !args->lstio_tes_retp ||
+	    !args->lstio_tes_bat_name || /* no specified batch */
 	    args->lstio_tes_bat_nmlen <= 0 ||
 	    args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
-	    args->lstio_tes_sgrp_name == NULL || /* no source group */
+	    !args->lstio_tes_sgrp_name || /* no source group */
 	    args->lstio_tes_sgrp_nmlen <= 0 ||
 	    args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
-	    args->lstio_tes_dgrp_name == NULL || /* no target group */
+	    !args->lstio_tes_dgrp_name || /* no target group */
 	    args->lstio_tes_dgrp_nmlen <= 0 ||
 	    args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
 
-	if (args->lstio_tes_loop == 0 || /* negative is infinite */
+	if (!args->lstio_tes_loop || /* negative is infinite */
 	    args->lstio_tes_concur <= 0 ||
 	    args->lstio_tes_dist <= 0 ||
 	    args->lstio_tes_span <= 0)
 		return -EINVAL;
 
 	/* have parameter, check if parameter length is valid */
-	if (args->lstio_tes_param != NULL &&
+	if (args->lstio_tes_param &&
 	    (args->lstio_tes_param_len <= 0 ||
 	     args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
 		return -EINVAL;
 
 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
-	if (batch_name == NULL)
+	if (!batch_name)
 		return rc;
 
 	LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
-	if (src_name == NULL)
+	if (!src_name)
 		goto out;
 
 	LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
-	 if (dst_name == NULL)
+	 if (!dst_name)
 		goto out;
 
-	if (args->lstio_tes_param != NULL) {
+	if (args->lstio_tes_param) {
 		LIBCFS_ALLOC(param, args->lstio_tes_param_len);
-		if (param == NULL)
+		if (!param)
 			goto out;
 	}
 
@@ -781,52 +771,55 @@
 	    copy_from_user(dst_name, args->lstio_tes_dgrp_name,
 			   args->lstio_tes_dgrp_nmlen) ||
 	    copy_from_user(param, args->lstio_tes_param,
-			      args->lstio_tes_param_len))
+			   args->lstio_tes_param_len))
 		goto out;
 
-	rc = lstcon_test_add(batch_name,
-			    args->lstio_tes_type,
-			    args->lstio_tes_loop,
-			    args->lstio_tes_concur,
-			    args->lstio_tes_dist, args->lstio_tes_span,
-			    src_name, dst_name, param,
-			    args->lstio_tes_param_len,
-			    &ret, args->lstio_tes_resultp);
+	rc = lstcon_test_add(batch_name, args->lstio_tes_type,
+			     args->lstio_tes_loop, args->lstio_tes_concur,
+			     args->lstio_tes_dist, args->lstio_tes_span,
+			     src_name, dst_name, param,
+			     args->lstio_tes_param_len,
+			     &ret, args->lstio_tes_resultp);
 
-	if (ret != 0)
+	if (ret)
 		rc = (copy_to_user(args->lstio_tes_retp, &ret,
-				       sizeof(ret))) ? -EFAULT : 0;
+				   sizeof(ret))) ? -EFAULT : 0;
 out:
-	if (batch_name != NULL)
+	if (batch_name)
 		LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);
 
-	if (src_name != NULL)
+	if (src_name)
 		LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);
 
-	if (dst_name != NULL)
+	if (dst_name)
 		LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);
 
-	if (param != NULL)
+	if (param)
 		LIBCFS_FREE(param, args->lstio_tes_param_len);
 
 	return rc;
 }
 
 int
-lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
+lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
 {
 	char   *buf;
-	int     opc = data->ioc_u32[0];
+	struct libcfs_ioctl_data *data;
+	int     opc;
 	int     rc;
 
 	if (cmd != IOC_LIBCFS_LNETST)
 		return -EINVAL;
 
+	data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
+
+	opc = data->ioc_u32[0];
+
 	if (data->ioc_plen1 > PAGE_CACHE_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(buf, data->ioc_plen1);
-	if (buf == NULL)
+	if (!buf)
 		return -ENOMEM;
 
 	/* copy in parameter */
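
The lstcon_ioctl_entry() change above, which takes the generic struct libcfs_ioctl_hdr and recovers the full libcfs_ioctl_data with container_of(), is the usual embedded-header dispatch pattern: the common code only knows about the header, and each consumer maps back to the structure the header is embedded in. A minimal illustration with invented types:

#include <linux/kernel.h>	/* container_of() */

struct msg_hdr {
	unsigned int version;
	unsigned int len;
};

struct msg_data {
	struct msg_hdr hdr;	/* must be embedded by value, not pointed to */
	unsigned int opc;
};

static unsigned int handle_msg(struct msg_hdr *hdr)
{
	/* recover the enclosing structure from the embedded header */
	struct msg_data *data = container_of(hdr, struct msg_data, hdr);

	return data->opc;
}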
@@ -916,7 +909,7 @@
 	}
 
 	if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
-			     sizeof(lstcon_trans_stat_t)))
+			 sizeof(lstcon_trans_stat_t)))
 		rc = -EFAULT;
 out:
 	mutex_unlock(&console_session.ses_mutex);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 1066c70..b02a140 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -54,14 +54,16 @@
 {
 	lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
 
-	LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
+	LASSERT(crpc && rpc == crpc->crp_rpc);
 	LASSERT(crpc->crp_posted && !crpc->crp_finished);
 
 	spin_lock(&rpc->crpc_lock);
 
-	if (crpc->crp_trans == NULL) {
-		/* Orphan RPC is not in any transaction,
-		 * I'm just a poor body and nobody loves me */
+	if (!crpc->crp_trans) {
+		/*
+		 * Orphan RPC is not in any transaction,
+		 * I'm just a poor body and nobody loves me
+		 */
 		spin_unlock(&rpc->crpc_lock);
 
 		/* release it */
@@ -72,9 +74,9 @@
 	/* not an orphan RPC */
 	crpc->crp_finished = 1;
 
-	if (crpc->crp_stamp == 0) {
+	if (!crpc->crp_stamp) {
 		/* not aborted */
-		LASSERT(crpc->crp_status == 0);
+		LASSERT(!crpc->crp_status);
 
 		crpc->crp_stamp  = cfs_time_current();
 		crpc->crp_status = rpc->crpc_status;
@@ -94,7 +96,7 @@
 	crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
 				       feats, bulk_npg, bulk_len,
 				       lstcon_rpc_done, (void *)crpc);
-	if (crpc->crp_rpc == NULL)
+	if (!crpc->crp_rpc)
 		return -ENOMEM;
 
 	crpc->crp_trans    = NULL;
@@ -123,20 +125,20 @@
 
 	if (!list_empty(&console_session.ses_rpc_freelist)) {
 		crpc = list_entry(console_session.ses_rpc_freelist.next,
-				      lstcon_rpc_t, crp_link);
+				  lstcon_rpc_t, crp_link);
 		list_del_init(&crpc->crp_link);
 	}
 
 	spin_unlock(&console_session.ses_rpc_lock);
 
-	if (crpc == NULL) {
+	if (!crpc) {
 		LIBCFS_ALLOC(crpc, sizeof(*crpc));
-		if (crpc == NULL)
+		if (!crpc)
 			return -ENOMEM;
 	}
 
 	rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
-	if (rc == 0) {
+	if (!rc) {
 		*crpcpp = crpc;
 		return 0;
 	}
@@ -155,7 +157,7 @@
 	LASSERT(list_empty(&crpc->crp_link));
 
 	for (i = 0; i < bulk->bk_niov; i++) {
-		if (bulk->bk_iovs[i].kiov_page == NULL)
+		if (!bulk->bk_iovs[i].kiov_page)
 			continue;
 
 		__free_page(bulk->bk_iovs[i].kiov_page);
@@ -172,7 +174,7 @@
 		spin_lock(&console_session.ses_rpc_lock);
 
 		list_add(&crpc->crp_link,
-			     &console_session.ses_rpc_freelist);
+			 &console_session.ses_rpc_freelist);
 
 		spin_unlock(&console_session.ses_rpc_lock);
 	}
@@ -186,7 +188,7 @@
 {
 	lstcon_rpc_trans_t *trans = crpc->crp_trans;
 
-	LASSERT(trans != NULL);
+	LASSERT(trans);
 
 	atomic_inc(&trans->tas_remaining);
 	crpc->crp_posted = 1;
@@ -239,10 +241,12 @@
 {
 	lstcon_rpc_trans_t *trans;
 
-	if (translist != NULL) {
+	if (translist) {
 		list_for_each_entry(trans, translist, tas_link) {
-			/* Can't enqueue two private transaction on
-			 * the same object */
+			/*
+			 * Can't enqueue two private transactions on
+			 * the same object
+			 */
 			if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
 				return -EPERM;
 		}
@@ -250,12 +254,12 @@
 
 	/* create a trans group */
 	LIBCFS_ALLOC(trans, sizeof(*trans));
-	if (trans == NULL)
+	if (!trans)
 		return -ENOMEM;
 
 	trans->tas_opc = transop;
 
-	if (translist == NULL)
+	if (!translist)
 		INIT_LIST_HEAD(&trans->tas_olink);
 	else
 		list_add_tail(&trans->tas_olink, translist);
@@ -294,8 +298,8 @@
 		spin_lock(&rpc->crpc_lock);
 
 		if (!crpc->crp_posted || /* not posted */
-		    crpc->crp_stamp != 0) { /* rpc done or aborted already */
-			if (crpc->crp_stamp == 0) {
+		    crpc->crp_stamp) { /* rpc done or aborted already */
+			if (!crpc->crp_stamp) {
 				crpc->crp_stamp = cfs_time_current();
 				crpc->crp_status = -EINTR;
 			}
@@ -329,7 +333,7 @@
 	    !list_empty(&trans->tas_olink)) /* Not an end session RPC */
 		return 1;
 
-	return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
+	return !atomic_read(&trans->tas_remaining) ? 1 : 0;
 }
 
 int
@@ -366,7 +370,7 @@
 	if (console_session.ses_shutdown)
 		rc = -ESHUTDOWN;
 
-	if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) {
+	if (rc || atomic_read(&trans->tas_remaining)) {
 		/* treat short timeout as canceled */
 		if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
 			rc = -EINTR;
@@ -389,10 +393,10 @@
 	srpc_client_rpc_t *rpc = crpc->crp_rpc;
 	srpc_generic_reply_t *rep;
 
-	LASSERT(nd != NULL && rpc != NULL);
-	LASSERT(crpc->crp_stamp != 0);
+	LASSERT(nd && rpc);
+	LASSERT(crpc->crp_stamp);
 
-	if (crpc->crp_status != 0) {
+	if (crpc->crp_status) {
 		*msgpp = NULL;
 		return crpc->crp_status;
 	}
@@ -426,19 +430,19 @@
 	srpc_msg_t *rep;
 	int error;
 
-	LASSERT(stat != NULL);
+	LASSERT(stat);
 
 	memset(stat, 0, sizeof(*stat));
 
 	list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
 		lstcon_rpc_stat_total(stat, 1);
 
-		LASSERT(crpc->crp_stamp != 0);
+		LASSERT(crpc->crp_stamp);
 
 		error = lstcon_rpc_get_reply(crpc, &rep);
-		if (error != 0) {
+		if (error) {
 			lstcon_rpc_stat_failure(stat, 1);
-			if (stat->trs_rpc_errno == 0)
+			if (!stat->trs_rpc_errno)
 				stat->trs_rpc_errno = -error;
 
 			continue;
@@ -449,7 +453,7 @@
 		lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
 	}
 
-	if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) {
+	if (trans->tas_opc == LST_TRANS_SESNEW && !stat->trs_fwk_errno) {
 		stat->trs_fwk_errno =
 		      lstcon_session_feats_check(trans->tas_features);
 	}
@@ -466,11 +470,11 @@
 
 int
 lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
-			     struct list_head *head_up,
+			     struct list_head __user *head_up,
 			     lstcon_rpc_readent_func_t readent)
 {
 	struct list_head tmp;
-	struct list_head *next;
+	struct list_head __user *next;
 	lstcon_rpc_ent_t *ent;
 	srpc_generic_reply_t *rep;
 	lstcon_rpc_t *crpc;
@@ -480,13 +484,13 @@
 	struct timeval tv;
 	int error;
 
-	LASSERT(head_up != NULL);
+	LASSERT(head_up);
 
 	next = head_up;
 
 	list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
 		if (copy_from_user(&tmp, next,
-				       sizeof(struct list_head)))
+				   sizeof(struct list_head)))
 			return -EFAULT;
 
 		if (tmp.next == head_up)
@@ -496,7 +500,7 @@
 
 		ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
 
-		LASSERT(crpc->crp_stamp != 0);
+		LASSERT(crpc->crp_stamp);
 
 		error = lstcon_rpc_get_reply(crpc, &msg);
 
@@ -506,33 +510,32 @@
 		      (unsigned long)console_session.ses_id.ses_stamp);
 		jiffies_to_timeval(dur, &tv);
 
-		if (copy_to_user(&ent->rpe_peer,
-				     &nd->nd_id, sizeof(lnet_process_id_t)) ||
+		if (copy_to_user(&ent->rpe_peer, &nd->nd_id,
+				 sizeof(lnet_process_id_t)) ||
 		    copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
-		    copy_to_user(&ent->rpe_state,
-				     &nd->nd_state, sizeof(nd->nd_state)) ||
+		    copy_to_user(&ent->rpe_state, &nd->nd_state,
+				 sizeof(nd->nd_state)) ||
 		    copy_to_user(&ent->rpe_rpc_errno, &error,
-				     sizeof(error)))
+				 sizeof(error)))
 			return -EFAULT;
 
-		if (error != 0)
+		if (error)
 			continue;
 
 		/* RPC is done */
 		rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
 
-		if (copy_to_user(&ent->rpe_sid,
-				     &rep->sid, sizeof(lst_sid_t)) ||
-		    copy_to_user(&ent->rpe_fwk_errno,
-				     &rep->status, sizeof(rep->status)))
+		if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
+		    copy_to_user(&ent->rpe_fwk_errno, &rep->status,
+				 sizeof(rep->status)))
 			return -EFAULT;
 
-		if (readent == NULL)
+		if (!readent)
 			continue;
 
 		error = readent(trans->tas_opc, msg, ent);
 
-		if (error != 0)
+		if (error)
 			return error;
 	}
 
@@ -563,12 +566,13 @@
 			continue;
 		}
 
-		/* rpcs can be still not callbacked (even LNetMDUnlink is called)
+		/*
+		 * rpcs can be still not callbacked (even LNetMDUnlink is called)
 		 * because huge timeout for inaccessible network, don't make
 		 * user wait for them, just abandon them, they will be recycled
-		 * in callback */
-
-		LASSERT(crpc->crp_status != 0);
+		 * in callback
+		 */
+		LASSERT(crpc->crp_status);
 
 		crpc->crp_node  = NULL;
 		crpc->crp_trans = NULL;
@@ -580,7 +584,7 @@
 		atomic_dec(&trans->tas_remaining);
 	}
 
-	LASSERT(atomic_read(&trans->tas_remaining) == 0);
+	LASSERT(!atomic_read(&trans->tas_remaining));
 
 	list_del(&trans->tas_link);
 	if (!list_empty(&trans->tas_olink))
@@ -606,7 +610,7 @@
 	case LST_TRANS_SESNEW:
 		rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
 				     feats, 0, 0, crpc);
-		if (rc != 0)
+		if (rc)
 			return rc;
 
 		msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
@@ -619,7 +623,7 @@
 	case LST_TRANS_SESEND:
 		rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
 				     feats, 0, 0, crpc);
-		if (rc != 0)
+		if (rc)
 			return rc;
 
 		rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
@@ -640,7 +644,7 @@
 	int rc;
 
 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
@@ -660,7 +664,7 @@
 	int		    rc;
 
 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;
@@ -676,7 +680,7 @@
 	    transop != LST_TRANS_TSBSTOP)
 		return 0;
 
-	LASSERT(tsb->tsb_index == 0);
+	LASSERT(!tsb->tsb_index);
 
 	batch = (lstcon_batch_t *)tsb;
 	brq->bar_arg = batch->bat_arg;
@@ -691,7 +695,7 @@
 	int		   rc;
 
 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;
@@ -823,13 +827,13 @@
 
 	if (transop == LST_TRANS_TSBCLIADD) {
 		npg = sfw_id_pages(test->tes_span);
-		nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
+		nob = !(feats & LST_FEAT_BULK_LEN) ?
 		      npg * PAGE_CACHE_SIZE :
 		      sizeof(lnet_process_id_packed_t) * test->tes_span;
 	}
 
 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	trq  = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
@@ -852,7 +856,7 @@
 
 			LASSERT(nob > 0);
 
-			len = (feats & LST_FEAT_BULK_LEN) == 0 ?
+			len = !(feats & LST_FEAT_BULK_LEN) ?
 			      PAGE_CACHE_SIZE :
 			      min_t(int, nob, PAGE_CACHE_SIZE);
 			nob -= len;
@@ -862,7 +866,7 @@
 			bulk->bk_iovs[i].kiov_page   =
 				alloc_page(GFP_KERNEL);
 
-			if (bulk->bk_iovs[i].kiov_page == NULL) {
+			if (!bulk->bk_iovs[i].kiov_page) {
 				lstcon_rpc_put(*crpc);
 				return -ENOMEM;
 			}
@@ -877,7 +881,7 @@
 					  test->tes_dist,
 					  test->tes_span,
 					  npg, &bulk->bk_iovs[0]);
-		if (rc != 0) {
+		if (rc) {
 			lstcon_rpc_put(*crpc);
 			return rc;
 		}
@@ -901,7 +905,7 @@
 
 	case LST_TEST_BULK:
 		trq->tsr_service = SRPC_SERVICE_BRW;
-		if ((feats & LST_FEAT_BULK_LEN) == 0) {
+		if (!(feats & LST_FEAT_BULK_LEN)) {
 			rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *)
 						    &test->tes_param[0], trq);
 		} else {
@@ -925,8 +929,8 @@
 	srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
 	int		   status   = mksn_rep->mksn_status;
 
-	if (status == 0 &&
-	    (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+	if (!status &&
+	    (reply->msg_ses_feats & ~LST_FEATS_MASK)) {
 		mksn_rep->mksn_status = EPROTO;
 		status = EPROTO;
 	}
@@ -937,7 +941,7 @@
 			reply->msg_ses_feats);
 	}
 
-	if (status != 0)
+	if (status)
 		return status;
 
 	if (!trans->tas_feats_updated) {
@@ -947,12 +951,13 @@
 
 	if (reply->msg_ses_feats != trans->tas_features) {
 		CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
-			 reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
-			 trans->tas_features);
-		status = mksn_rep->mksn_status = EPROTO;
+			reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
+			trans->tas_features);
+		mksn_rep->mksn_status = EPROTO;
+		status = EPROTO;
 	}
 
-	if (status == 0) {
+	if (!status) {
 		/* session timeout on remote node */
 		nd->nd_timeout = mksn_rep->mksn_timeout;
 	}
@@ -974,7 +979,7 @@
 	switch (trans->tas_opc) {
 	case LST_TRANS_SESNEW:
 		rc = lstcon_sesnew_stat_reply(trans, nd, msg);
-		if (rc == 0) {
+		if (!rc) {
 			lstcon_sesop_stat_success(stat, 1);
 			return;
 		}
@@ -985,7 +990,7 @@
 	case LST_TRANS_SESEND:
 		rmsn_rep = &msg->msg_body.rmsn_reply;
 		/* ESRCH is not an error for end session */
-		if (rmsn_rep->rmsn_status == 0 ||
+		if (!rmsn_rep->rmsn_status ||
 		    rmsn_rep->rmsn_status == ESRCH) {
 			lstcon_sesop_stat_success(stat, 1);
 			return;
@@ -1014,7 +1019,7 @@
 	case LST_TRANS_TSBSTOP:
 		bat_rep = &msg->msg_body.bat_reply;
 
-		if (bat_rep->bar_status == 0) {
+		if (!bat_rep->bar_status) {
 			lstcon_tsbop_stat_success(stat, 1);
 			return;
 		}
@@ -1033,12 +1038,12 @@
 	case LST_TRANS_TSBSRVQRY:
 		bat_rep = &msg->msg_body.bat_reply;
 
-		if (bat_rep->bar_active != 0)
+		if (bat_rep->bar_active)
 			lstcon_tsbqry_stat_run(stat, 1);
 		else
 			lstcon_tsbqry_stat_idle(stat, 1);
 
-		if (bat_rep->bar_status == 0)
+		if (!bat_rep->bar_status)
 			return;
 
 		lstcon_tsbqry_stat_failure(stat, 1);
@@ -1049,7 +1054,7 @@
 	case LST_TRANS_TSBSRVADD:
 		test_rep = &msg->msg_body.tes_reply;
 
-		if (test_rep->tsr_status == 0) {
+		if (!test_rep->tsr_status) {
 			lstcon_tsbop_stat_success(stat, 1);
 			return;
 		}
@@ -1061,7 +1066,7 @@
 	case LST_TRANS_STATQRY:
 		stat_rep = &msg->msg_body.stat_reply;
 
-		if (stat_rep->str_status == 0) {
+		if (!stat_rep->str_status) {
 			lstcon_statqry_stat_success(stat, 1);
 			return;
 		}
@@ -1074,7 +1079,7 @@
 		LBUG();
 	}
 
-	if (stat->trs_fwk_errno == 0)
+	if (!stat->trs_fwk_errno)
 		stat->trs_fwk_errno = rc;
 
 	return;
@@ -1096,22 +1101,22 @@
 	/* Creating session RPG for list of nodes */
 
 	rc = lstcon_rpc_trans_prep(translist, transop, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction %d: %d\n", transop, rc);
 		return rc;
 	}
 
 	feats = trans->tas_features;
 	list_for_each_entry(ndl, ndlist, ndl_link) {
-		rc = condition == NULL ? 1 :
+		rc = !condition ? 1 :
 		     condition(transop, ndl->ndl_node, arg);
 
-		if (rc == 0)
+		if (!rc)
 			continue;
 
 		if (rc < 0) {
 			CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
-					transop, rc);
+			       transop, rc);
 			break;
 		}
 
@@ -1146,7 +1151,7 @@
 			break;
 		}
 
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Failed to create RPC for transaction %s: %d\n",
 			       lstcon_rpc_trans_name(transop), rc);
 			break;
@@ -1155,7 +1160,7 @@
 		lstcon_rpc_trans_addreq(trans, rpc);
 	}
 
-	if (rc == 0) {
+	if (!rc) {
 		*transpp = trans;
 		return 0;
 	}
@@ -1196,7 +1201,7 @@
 
 	trans = console_session.ses_ping;
 
-	LASSERT(trans != NULL);
+	LASSERT(trans);
 
 	list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
 		nd = ndl->ndl_node;
@@ -1208,7 +1213,7 @@
 
 			rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
 						trans->tas_features, &crpc);
-			if (rc != 0) {
+			if (rc) {
 				CERROR("Out of memory\n");
 				break;
 			}
@@ -1221,7 +1226,7 @@
 
 		crpc = &nd->nd_ping;
 
-		if (crpc->crp_rpc != NULL) {
+		if (crpc->crp_rpc) {
 			LASSERT(crpc->crp_trans == trans);
 			LASSERT(!list_empty(&crpc->crp_link));
 
@@ -1253,7 +1258,7 @@
 
 		rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
 				     trans->tas_features, 0, 0, 1, crpc);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Out of memory\n");
 			break;
 		}
@@ -1289,11 +1294,11 @@
 	int rc;
 
 	LASSERT(list_empty(&console_session.ses_rpc_freelist));
-	LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+	LASSERT(!atomic_read(&console_session.ses_rpc_counter));
 
 	rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
 				   &console_session.ses_ping);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Failed to create console pinger\n");
 		return rc;
 	}
@@ -1337,7 +1342,7 @@
 	while (!list_empty(&console_session.ses_trans_list)) {
 		list_for_each(pacer, &console_session.ses_trans_list) {
 			trans = list_entry(pacer, lstcon_rpc_trans_t,
-					       tas_link);
+					   tas_link);
 
 			CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
 			       lstcon_rpc_trans_name(trans->tas_opc));
@@ -1356,7 +1361,7 @@
 
 	spin_lock(&console_session.ses_rpc_lock);
 
-	lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+	lst_wait_until(!atomic_read(&console_session.ses_rpc_counter),
 		       console_session.ses_rpc_lock,
 		       "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n",
 		       atomic_read(&console_session.ses_rpc_counter));
@@ -1394,5 +1399,5 @@
 lstcon_rpc_module_fini(void)
 {
 	LASSERT(list_empty(&console_session.ses_rpc_freelist));
-	LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+	LASSERT(!atomic_read(&console_session.ses_rpc_counter));
 }
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 95c832f..d2133bc 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -106,7 +106,8 @@
 #define LST_TRANS_STATQRY       0x21
 
 typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
+typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+					 lstcon_rpc_ent_t __user *);
 
 int  lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
 			unsigned version, lstcon_rpc_t **crpc);
@@ -128,7 +129,7 @@
 void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
 			   lstcon_trans_stat_t *stat);
 int  lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
-				  struct list_head *head_up,
+				  struct list_head __user *head_up,
 				  lstcon_rpc_readent_func_t readent);
 void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
 void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 5619fc4..e8ca1bf 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -58,7 +58,7 @@
 	(p)->nle_nnode++;				\
 } while (0)
 
-lstcon_session_t console_session;
+struct lstcon_session console_session;
 
 static void
 lstcon_node_get(lstcon_node_t *nd)
@@ -90,7 +90,7 @@
 		return -ENOENT;
 
 	LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
-	if (*ndpp == NULL)
+	if (!*ndpp)
 		return -ENOMEM;
 
 	ndl = (lstcon_ndlink_t *)(*ndpp + 1);
@@ -104,9 +104,11 @@
 	ndl->ndl_node->nd_timeout = 0;
 	memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
 
-	/* queued in global hash & list, no refcount is taken by
+	/*
+	 * queued in global hash & list, no refcount is taken by
 	 * global hash & list, if caller release his refcount,
-	 * node will be released */
+	 * node will be released
+	 */
 	list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
 	list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
 
@@ -157,16 +159,16 @@
 		return 0;
 	}
 
-	if (create == 0)
+	if (!create)
 		return -ENOENT;
 
 	/* find or create in session hash */
 	rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
-	if (ndl == NULL) {
+	if (!ndl) {
 		lstcon_node_put(nd);
 		return -ENOMEM;
 	}
@@ -200,11 +202,11 @@
 
 	LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
 				   grp_ndl_hash[LST_NODE_HASHSIZE]));
-	if (grp == NULL)
+	if (!grp)
 		return -ENOMEM;
 
 	grp->grp_ref = 1;
-	if (name != NULL)
+	if (name)
 		strcpy(grp->grp_name, name);
 
 	INIT_LIST_HEAD(&grp->grp_link);
@@ -234,7 +236,7 @@
 	lstcon_ndlink_t *tmp;
 
 	list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
-		if ((ndl->ndl_node->nd_state & keep) == 0)
+		if (!(ndl->ndl_node->nd_state & keep))
 			lstcon_group_ndlink_release(grp, ndl);
 	}
 }
@@ -252,9 +254,8 @@
 
 	lstcon_group_drain(grp, 0);
 
-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&grp->grp_ndl_hash[i]));
-	}
 
 	LIBCFS_FREE(grp, offsetof(lstcon_group_t,
 				  grp_ndl_hash[LST_NODE_HASHSIZE]));
@@ -266,7 +267,7 @@
 	lstcon_group_t *grp;
 
 	list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
-		if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
+		if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
 			continue;
 
 		lstcon_group_addref(grp);  /* +1 ref for caller */
@@ -284,7 +285,7 @@
 	int rc;
 
 	rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	if (!list_empty(&(*ndlpp)->ndl_link))
@@ -327,7 +328,7 @@
 
 	while (!list_empty(&old->grp_ndl_list)) {
 		ndl = list_entry(old->grp_ndl_list.next,
-				     lstcon_ndlink_t, ndl_link);
+				 lstcon_ndlink_t, ndl_link);
 		lstcon_group_ndlink_move(old, new, ndl);
 	}
 }
@@ -347,7 +348,7 @@
 		if (nd->nd_state != LST_NODE_ACTIVE)
 			return 0;
 
-		if (grp != NULL && nd->nd_ref > 1)
+		if (grp && nd->nd_ref > 1)
 			return 0;
 		break;
 
@@ -363,7 +364,7 @@
 
 static int
 lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
-		      lstcon_rpc_ent_t *ent_up)
+		      lstcon_rpc_ent_t __user *ent_up)
 {
 	srpc_debug_reply_t *rep;
 
@@ -376,9 +377,9 @@
 		rep = &msg->msg_body.dbg_reply;
 
 		if (copy_to_user(&ent_up->rpe_priv[0],
-				     &rep->dbg_timeout, sizeof(int)) ||
+				 &rep->dbg_timeout, sizeof(int)) ||
 		    copy_to_user(&ent_up->rpe_payload[0],
-				     &rep->dbg_name, LST_NAME_SIZE))
+				 &rep->dbg_name, LST_NAME_SIZE))
 			return -EFAULT;
 
 		return 0;
@@ -392,8 +393,8 @@
 
 static int
 lstcon_group_nodes_add(lstcon_group_t *grp,
-		       int count, lnet_process_id_t *ids_up,
-		       unsigned *featp, struct list_head *result_up)
+		       int count, lnet_process_id_t __user *ids_up,
+		       unsigned *featp, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_ndlink_t *ndl;
@@ -403,7 +404,7 @@
 	int rc;
 
 	rc = lstcon_group_alloc(NULL, &tmp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		return -ENOMEM;
 	}
@@ -416,18 +417,18 @@
 
 		/* skip if it's in this group already */
 		rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
-		if (rc == 0)
+		if (!rc)
 			continue;
 
 		/* add to tmp group */
 		rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't create ndlink, out of memory\n");
 			break;
 		}
 	}
 
-	if (rc != 0) {
+	if (rc) {
 		lstcon_group_decref(tmp);
 		return rc;
 	}
@@ -435,7 +436,7 @@
 	rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
 				     &tmp->grp_trans_list, LST_TRANS_SESNEW,
 				     tmp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		lstcon_group_decref(tmp);
 		return rc;
@@ -459,8 +460,8 @@
 
 static int
 lstcon_group_nodes_remove(lstcon_group_t *grp,
-			  int count, lnet_process_id_t *ids_up,
-			  struct list_head *result_up)
+			  int count, lnet_process_id_t __user *ids_up,
+			  struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_ndlink_t *ndl;
@@ -472,7 +473,7 @@
 	/* End session and remove node from the group */
 
 	rc = lstcon_group_alloc(NULL, &tmp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		return -ENOMEM;
 	}
@@ -484,14 +485,14 @@
 		}
 
 		/* move node to tmp group */
-		if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0)
+		if (!lstcon_group_ndlink_find(grp, id, &ndl, 0))
 			lstcon_group_ndlink_move(grp, tmp, ndl);
 	}
 
 	rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
 				     &tmp->grp_trans_list, LST_TRANS_SESEND,
 				     tmp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		goto error;
 	}
@@ -518,15 +519,15 @@
 	lstcon_group_t *grp;
 	int rc;
 
-	rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
-	if (rc != 0) {
+	rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
+	if (rc) {
 		/* find a group with same name */
 		lstcon_group_decref(grp);
 		return rc;
 	}
 
 	rc = lstcon_group_alloc(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't allocate descriptor for group %s\n", name);
 		return -ENOMEM;
 	}
@@ -537,17 +538,17 @@
 }
 
 int
-lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up,
-		 unsigned *featp, struct list_head *result_up)
+lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
+		 unsigned *featp, struct list_head __user *result_up)
 {
 	lstcon_group_t *grp;
 	int rc;
 
 	LASSERT(count > 0);
-	LASSERT(ids_up != NULL);
+	LASSERT(ids_up);
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", name);
 		return rc;
 	}
@@ -575,7 +576,7 @@
 	int rc;
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group: %s\n", name);
 		return rc;
 	}
@@ -590,7 +591,7 @@
 	rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
 				     &grp->grp_trans_list, LST_TRANS_SESEND,
 				     grp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		lstcon_group_decref(grp);
 		return rc;
@@ -601,8 +602,10 @@
 	lstcon_rpc_trans_destroy(trans);
 
 	lstcon_group_decref(grp);
-	/* -ref for session, it's destroyed,
-	 * status can't be rolled back, destroy group anyway */
+	/*
+	 * -ref for session, it's destroyed,
+	 * status can't be rolled back, destroy group anyway
+	 */
 	lstcon_group_decref(grp);
 
 	return rc;
@@ -615,7 +618,7 @@
 	int rc;
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", name);
 		return rc;
 	}
@@ -641,14 +644,14 @@
 }
 
 int
-lstcon_nodes_remove(char *name, int count,
-		    lnet_process_id_t *ids_up, struct list_head *result_up)
+lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
+		    struct list_head __user *result_up)
 {
 	lstcon_group_t *grp = NULL;
 	int rc;
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group: %s\n", name);
 		return rc;
 	}
@@ -671,14 +674,14 @@
 }
 
 int
-lstcon_group_refresh(char *name, struct list_head *result_up)
+lstcon_group_refresh(char *name, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_group_t *grp;
 	int rc;
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group: %s\n", name);
 		return rc;
 	}
@@ -694,7 +697,7 @@
 	rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
 				     &grp->grp_trans_list, LST_TRANS_SESNEW,
 				     grp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		/* local error, return */
 		CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
 		lstcon_group_decref(grp);
@@ -713,15 +716,15 @@
 }
 
 int
-lstcon_group_list(int index, int len, char *name_up)
+lstcon_group_list(int index, int len, char __user *name_up)
 {
 	lstcon_group_t *grp;
 
 	LASSERT(index >= 0);
-	LASSERT(name_up != NULL);
+	LASSERT(name_up);
 
 	list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
-		if (index-- == 0) {
+		if (!index--) {
 			return copy_to_user(name_up, grp->grp_name, len) ?
 			       -EFAULT : 0;
 		}
@@ -732,15 +735,15 @@
 
 static int
 lstcon_nodes_getent(struct list_head *head, int *index_p,
-		    int *count_p, lstcon_node_ent_t *dents_up)
+		    int *count_p, lstcon_node_ent_t __user *dents_up)
 {
 	lstcon_ndlink_t *ndl;
 	lstcon_node_t *nd;
 	int count = 0;
 	int index = 0;
 
-	LASSERT(index_p != NULL && count_p != NULL);
-	LASSERT(dents_up != NULL);
+	LASSERT(index_p && count_p);
+	LASSERT(dents_up);
 	LASSERT(*index_p >= 0);
 	LASSERT(*count_p > 0);
 
@@ -753,9 +756,9 @@
 
 		nd = ndl->ndl_node;
 		if (copy_to_user(&dents_up[count].nde_id,
-				     &nd->nd_id, sizeof(nd->nd_id)) ||
+				 &nd->nd_id, sizeof(nd->nd_id)) ||
 		    copy_to_user(&dents_up[count].nde_state,
-				     &nd->nd_state, sizeof(nd->nd_state)))
+				 &nd->nd_state, sizeof(nd->nd_state)))
 			return -EFAULT;
 
 		count++;
@@ -771,8 +774,9 @@
 }
 
 int
-lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
-		  int *index_p, int *count_p, lstcon_node_ent_t *dents_up)
+lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
+		  int *index_p, int *count_p,
+		  lstcon_node_ent_t __user *dents_up)
 {
 	lstcon_ndlist_ent_t *gentp;
 	lstcon_group_t *grp;
@@ -780,7 +784,7 @@
 	int rc;
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", name);
 		return rc;
 	}
@@ -796,7 +800,7 @@
 
 	/* non-verbose query */
 	LIBCFS_ALLOC(gentp, sizeof(lstcon_ndlist_ent_t));
-	if (gentp == NULL) {
+	if (!gentp) {
 		CERROR("Can't allocate ndlist_ent\n");
 		lstcon_group_decref(grp);
 
@@ -807,7 +811,7 @@
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
 
 	rc = copy_to_user(gents_p, gentp,
-			      sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
+			  sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
 
 	LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
 
@@ -822,7 +826,7 @@
 	lstcon_batch_t *bat;
 
 	list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
-		if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
+		if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
 			*batpp = bat;
 			return 0;
 		}
@@ -838,21 +842,21 @@
 	int i;
 	int rc;
 
-	rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
-	if (rc != 0) {
+	rc = !lstcon_batch_find(name, &bat) ? -EEXIST : 0;
+	if (rc) {
 		CDEBUG(D_NET, "Batch %s already exists\n", name);
 		return rc;
 	}
 
 	LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
-	if (bat == NULL) {
+	if (!bat) {
 		CERROR("Can't allocate descriptor for batch %s\n", name);
 		return -ENOMEM;
 	}
 
 	LIBCFS_ALLOC(bat->bat_cli_hash,
 		     sizeof(struct list_head) * LST_NODE_HASHSIZE);
-	if (bat->bat_cli_hash == NULL) {
+	if (!bat->bat_cli_hash) {
 		CERROR("Can't allocate hash for batch %s\n", name);
 		LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
 
@@ -861,7 +865,7 @@
 
 	LIBCFS_ALLOC(bat->bat_srv_hash,
 		     sizeof(struct list_head) * LST_NODE_HASHSIZE);
-	if (bat->bat_srv_hash == NULL) {
+	if (!bat->bat_srv_hash) {
 		CERROR("Can't allocate hash for batch %s\n", name);
 		LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
 		LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
@@ -892,15 +896,15 @@
 }
 
 int
-lstcon_batch_list(int index, int len, char *name_up)
+lstcon_batch_list(int index, int len, char __user *name_up)
 {
 	lstcon_batch_t *bat;
 
-	LASSERT(name_up != NULL);
+	LASSERT(name_up);
 	LASSERT(index >= 0);
 
 	list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
-		if (index-- == 0) {
+		if (!index--) {
 			return copy_to_user(name_up, bat->bat_name, len) ?
 			       -EFAULT : 0;
 		}
@@ -910,9 +914,9 @@
 }
 
 int
-lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
-		  int testidx, int *index_p, int *ndent_p,
-		  lstcon_node_ent_t *dents_up)
+lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
+		  int server, int testidx, int *index_p, int *ndent_p,
+		  lstcon_node_ent_t __user *dents_up)
 {
 	lstcon_test_batch_ent_t *entp;
 	struct list_head *clilst;
@@ -923,7 +927,7 @@
 	int rc;
 
 	rc = lstcon_batch_find(name, &bat);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return -ENOENT;
 	}
@@ -941,12 +945,12 @@
 		}
 	}
 
-	clilst = (test == NULL) ? &bat->bat_cli_list :
-				  &test->tes_src_grp->grp_ndl_list;
-	srvlst = (test == NULL) ? &bat->bat_srv_list :
-				  &test->tes_dst_grp->grp_ndl_list;
+	clilst = !test ? &bat->bat_cli_list :
+			 &test->tes_src_grp->grp_ndl_list;
+	srvlst = !test ? &bat->bat_srv_list :
+			 &test->tes_dst_grp->grp_ndl_list;
 
-	if (dents_up != NULL) {
+	if (dents_up) {
 		rc = lstcon_nodes_getent((server ? srvlst : clilst),
 					 index_p, ndent_p, dents_up);
 		return rc;
@@ -954,15 +958,14 @@
 
 	/* non-verbose query */
 	LIBCFS_ALLOC(entp, sizeof(lstcon_test_batch_ent_t));
-	if (entp == NULL)
+	if (!entp)
 		return -ENOMEM;
 
-	if (test == NULL) {
+	if (!test) {
 		entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
 		entp->u.tbe_batch.bae_state = bat->bat_state;
 
 	} else {
-
 		entp->u.tbe_test.tse_type   = test->tes_type;
 		entp->u.tbe_test.tse_loop   = test->tes_loop;
 		entp->u.tbe_test.tse_concur = test->tes_concur;
@@ -975,7 +978,7 @@
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
 
 	rc = copy_to_user(ent_up, entp,
-			      sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+			  sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
 
 	LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
 
@@ -1006,7 +1009,7 @@
 
 static int
 lstcon_batch_op(lstcon_batch_t *bat, int transop,
-		struct list_head *result_up)
+		struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	int rc;
@@ -1014,7 +1017,7 @@
 	rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
 				     &bat->bat_trans_list, transop,
 				     bat, lstcon_batrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
@@ -1029,12 +1032,12 @@
 }
 
 int
-lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
+lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
 {
 	lstcon_batch_t *bat;
 	int rc;
 
-	if (lstcon_batch_find(name, &bat) != 0) {
+	if (lstcon_batch_find(name, &bat)) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return -ENOENT;
 	}
@@ -1044,19 +1047,19 @@
 	rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);
 
 	/* mark batch as running if it's started in any node */
-	if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0)
+	if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0))
 		bat->bat_state = LST_BATCH_RUNNING;
 
 	return rc;
 }
 
 int
-lstcon_batch_stop(char *name, int force, struct list_head *result_up)
+lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
 {
 	lstcon_batch_t *bat;
 	int rc;
 
-	if (lstcon_batch_find(name, &bat) != 0) {
+	if (lstcon_batch_find(name, &bat)) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return -ENOENT;
 	}
@@ -1066,7 +1069,7 @@
 	rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);
 
 	/* mark batch as stopped if all RPCs finished */
-	if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0)
+	if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0))
 		bat->bat_state = LST_BATCH_IDLE;
 
 	return rc;
@@ -1083,7 +1086,7 @@
 
 	while (!list_empty(&bat->bat_test_list)) {
 		test = list_entry(bat->bat_test_list.next,
-				      lstcon_test_t, tes_link);
+				  lstcon_test_t, tes_link);
 		LASSERT(list_empty(&test->tes_trans_list));
 
 		list_del(&test->tes_link);
@@ -1099,7 +1102,7 @@
 
 	while (!list_empty(&bat->bat_cli_list)) {
 		ndl = list_entry(bat->bat_cli_list.next,
-				     lstcon_ndlink_t, ndl_link);
+				 lstcon_ndlink_t, ndl_link);
 		list_del_init(&ndl->ndl_link);
 
 		lstcon_ndlink_release(ndl);
@@ -1107,7 +1110,7 @@
 
 	while (!list_empty(&bat->bat_srv_list)) {
 		ndl = list_entry(bat->bat_srv_list.next,
-				     lstcon_ndlink_t, ndl_link);
+				 lstcon_ndlink_t, ndl_link);
 		list_del_init(&ndl->ndl_link);
 
 		lstcon_ndlink_release(ndl);
@@ -1135,10 +1138,10 @@
 	struct list_head *head;
 
 	test = (lstcon_test_t *)arg;
-	LASSERT(test != NULL);
+	LASSERT(test);
 
 	batch = test->tes_batch;
-	LASSERT(batch != NULL);
+	LASSERT(batch);
 
 	if (test->tes_oneside &&
 	    transop == LST_TRANS_TSBSRVADD)
@@ -1160,7 +1163,7 @@
 
 	LASSERT(nd->nd_id.nid != LNET_NID_ANY);
 
-	if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
+	if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1))
 		return -ENOMEM;
 
 	if (list_empty(&ndl->ndl_link))
@@ -1170,15 +1173,15 @@
 }
 
 static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
+lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_group_t *grp;
 	int transop;
 	int rc;
 
-	LASSERT(test->tes_src_grp != NULL);
-	LASSERT(test->tes_dst_grp != NULL);
+	LASSERT(test->tes_src_grp);
+	LASSERT(test->tes_dst_grp);
 
 	transop = LST_TRANS_TSBSRVADD;
 	grp  = test->tes_dst_grp;
@@ -1186,15 +1189,15 @@
 	rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
 				     &test->tes_trans_list, transop,
 				     test, lstcon_testrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
 
 	lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
 
-	if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
-	    lstcon_trans_stat()->trs_fwk_errno != 0) {
+	if (lstcon_trans_stat()->trs_rpc_errno ||
+	    lstcon_trans_stat()->trs_fwk_errno) {
 		lstcon_rpc_trans_interpreter(trans, result_up, NULL);
 
 		lstcon_rpc_trans_destroy(trans);
@@ -1226,7 +1229,7 @@
 	int rc;
 
 	rc = lstcon_batch_find(name, batch);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return rc;
 	}
@@ -1246,7 +1249,7 @@
 	lstcon_ndlink_t *ndl;
 
 	rc = lstcon_group_find(name, grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "can't find group %s\n", name);
 		return rc;
 	}
@@ -1266,7 +1269,7 @@
 		int concur, int dist, int span,
 		char *src_name, char *dst_name,
 		void *param, int paramlen, int *retp,
-		struct list_head *result_up)
+		struct list_head __user *result_up)
 {
 	lstcon_test_t	 *test	 = NULL;
 	int		 rc;
@@ -1280,15 +1283,15 @@
 	 * active node
 	 */
 	rc = lstcon_verify_batch(batch_name, &batch);
-	if (rc != 0)
+	if (rc)
 		goto out;
 
 	rc = lstcon_verify_group(src_name, &src_grp);
-	if (rc != 0)
+	if (rc)
 		goto out;
 
 	rc = lstcon_verify_group(dst_name, &dst_grp);
-	if (rc != 0)
+	if (rc)
 		goto out;
 
 	if (dst_grp->grp_userland)
@@ -1316,18 +1319,18 @@
 	test->tes_dst_grp	= dst_grp;
 	INIT_LIST_HEAD(&test->tes_trans_list);
 
-	if (param != NULL) {
+	if (param) {
 		test->tes_paramlen = paramlen;
 		memcpy(&test->tes_param[0], param, paramlen);
 	}
 
 	rc = lstcon_test_nodes_add(test, result_up);
 
-	if (rc != 0)
+	if (rc)
 		goto out;
 
-	if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
-	    lstcon_trans_stat()->trs_fwk_errno != 0)
+	if (lstcon_trans_stat()->trs_rpc_errno ||
+	    lstcon_trans_stat()->trs_fwk_errno)
 		CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
 		       batch_name);
 
@@ -1340,13 +1343,13 @@
 	/*  hold groups so nobody can change them */
 	return rc;
 out:
-	if (test != NULL)
+	if (test)
 		LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
 
-	if (dst_grp != NULL)
+	if (dst_grp)
 		lstcon_group_decref(dst_grp);
 
-	if (src_grp != NULL)
+	if (src_grp)
 		lstcon_group_decref(src_grp);
 
 	return rc;
@@ -1369,16 +1372,16 @@
 
 static int
 lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
-		      lstcon_rpc_ent_t *ent_up)
+		      lstcon_rpc_ent_t __user *ent_up)
 {
 	srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
 
 	LASSERT(transop == LST_TRANS_TSBCLIQRY ||
-		 transop == LST_TRANS_TSBSRVQRY);
+		transop == LST_TRANS_TSBSRVQRY);
 
 	/* positive errno, framework error code */
-	if (copy_to_user(&ent_up->rpe_priv[0],
-			     &rep->bar_active, sizeof(rep->bar_active)))
+	if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active,
+			 sizeof(rep->bar_active)))
 		return -EFAULT;
 
 	return 0;
@@ -1386,7 +1389,7 @@
 
 int
 lstcon_test_batch_query(char *name, int testidx, int client,
-			int timeout, struct list_head *result_up)
+			int timeout, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	struct list_head *translist;
@@ -1398,12 +1401,12 @@
 	int rc;
 
 	rc = lstcon_batch_find(name, &batch);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find batch: %s\n", name);
 		return rc;
 	}
 
-	if (testidx == 0) {
+	if (!testidx) {
 		translist = &batch->bat_trans_list;
 		ndlist    = &batch->bat_cli_list;
 		hdr       = &batch->bat_hdr;
@@ -1411,7 +1414,7 @@
 	} else {
 		/* query specified test only */
 		rc = lstcon_test_find(batch, testidx, &test);
-		if (rc != 0) {
+		if (rc) {
 			CDEBUG(D_NET, "Can't find test: %d\n", testidx);
 			return rc;
 		}
@@ -1425,16 +1428,16 @@
 
 	rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
 				     lstcon_batrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
 
 	lstcon_rpc_trans_postwait(trans, timeout);
 
-	if (testidx == 0 && /* query a batch, not a test */
-	    lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 &&
-	    lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) {
+	if (!testidx && /* query a batch, not a test */
+	    !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
+	    !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
 		/* all RPCs finished, and no active test */
 		batch->bat_state = LST_BATCH_IDLE;
 	}
@@ -1448,19 +1451,19 @@
 
 static int
 lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
-		       lstcon_rpc_ent_t *ent_up)
+		       lstcon_rpc_ent_t __user *ent_up)
 {
 	srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
-	sfw_counters_t *sfwk_stat;
-	srpc_counters_t *srpc_stat;
-	lnet_counters_t *lnet_stat;
+	sfw_counters_t __user *sfwk_stat;
+	srpc_counters_t __user *srpc_stat;
+	lnet_counters_t __user *lnet_stat;
 
-	if (rep->str_status != 0)
+	if (rep->str_status)
 		return 0;
 
-	sfwk_stat = (sfw_counters_t *)&ent_up->rpe_payload[0];
-	srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
-	lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
+	sfwk_stat = (sfw_counters_t __user *)&ent_up->rpe_payload[0];
+	srpc_stat = (srpc_counters_t __user *)(sfwk_stat + 1);
+	lnet_stat = (lnet_counters_t __user *)(srpc_stat + 1);
 
 	if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
 	    copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
@@ -1472,7 +1475,7 @@
 
 static int
 lstcon_ndlist_stat(struct list_head *ndlist,
-		   int timeout, struct list_head *result_up)
+		   int timeout, struct list_head __user *result_up)
 {
 	struct list_head head;
 	lstcon_rpc_trans_t *trans;
@@ -1482,7 +1485,7 @@
 
 	rc = lstcon_rpc_trans_ndlist(ndlist, &head,
 				     LST_TRANS_STATQRY, NULL, NULL, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
@@ -1497,13 +1500,14 @@
 }
 
 int
-lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
+lstcon_group_stat(char *grp_name, int timeout,
+		  struct list_head __user *result_up)
 {
 	lstcon_group_t *grp;
 	int rc;
 
 	rc = lstcon_group_find(grp_name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", grp_name);
 		return rc;
 	}
@@ -1516,8 +1520,8 @@
 }
 
 int
-lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
-		  int timeout, struct list_head *result_up)
+lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+		  int timeout, struct list_head __user *result_up)
 {
 	lstcon_ndlink_t *ndl;
 	lstcon_group_t *tmp;
@@ -1526,7 +1530,7 @@
 	int rc;
 
 	rc = lstcon_group_alloc(NULL, &tmp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		return -ENOMEM;
 	}
@@ -1539,7 +1543,7 @@
 
 		/* add to tmp group */
 		rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
-		if (rc != 0) {
+		if (rc) {
 			CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET,
 			       "Failed to find or create %s: %d\n",
 			       libcfs_id2str(id), rc);
@@ -1547,7 +1551,7 @@
 		}
 	}
 
-	if (rc != 0) {
+	if (rc) {
 		lstcon_group_decref(tmp);
 		return rc;
 	}
@@ -1562,14 +1566,14 @@
 static int
 lstcon_debug_ndlist(struct list_head *ndlist,
 		    struct list_head *translist,
-		    int timeout, struct list_head *result_up)
+		    int timeout, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	int		 rc;
 
 	rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
 				     NULL, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
@@ -1584,7 +1588,7 @@
 }
 
 int
-lstcon_session_debug(int timeout, struct list_head *result_up)
+lstcon_session_debug(int timeout, struct list_head __user *result_up)
 {
 	return lstcon_debug_ndlist(&console_session.ses_ndl_list,
 				   NULL, timeout, result_up);
@@ -1592,13 +1596,13 @@
 
 int
 lstcon_batch_debug(int timeout, char *name,
-		   int client, struct list_head *result_up)
+		   int client, struct list_head __user *result_up)
 {
 	lstcon_batch_t *bat;
 	int rc;
 
 	rc = lstcon_batch_find(name, &bat);
-	if (rc != 0)
+	if (rc)
 		return -ENOENT;
 
 	rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list :
@@ -1610,13 +1614,13 @@
 
 int
 lstcon_group_debug(int timeout, char *name,
-		   struct list_head *result_up)
+		   struct list_head __user *result_up)
 {
 	lstcon_group_t *grp;
 	int rc;
 
 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0)
+	if (rc)
 		return -ENOENT;
 
 	rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
@@ -1628,8 +1632,8 @@
 
 int
 lstcon_nodes_debug(int timeout,
-		   int count, lnet_process_id_t *ids_up,
-		   struct list_head *result_up)
+		   int count, lnet_process_id_t __user *ids_up,
+		   struct list_head __user *result_up)
 {
 	lnet_process_id_t id;
 	lstcon_ndlink_t *ndl;
@@ -1638,7 +1642,7 @@
 	int rc;
 
 	rc = lstcon_group_alloc(NULL, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Out of memory\n");
 		return rc;
 	}
@@ -1651,13 +1655,13 @@
 
 		/* node is added to tmp group */
 		rc = lstcon_group_ndlink_find(grp, id, &ndl, 1);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't create node link\n");
 			break;
 		}
 	}
 
-	if (rc != 0) {
+	if (rc) {
 		lstcon_group_decref(grp);
 		return rc;
 	}
@@ -1689,11 +1693,9 @@
 	sid->ses_stamp = cfs_time_current();
 }
 
-extern srpc_service_t lstcon_acceptor_service;
-
 int
 lstcon_session_new(char *name, int key, unsigned feats,
-		   int timeout, int force, lst_sid_t *sid_up)
+		   int timeout, int force, lst_sid_t __user *sid_up)
 {
 	int rc = 0;
 	int i;
@@ -1709,11 +1711,11 @@
 		rc = lstcon_session_end();
 
 		/* lstcon_session_end() only return local error */
-		if  (rc != 0)
+		if (rc)
 			return rc;
 	}
 
-	if ((feats & ~LST_FEATS_MASK) != 0) {
+	if (feats & ~LST_FEATS_MASK) {
 		CNETERR("Unknown session features %x\n",
 			(feats & ~LST_FEATS_MASK));
 		return -EINVAL;
@@ -1735,11 +1737,11 @@
 		sizeof(console_session.ses_name));
 
 	rc = lstcon_batch_add(LST_DEFAULT_BATCH);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	rc = lstcon_rpc_pinger_start();
-	if (rc != 0) {
+	if (rc) {
 		lstcon_batch_t *bat = NULL;
 
 		lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
@@ -1748,8 +1750,8 @@
 		return rc;
 	}
 
-	if (copy_to_user(sid_up, &console_session.ses_id,
-			     sizeof(lst_sid_t)) == 0)
+	if (!copy_to_user(sid_up, &console_session.ses_id,
+			  sizeof(lst_sid_t)))
 		return rc;
 
 	lstcon_session_end();
@@ -1758,8 +1760,10 @@
 }
 
 int
-lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp,
-		    lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len)
+lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
+		    unsigned __user *featp,
+		    lstcon_ndlist_ent_t __user *ndinfo_up,
+		    char __user *name_up, int len)
 {
 	lstcon_ndlist_ent_t *entp;
 	lstcon_ndlink_t *ndl;
@@ -1769,18 +1773,18 @@
 		return -ESRCH;
 
 	LIBCFS_ALLOC(entp, sizeof(*entp));
-	if (entp == NULL)
+	if (!entp)
 		return -ENOMEM;
 
 	list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
 
 	if (copy_to_user(sid_up, &console_session.ses_id,
-			     sizeof(lst_sid_t)) ||
+			 sizeof(lst_sid_t)) ||
 	    copy_to_user(key_up, &console_session.ses_key,
-			     sizeof(*key_up)) ||
+			 sizeof(*key_up)) ||
 	    copy_to_user(featp, &console_session.ses_features,
-			     sizeof(*featp)) ||
+			 sizeof(*featp)) ||
 	    copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
 	    copy_to_user(name_up, console_session.ses_name, len))
 		rc = -EFAULT;
@@ -1803,7 +1807,7 @@
 	rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list,
 				     NULL, LST_TRANS_SESEND, NULL,
 				     lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
@@ -1829,7 +1833,7 @@
 	/* destroy all batches */
 	while (!list_empty(&console_session.ses_bat_list)) {
 		bat = list_entry(console_session.ses_bat_list.next,
-				     lstcon_batch_t, bat_link);
+				 lstcon_batch_t, bat_link);
 
 		lstcon_batch_destroy(bat);
 	}
@@ -1837,7 +1841,7 @@
 	/* destroy all groups */
 	while (!list_empty(&console_session.ses_grp_list)) {
 		grp = list_entry(console_session.ses_grp_list.next,
-				     lstcon_group_t, grp_link);
+				 lstcon_group_t, grp_link);
 		LASSERT(grp->grp_ref == 1);
 
 		lstcon_group_decref(grp);
@@ -1857,7 +1861,7 @@
 {
 	int rc = 0;
 
-	if ((feats & ~LST_FEATS_MASK) != 0) {
+	if (feats & ~LST_FEATS_MASK) {
 		CERROR("Can't support these features: %x\n",
 		       (feats & ~LST_FEATS_MASK));
 		return -EPROTO;
@@ -1875,7 +1879,7 @@
 
 	spin_unlock(&console_session.ses_rpc_lock);
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("remote features %x do not match with session features %x of console\n",
 		       feats, console_session.ses_features);
 	}
@@ -1905,26 +1909,26 @@
 		goto out;
 	}
 
-	if (lstcon_session_feats_check(req->msg_ses_feats) != 0) {
+	if (lstcon_session_feats_check(req->msg_ses_feats)) {
 		jrep->join_status = EPROTO;
 		goto out;
 	}
 
 	if (jreq->join_sid.ses_nid != LNET_NID_ANY &&
-	     !lstcon_session_match(jreq->join_sid)) {
+	    !lstcon_session_match(jreq->join_sid)) {
 		jrep->join_status = EBUSY;
 		goto out;
 	}
 
-	if (lstcon_group_find(jreq->join_group, &grp) != 0) {
+	if (lstcon_group_find(jreq->join_group, &grp)) {
 		rc = lstcon_group_alloc(jreq->join_group, &grp);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Out of memory\n");
 			goto out;
 		}
 
 		list_add_tail(&grp->grp_link,
-				  &console_session.ses_grp_list);
+			      &console_session.ses_grp_list);
 		lstcon_group_addref(grp);
 	}
 
@@ -1935,13 +1939,13 @@
 	}
 
 	rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0);
-	if (rc == 0) {
+	if (!rc) {
 		jrep->join_status = EEXIST;
 		goto out;
 	}
 
 	rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		goto out;
 	}
@@ -1949,7 +1953,7 @@
 	ndl->ndl_node->nd_state   = LST_NODE_ACTIVE;
 	ndl->ndl_node->nd_timeout = console_session.ses_timeout;
 
-	if (grp->grp_userland == 0)
+	if (!grp->grp_userland)
 		grp->grp_userland = 1;
 
 	strlcpy(jrep->join_session, console_session.ses_name,
@@ -1959,7 +1963,7 @@
 
 out:
 	rep->msg_ses_feats = console_session.ses_features;
-	if (grp != NULL)
+	if (grp)
 		lstcon_group_decref(grp);
 
 	mutex_unlock(&console_session.ses_mutex);
@@ -1967,7 +1971,7 @@
 	return rc;
 }
 
-srpc_service_t lstcon_acceptor_service;
+static srpc_service_t lstcon_acceptor_service;
 static void lstcon_init_acceptor_service(void)
 {
 	/* initialize selftest console acceptor service table */
@@ -1977,7 +1981,7 @@
 	lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX;
 }
 
-extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
+extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
 
 static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
 
@@ -1988,7 +1992,7 @@
 	int i;
 	int rc;
 
-	memset(&console_session, 0, sizeof(lstcon_session_t));
+	memset(&console_session, 0, sizeof(struct lstcon_session));
 
 	console_session.ses_id		  = LST_INVALID_SID;
 	console_session.ses_state	  = LST_SESSION_NONE;
@@ -2008,7 +2012,7 @@
 
 	LIBCFS_ALLOC(console_session.ses_ndl_hash,
 		     sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
-	if (console_session.ses_ndl_hash == NULL)
+	if (!console_session.ses_ndl_hash)
 		return -ENOMEM;
 
 	for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
@@ -2019,7 +2023,7 @@
 
 	rc = srpc_add_service(&lstcon_acceptor_service);
 	LASSERT(rc != -EBUSY);
-	if (rc != 0) {
+	if (rc) {
 		LIBCFS_FREE(console_session.ses_ndl_hash,
 			    sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
 		return rc;
@@ -2027,14 +2031,14 @@
 
 	rc = srpc_service_add_buffers(&lstcon_acceptor_service,
 				      lstcon_acceptor_service.sv_wi_total);
-	if (rc != 0) {
+	if (rc) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	rc = libcfs_register_ioctl(&lstcon_ioctl_handler);
 
-	if (rc == 0) {
+	if (!rc) {
 		lstcon_rpc_module_init();
 		return 0;
 	}
@@ -2075,9 +2079,8 @@
 	LASSERT(list_empty(&console_session.ses_bat_list));
 	LASSERT(list_empty(&console_session.ses_trans_list));
 
-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
-	}
 
 	LIBCFS_FREE(console_session.ses_ndl_hash,
 		    sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 3f3286c..c9d1081 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -135,7 +135,7 @@
 
 #define LST_CONSOLE_TIMEOUT 300	     /* default console timeout */
 
-typedef struct {
+struct lstcon_session {
 	struct mutex        ses_mutex;        /* only 1 thread in session */
 	lst_sid_t           ses_id;           /* global session id */
 	int                 ses_key;          /* local session key */
@@ -165,9 +165,9 @@
 	spinlock_t          ses_rpc_lock;     /* serialize */
 	atomic_t            ses_rpc_counter;  /* # of initialized RPCs */
 	struct list_head    ses_rpc_freelist; /* idle console rpc */
-} lstcon_session_t; /* session descriptor */
+}; /* session descriptor */
 
-extern lstcon_session_t	 console_session;
+extern struct lstcon_session	 console_session;
 
 static inline lstcon_trans_stat_t *
 lstcon_trans_stat(void)
@@ -176,7 +176,7 @@
 }
 
 static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
 {
 	unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
 
@@ -184,51 +184,54 @@
 }
 
 int lstcon_console_init(void);
-int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
 int lstcon_console_fini(void);
 int lstcon_session_match(lst_sid_t sid);
 int lstcon_session_new(char *name, int key, unsigned version,
-		       int timeout, int flags, lst_sid_t *sid_up);
-int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
-			lstcon_ndlist_ent_t *entp, char *name_up, int len);
+		       int timeout, int flags, lst_sid_t __user *sid_up);
+int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key,
+			unsigned __user *verp, lstcon_ndlist_ent_t __user *entp,
+			char __user *name_up, int len);
 int lstcon_session_end(void);
-int lstcon_session_debug(int timeout, struct list_head *result_up);
+int lstcon_session_debug(int timeout, struct list_head __user *result_up);
 int lstcon_session_feats_check(unsigned feats);
 int lstcon_batch_debug(int timeout, char *name,
-		       int client, struct list_head *result_up);
+		       int client, struct list_head __user *result_up);
 int lstcon_group_debug(int timeout, char *name,
-		       struct list_head *result_up);
-int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
-		       struct list_head *result_up);
+		       struct list_head __user *result_up);
+int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t __user *nds_up,
+		       struct list_head __user *result_up);
 int lstcon_group_add(char *name);
 int lstcon_group_del(char *name);
 int lstcon_group_clean(char *name, int args);
-int lstcon_group_refresh(char *name, struct list_head *result_up);
-int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
-		     unsigned *featp, struct list_head *result_up);
-int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
-			struct list_head *result_up);
-int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
-		      int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
-int lstcon_group_list(int idx, int len, char *name_up);
+int lstcon_group_refresh(char *name, struct list_head __user *result_up);
+int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
+		     unsigned *featp, struct list_head __user *result_up);
+int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
+			struct list_head __user *result_up);
+int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up,
+		      int *index_p, int *ndent_p,
+		      lstcon_node_ent_t __user *ndents_up);
+int lstcon_group_list(int idx, int len, char __user *name_up);
 int lstcon_batch_add(char *name);
-int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
-int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
+int lstcon_batch_run(char *name, int timeout,
+		     struct list_head __user *result_up);
+int lstcon_batch_stop(char *name, int force,
+		      struct list_head __user *result_up);
 int lstcon_test_batch_query(char *name, int testidx,
 			    int client, int timeout,
-			    struct list_head *result_up);
+			    struct list_head __user *result_up);
 int lstcon_batch_del(char *name);
-int lstcon_batch_list(int idx, int namelen, char *name_up);
-int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
+int lstcon_batch_list(int idx, int namelen, char __user *name_up);
+int lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
 		      int server, int testidx, int *index_p,
-		      int *ndent_p, lstcon_node_ent_t *dents_up);
+		      int *ndent_p, lstcon_node_ent_t __user *dents_up);
 int lstcon_group_stat(char *grp_name, int timeout,
-		      struct list_head *result_up);
-int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
-		      int timeout, struct list_head *result_up);
+		      struct list_head __user *result_up);
+int lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+		      int timeout, struct list_head __user *result_up);
 int lstcon_test_add(char *batch_name, int type, int loop,
 		    int concur, int dist, int span,
 		    char *src_name, char *dst_name,
 		    void *param, int paramlen, int *retp,
-		    struct list_head *result_up);
+		    struct list_head __user *result_up);
 #endif
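
The console header hunks above do two things: the typedef'd lstcon_session_t gives way to plain struct lstcon_session, and every pointer that refers to user-space memory in the lstcon_* prototypes gains a __user annotation so sparse can catch direct dereferences of those buffers. A minimal kernel-side sketch of how such an annotated pointer is meant to be used (the struct and function names are illustrative, not taken from the Lustre tree):

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_ent {
	int nnode;
	int nactive;
};

/*
 * 'ent_up' lives in user space: it may only be reached through
 * copy_to_user()/copy_from_user(), never dereferenced directly.
 * Sparse enforces this once the parameter carries __user.
 */
static int demo_report(struct demo_ent __user *ent_up)
{
	struct demo_ent ent = { .nnode = 1, .nactive = 0 };

	if (copy_to_user(ent_up, &ent, sizeof(ent)))
		return -EFAULT;
	return 0;
}

The annotation changes nothing at run time; it only widens what static checking can see, which is why the hunk can touch so many prototypes without touching any logic.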
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 1a2da74..3bbc720 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -100,8 +100,8 @@
 	__swab64s(&(lc).route_length);  \
 } while (0)
 
-#define sfw_test_active(t)      (atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b)     (atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t)      (atomic_read(&(t)->tsi_nactive))
+#define sfw_batch_active(b)     (atomic_read(&(b)->bat_nactive))
 
 static struct smoketest_framework {
 	struct list_head  fw_zombie_rpcs;     /* RPCs to be recycled */
@@ -139,14 +139,14 @@
 {
 	sfw_test_case_t *tsc;
 
-	if (sfw_find_test_case(service->sv_id) != NULL) {
+	if (sfw_find_test_case(service->sv_id)) {
 		CERROR("Failed to register test %s (%d)\n",
-			service->sv_name, service->sv_id);
+		       service->sv_name, service->sv_id);
 		return -EEXIST;
 	}
 
 	LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
-	if (tsc == NULL)
+	if (!tsc)
 		return -ENOMEM;
 
 	tsc->tsc_cli_ops     = cliops;
@@ -164,7 +164,7 @@
 
 	LASSERT(!sfw_data.fw_shuttingdown);
 
-	if (sn == NULL || sn->sn_timeout == 0)
+	if (!sn || !sn->sn_timeout)
 		return;
 
 	LASSERT(!sn->sn_timer_active);
@@ -180,10 +180,10 @@
 {
 	sfw_session_t *sn = sfw_data.fw_session;
 
-	if (sn == NULL || !sn->sn_timer_active)
+	if (!sn || !sn->sn_timer_active)
 		return 0;
 
-	LASSERT(sn->sn_timeout != 0);
+	LASSERT(sn->sn_timeout);
 
 	if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
 		sn->sn_timer_active = 0;
@@ -202,7 +202,7 @@
 	sfw_batch_t *tsb;
 	sfw_test_case_t *tsc;
 
-	if (sn == NULL)
+	if (!sn)
 		return;
 
 	LASSERT(!sn->sn_timer_active);
@@ -226,7 +226,7 @@
 		}
 	}
 
-	if (nactive != 0)
+	if (nactive)
 		return;   /* wait for active batches to stop */
 
 	list_del_init(&sn->sn_list);
@@ -248,8 +248,8 @@
 	LASSERT(sn == sfw_data.fw_session);
 
 	CWARN("Session expired! sid: %s-%llu, name: %s\n",
-	       libcfs_nid2str(sn->sn_id.ses_nid),
-	       sn->sn_id.ses_stamp, &sn->sn_name[0]);
+	      libcfs_nid2str(sn->sn_id.ses_nid),
+	      sn->sn_id.ses_stamp, &sn->sn_name[0]);
 
 	sn->sn_timer_active = 0;
 	sfw_deactivate_session();
@@ -289,13 +289,12 @@
 	struct srpc_service *sv	= rpc->srpc_scd->scd_svc;
 	int status = rpc->srpc_status;
 
-	CDEBUG(D_NET,
-		"Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
-		sv->sv_name, libcfs_id2str(rpc->srpc_peer),
-		swi_state2str(rpc->srpc_wi.swi_state),
-		status);
+	CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
+	       sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+	       swi_state2str(rpc->srpc_wi.swi_state),
+	       status);
 
-	if (rpc->srpc_bulk != NULL)
+	if (rpc->srpc_bulk)
 		sfw_free_pages(rpc);
 	return;
 }
@@ -303,15 +302,14 @@
 static void
 sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
 {
-	LASSERT(rpc->crpc_bulk.bk_niov == 0);
+	LASSERT(!rpc->crpc_bulk.bk_niov);
 	LASSERT(list_empty(&rpc->crpc_list));
-	LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+	LASSERT(!atomic_read(&rpc->crpc_refcount));
 
-	CDEBUG(D_NET,
-		"Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
-		rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
-		swi_state2str(rpc->crpc_wi.swi_state),
-		rpc->crpc_aborted, rpc->crpc_status);
+	CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
+	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+	       swi_state2str(rpc->crpc_wi.swi_state),
+	       rpc->crpc_aborted, rpc->crpc_status);
 
 	spin_lock(&sfw_data.fw_lock);
 
@@ -328,7 +326,7 @@
 	sfw_session_t *sn = sfw_data.fw_session;
 	sfw_batch_t *bat;
 
-	LASSERT(sn != NULL);
+	LASSERT(sn);
 
 	list_for_each_entry(bat, &sn->sn_batches, bat_list) {
 		if (bat->bat_id.bat_id == bid.bat_id)
@@ -344,14 +342,14 @@
 	sfw_session_t *sn = sfw_data.fw_session;
 	sfw_batch_t *bat;
 
-	LASSERT(sn != NULL);
+	LASSERT(sn);
 
 	bat = sfw_find_batch(bid);
-	if (bat != NULL)
+	if (bat)
 		return bat;
 
 	LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
-	if (bat == NULL)
+	if (!bat)
 		return NULL;
 
 	bat->bat_error    = 0;
@@ -371,14 +369,14 @@
 	sfw_counters_t *cnt = &reply->str_fw;
 	sfw_batch_t *bat;
 
-	reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+	reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
 
 	if (request->str_sid.ses_nid == LNET_NID_ANY) {
 		reply->str_status = EINVAL;
 		return 0;
 	}
 
-	if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
+	if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
 		reply->str_status = ESRCH;
 		return 0;
 	}
@@ -386,8 +384,10 @@
 	lnet_counters_get(&reply->str_lnet);
 	srpc_get_counters(&reply->str_rpc);
 
-	/* send over the msecs since the session was started
-	 - with 32 bits to send, this is ~49 days */
+	/*
+	 * send over the msecs since the session was started
+	 * with 32 bits to send, this is ~49 days
+	 */
 	cnt->running_ms	     = jiffies_to_msecs(jiffies - sn->sn_started);
 	cnt->brw_errors      = atomic_read(&sn->sn_brw_errors);
 	cnt->ping_errors     = atomic_read(&sn->sn_ping_errors);
@@ -412,12 +412,12 @@
 	int cplen = 0;
 
 	if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
-		reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+		reply->mksn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
 		reply->mksn_status = EINVAL;
 		return 0;
 	}
 
-	if (sn != NULL) {
+	if (sn) {
 		reply->mksn_status  = 0;
 		reply->mksn_sid     = sn->sn_id;
 		reply->mksn_timeout = sn->sn_timeout;
@@ -437,20 +437,22 @@
 		}
 	}
 
-	/* reject the request if it requires unknown features
+	/*
+	 * reject the request if it requires unknown features
 	 * NB: old version will always accept all features because it's not
 	 * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also
 	 * harmless because it will return zero feature to console, and it's
 	 * console's responsibility to make sure all nodes in a session have
-	 * same feature mask. */
-	if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+	 * same feature mask.
+	 */
+	if (msg->msg_ses_feats & ~LST_FEATS_MASK) {
 		reply->mksn_status = EPROTO;
 		return 0;
 	}
 
 	/* brand new or create by force */
 	LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
-	if (sn == NULL) {
+	if (!sn) {
 		CERROR("Dropping RPC (mksn) under memory pressure.\n");
 		return -ENOMEM;
 	}
@@ -461,7 +463,7 @@
 	spin_lock(&sfw_data.fw_lock);
 
 	sfw_deactivate_session();
-	LASSERT(sfw_data.fw_session == NULL);
+	LASSERT(!sfw_data.fw_session);
 	sfw_data.fw_session = sn;
 
 	spin_unlock(&sfw_data.fw_lock);
@@ -477,15 +479,15 @@
 {
 	sfw_session_t *sn = sfw_data.fw_session;
 
-	reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+	reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
 
 	if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
 		reply->rmsn_status = EINVAL;
 		return 0;
 	}
 
-	if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
-		reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY;
+	if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
+		reply->rmsn_status = !sn ? ESRCH : EBUSY;
 		return 0;
 	}
 
@@ -500,7 +502,7 @@
 
 	reply->rmsn_status = 0;
 	reply->rmsn_sid    = LST_INVALID_SID;
-	LASSERT(sfw_data.fw_session == NULL);
+	LASSERT(!sfw_data.fw_session);
 	return 0;
 }
 
@@ -509,7 +511,7 @@
 {
 	sfw_session_t *sn = sfw_data.fw_session;
 
-	if (sn == NULL) {
+	if (!sn) {
 		reply->dbg_status = ESRCH;
 		reply->dbg_sid    = LST_INVALID_SID;
 		return 0;
@@ -555,10 +557,10 @@
 	int nbuf;
 	int rc;
 
-	LASSERT(tsi != NULL);
+	LASSERT(tsi);
 	tsc = sfw_find_test_case(tsi->tsi_service);
 	nbuf = sfw_test_buffers(tsi);
-	LASSERT(tsc != NULL);
+	LASSERT(tsc);
 	svc = tsc->tsc_srv_service;
 
 	if (tsi->tsi_is_client) {
@@ -567,13 +569,15 @@
 	}
 
 	rc = srpc_service_add_buffers(svc, nbuf);
-	if (rc != 0) {
+	if (rc) {
 		CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
 		      svc->sv_name, nbuf, rc);
-		/* NB: this error handler is not strictly correct, because
+		/*
+		 * NB: this error handler is not strictly correct, because
 		 * it may release more buffers than already allocated,
 		 * but it doesn't matter because request portal should
-		 * be lazy portal and will grow buffers if necessary. */
+		 * be lazy portal and will grow buffers if necessary.
+		 */
 		srpc_service_remove_buffers(svc, nbuf);
 		return -ENOMEM;
 	}
@@ -589,14 +593,16 @@
 {
 	struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
 
-	LASSERT(tsc != NULL);
+	LASSERT(tsc);
 
 	if (tsi->tsi_is_client)
 		return;
 
-	/* shrink buffers, because request portal is lazy portal
+	/*
+	 * shrink buffers, because request portal is lazy portal
 	 * which can grow buffers at runtime so we may leave
-	 * some buffers behind, but never mind... */
+	 * some buffers behind, but never mind...
+	 */
 	srpc_service_remove_buffers(tsc->tsc_srv_service,
 				    sfw_test_buffers(tsi));
 	return;
@@ -619,14 +625,14 @@
 
 	while (!list_empty(&tsi->tsi_units)) {
 		tsu = list_entry(tsi->tsi_units.next,
-				     sfw_test_unit_t, tsu_list);
+				 sfw_test_unit_t, tsu_list);
 		list_del(&tsu->tsu_list);
 		LIBCFS_FREE(tsu, sizeof(*tsu));
 	}
 
 	while (!list_empty(&tsi->tsi_free_rpcs)) {
 		rpc = list_entry(tsi->tsi_free_rpcs.next,
-				     srpc_client_rpc_t, crpc_list);
+				 srpc_client_rpc_t, crpc_list);
 		list_del(&rpc->crpc_list);
 		LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
 	}
@@ -647,7 +653,7 @@
 
 	while (!list_empty(&tsb->bat_tests)) {
 		tsi = list_entry(tsb->bat_tests.next,
-				     sfw_test_instance_t, tsi_list);
+				 sfw_test_instance_t, tsi_list);
 		list_del_init(&tsi->tsi_list);
 		sfw_destroy_test_instance(tsi);
 	}
@@ -666,7 +672,7 @@
 
 	while (!list_empty(&sn->sn_batches)) {
 		batch = list_entry(sn->sn_batches.next,
-				       sfw_batch_t, bat_list);
+				   sfw_batch_t, bat_list);
 		list_del_init(&batch->bat_list);
 		sfw_destroy_batch(batch);
 	}
@@ -690,7 +696,7 @@
 	LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
 
 	if (req->tsr_service == SRPC_SERVICE_BRW) {
-		if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+		if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
 			test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
 
 			__swab32s(&bulk->blk_opc);
@@ -734,9 +740,9 @@
 	int rc;
 
 	LIBCFS_ALLOC(tsi, sizeof(*tsi));
-	if (tsi == NULL) {
+	if (!tsi) {
 		CERROR("Can't allocate test instance for batch: %llu\n",
-			tsb->bat_id.bat_id);
+		       tsb->bat_id.bat_id);
 		return -ENOMEM;
 	}
 
@@ -755,7 +761,7 @@
 	tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
 
 	rc = sfw_load_test(tsi);
-	if (rc != 0) {
+	if (rc) {
 		LIBCFS_FREE(tsi, sizeof(*tsi));
 		return rc;
 	}
@@ -768,7 +774,7 @@
 		return 0;
 	}
 
-	LASSERT(bk != NULL);
+	LASSERT(bk);
 	LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
 	LASSERT((unsigned int)bk->bk_len >=
 		sizeof(lnet_process_id_packed_t) * ndest);
@@ -782,17 +788,17 @@
 		int j;
 
 		dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
-		LASSERT(dests != NULL);  /* my pages are within KVM always */
+		LASSERT(dests);  /* my pages are within KVM always */
 		id = dests[i % SFW_ID_PER_PAGE];
 		if (msg->msg_magic != SRPC_MSG_MAGIC)
 			sfw_unpack_id(id);
 
 		for (j = 0; j < tsi->tsi_concur; j++) {
 			LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
-			if (tsu == NULL) {
+			if (!tsu) {
 				rc = -ENOMEM;
 				CERROR("Can't allocate tsu for %d\n",
-					tsi->tsi_service);
+				       tsi->tsi_service);
 				goto error;
 			}
 
@@ -805,13 +811,13 @@
 	}
 
 	rc = tsi->tsi_ops->tso_init(tsi);
-	if (rc == 0) {
+	if (!rc) {
 		list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
 		return 0;
 	}
 
 error:
-	LASSERT(rc != 0);
+	LASSERT(rc);
 	sfw_destroy_test_instance(tsi);
 	return rc;
 }
@@ -876,9 +882,8 @@
 	list_del_init(&rpc->crpc_list);
 
 	/* batch is stopping or loop is done or get error */
-	if (tsi->tsi_stopping ||
-	    tsu->tsu_loop == 0 ||
-	    (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr))
+	if (tsi->tsi_stopping || !tsu->tsu_loop ||
+	    (rpc->crpc_status && tsi->tsi_stoptsu_onerr))
 		done = 1;
 
 	/* dec ref for poster */
@@ -910,14 +915,14 @@
 	if (!list_empty(&tsi->tsi_free_rpcs)) {
 		/* pick request from buffer */
 		rpc = list_entry(tsi->tsi_free_rpcs.next,
-				     srpc_client_rpc_t, crpc_list);
+				 srpc_client_rpc_t, crpc_list);
 		LASSERT(nblk == rpc->crpc_bulk.bk_niov);
 		list_del_init(&rpc->crpc_list);
 	}
 
 	spin_unlock(&tsi->tsi_lock);
 
-	if (rpc == NULL) {
+	if (!rpc) {
 		rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
 					     blklen, sfw_test_rpc_done,
 					     sfw_test_rpc_fini, tsu);
@@ -927,7 +932,7 @@
 				     sfw_test_rpc_fini, tsu);
 	}
 
-	if (rpc == NULL) {
+	if (!rpc) {
 		CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
 		return -ENOMEM;
 	}
@@ -947,12 +952,12 @@
 
 	LASSERT(wi == &tsu->tsu_worker);
 
-	if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) {
-		LASSERT(rpc == NULL);
+	if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) {
+		LASSERT(!rpc);
 		goto test_done;
 	}
 
-	LASSERT(rpc != NULL);
+	LASSERT(rpc);
 
 	spin_lock(&tsi->tsi_lock);
 
@@ -1074,7 +1079,7 @@
 	if (testidx < 0)
 		return -EINVAL;
 
-	if (testidx == 0) {
+	if (!testidx) {
 		reply->bar_active = atomic_read(&tsb->bat_nactive);
 		return 0;
 	}
@@ -1101,11 +1106,11 @@
 sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
 		int sink)
 {
-	LASSERT(rpc->srpc_bulk == NULL);
+	LASSERT(!rpc->srpc_bulk);
 	LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
 
 	rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
-	if (rpc->srpc_bulk == NULL)
+	if (!rpc->srpc_bulk)
 		return -ENOMEM;
 
 	return 0;
@@ -1121,13 +1126,13 @@
 	sfw_batch_t *bat;
 
 	request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
-	reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+	reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
 
-	if (request->tsr_loop == 0 ||
-	    request->tsr_concur == 0 ||
+	if (!request->tsr_loop ||
+	    !request->tsr_concur ||
 	    request->tsr_sid.ses_nid == LNET_NID_ANY ||
 	    request->tsr_ndest > SFW_MAX_NDESTS ||
-	    (request->tsr_is_client && request->tsr_ndest == 0) ||
+	    (request->tsr_is_client && !request->tsr_ndest) ||
 	    request->tsr_concur > SFW_MAX_CONCUR ||
 	    request->tsr_service > SRPC_SERVICE_MAX_ID ||
 	    request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
@@ -1135,17 +1140,17 @@
 		return 0;
 	}
 
-	if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
-	    sfw_find_test_case(request->tsr_service) == NULL) {
+	if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
+	    !sfw_find_test_case(request->tsr_service)) {
 		reply->tsr_status = ENOENT;
 		return 0;
 	}
 
 	bat = sfw_bid2batch(request->tsr_bid);
-	if (bat == NULL) {
+	if (!bat) {
 		CERROR("Dropping RPC (%s) from %s under memory pressure.\n",
-			rpc->srpc_scd->scd_svc->sv_name,
-			libcfs_id2str(rpc->srpc_peer));
+		       rpc->srpc_scd->scd_svc->sv_name,
+		       libcfs_id2str(rpc->srpc_peer));
 		return -ENOMEM;
 	}
 
@@ -1154,12 +1159,12 @@
 		return 0;
 	}
 
-	if (request->tsr_is_client && rpc->srpc_bulk == NULL) {
+	if (request->tsr_is_client && !rpc->srpc_bulk) {
 		/* rpc will be resumed later in sfw_bulk_ready */
 		int npg = sfw_id_pages(request->tsr_ndest);
 		int len;
 
-		if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+		if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
 			len = npg * PAGE_CACHE_SIZE;
 
 		} else  {
@@ -1171,11 +1176,11 @@
 	}
 
 	rc = sfw_add_test_instance(bat, rpc);
-	CDEBUG(rc == 0 ? D_NET : D_WARNING,
-		"%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
-		rc == 0 ? "Added" : "Failed to add", request->tsr_service,
-		request->tsr_is_client ? "client" : "server",
-		request->tsr_loop, request->tsr_concur, request->tsr_ndest);
+	CDEBUG(!rc ? D_NET : D_WARNING,
+	       "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
+	       !rc ? "Added" : "Failed to add", request->tsr_service,
+	       request->tsr_is_client ? "client" : "server",
+	       request->tsr_loop, request->tsr_concur, request->tsr_ndest);
 
 	reply->tsr_status = (rc < 0) ? -rc : rc;
 	return 0;
@@ -1188,15 +1193,15 @@
 	int rc = 0;
 	sfw_batch_t *bat;
 
-	reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+	reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
 
-	if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
+	if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
 		reply->bar_status = ESRCH;
 		return 0;
 	}
 
 	bat = sfw_find_batch(request->bar_bid);
-	if (bat == NULL) {
+	if (!bat) {
 		reply->bar_status = ENOENT;
 		return 0;
 	}
@@ -1231,7 +1236,7 @@
 	unsigned features = LST_FEATS_MASK;
 	int rc = 0;
 
-	LASSERT(sfw_data.fw_active_srpc == NULL);
+	LASSERT(!sfw_data.fw_active_srpc);
 	LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
 	spin_lock(&sfw_data.fw_lock);
@@ -1242,7 +1247,7 @@
 	}
 
 	/* Remove timer to avoid racing with it or expiring active session */
-	if (sfw_del_session_timer() != 0) {
+	if (sfw_del_session_timer()) {
 		CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
 		       sv->sv_name, libcfs_id2str(rpc->srpc_peer));
 		spin_unlock(&sfw_data.fw_lock);
@@ -1262,7 +1267,7 @@
 	    sv->sv_id != SRPC_SERVICE_DEBUG) {
 		sfw_session_t *sn = sfw_data.fw_session;
 
-		if (sn != NULL &&
+		if (sn &&
 		    sn->sn_features != request->msg_ses_feats) {
 			CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
 				request->msg_ses_feats, sn->sn_features);
@@ -1271,10 +1276,12 @@
 			goto out;
 		}
 
-	} else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
-		/* NB: at this point, old version will ignore features and
+	} else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
+		/*
+		 * NB: at this point, old version will ignore features and
 		 * create new session anyway, so console should be able
-		 * to handle this */
+		 * to handle this
+		 */
 		reply->msg_body.reply.status = EPROTO;
 		goto out;
 	}
@@ -1312,7 +1319,7 @@
 		break;
 	}
 
-	if (sfw_data.fw_session != NULL)
+	if (sfw_data.fw_session)
 		features = sfw_data.fw_session->sn_features;
  out:
 	reply->msg_ses_feats = features;
@@ -1333,14 +1340,14 @@
 	struct srpc_service *sv = rpc->srpc_scd->scd_svc;
 	int rc;
 
-	LASSERT(rpc->srpc_bulk != NULL);
+	LASSERT(rpc->srpc_bulk);
 	LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
-	LASSERT(sfw_data.fw_active_srpc == NULL);
+	LASSERT(!sfw_data.fw_active_srpc);
 	LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
 
 	spin_lock(&sfw_data.fw_lock);
 
-	if (status != 0) {
+	if (status) {
 		CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
 		       sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
 		spin_unlock(&sfw_data.fw_lock);
@@ -1352,7 +1359,7 @@
 		return -ESHUTDOWN;
 	}
 
-	if (sfw_del_session_timer() != 0) {
+	if (sfw_del_session_timer()) {
 		CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
 		       sv->sv_name, libcfs_id2str(rpc->srpc_peer));
 		spin_unlock(&sfw_data.fw_lock);
@@ -1386,9 +1393,9 @@
 	LASSERT(!sfw_data.fw_shuttingdown);
 	LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-	if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
+	if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
 		rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
-				     srpc_client_rpc_t, crpc_list);
+				 srpc_client_rpc_t, crpc_list);
 		list_del(&rpc->crpc_list);
 
 		srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1397,15 +1404,15 @@
 
 	spin_unlock(&sfw_data.fw_lock);
 
-	if (rpc == NULL) {
+	if (!rpc) {
 		rpc = srpc_create_client_rpc(peer, service,
 					     nbulkiov, bulklen, done,
-					     nbulkiov != 0 ?  NULL :
+					     nbulkiov ?  NULL :
 					     sfw_client_rpc_fini,
 					     priv);
 	}
 
-	if (rpc != NULL) /* "session" is concept in framework */
+	if (rpc) /* "session" is concept in framework */
 		rpc->crpc_reqstmsg.msg_ses_feats = features;
 
 	return rpc;
@@ -1622,16 +1629,6 @@
 	}
 };
 
-extern sfw_test_client_ops_t ping_test_client;
-extern srpc_service_t	ping_test_service;
-extern void ping_init_test_client(void);
-extern void ping_init_test_service(void);
-
-extern sfw_test_client_ops_t brw_test_client;
-extern srpc_service_t	brw_test_service;
-extern void brw_init_test_client(void);
-extern void brw_init_test_service(void);
-
 int
 sfw_startup(void)
 {
@@ -1643,20 +1640,20 @@
 
 	if (session_timeout < 0) {
 		CERROR("Session timeout must be non-negative: %d\n",
-			session_timeout);
+		       session_timeout);
 		return -EINVAL;
 	}
 
 	if (rpc_timeout < 0) {
 		CERROR("RPC timeout must be non-negative: %d\n",
-			rpc_timeout);
+		       rpc_timeout);
 		return -EINVAL;
 	}
 
-	if (session_timeout == 0)
+	if (!session_timeout)
 		CWARN("Zero session_timeout specified - test sessions never expire.\n");
 
-	if (rpc_timeout == 0)
+	if (!rpc_timeout)
 		CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
 
 	memset(&sfw_data, 0, sizeof(struct smoketest_framework));
@@ -1672,12 +1669,12 @@
 	brw_init_test_client();
 	brw_init_test_service();
 	rc = sfw_register_test(&brw_test_service, &brw_test_client);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
 	ping_init_test_client();
 	ping_init_test_service();
 	rc = sfw_register_test(&ping_test_service, &ping_test_client);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
 	error = 0;
 	list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
@@ -1685,16 +1682,16 @@
 
 		rc = srpc_add_service(sv);
 		LASSERT(rc != -EBUSY);
-		if (rc != 0) {
+		if (rc) {
 			CWARN("Failed to add %s service: %d\n",
-			       sv->sv_name, rc);
+			      sv->sv_name, rc);
 			error = rc;
 		}
 	}
 
 	for (i = 0; ; i++) {
 		sv = &sfw_services[i];
-		if (sv->sv_name == NULL)
+		if (!sv->sv_name)
 			break;
 
 		sv->sv_bulk_ready = NULL;
@@ -1705,9 +1702,9 @@
 
 		rc = srpc_add_service(sv);
 		LASSERT(rc != -EBUSY);
-		if (rc != 0) {
+		if (rc) {
 			CWARN("Failed to add %s service: %d\n",
-			       sv->sv_name, rc);
+			      sv->sv_name, rc);
 			error = rc;
 		}
 
@@ -1716,14 +1713,14 @@
 			continue;
 
 		rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
-		if (rc != 0) {
+		if (rc) {
 			CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
 			      sv->sv_name, sv->sv_wi_total, rc);
 			error = -ENOMEM;
 		}
 	}
 
-	if (error != 0)
+	if (error)
 		sfw_shutdown();
 	return error;
 }
@@ -1738,15 +1735,15 @@
 	spin_lock(&sfw_data.fw_lock);
 
 	sfw_data.fw_shuttingdown = 1;
-	lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock,
+	lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock,
 		       "waiting for active RPC to finish.\n");
 
-	if (sfw_del_session_timer() != 0)
-		lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock,
+	if (sfw_del_session_timer())
+		lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock,
 			       "waiting for session timer to explode.\n");
 
 	sfw_deactivate_session();
-	lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
+	lst_wait_until(!atomic_read(&sfw_data.fw_nzombies),
 		       sfw_data.fw_lock,
 		       "waiting for %d zombie sessions to die.\n",
 		       atomic_read(&sfw_data.fw_nzombies));
@@ -1755,7 +1752,7 @@
 
 	for (i = 0; ; i++) {
 		sv = &sfw_services[i];
-		if (sv->sv_name == NULL)
+		if (!sv->sv_name)
 			break;
 
 		srpc_shutdown_service(sv);
@@ -1772,7 +1769,7 @@
 		srpc_client_rpc_t *rpc;
 
 		rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
-				     srpc_client_rpc_t, crpc_list);
+				 srpc_client_rpc_t, crpc_list);
 		list_del(&rpc->crpc_list);
 
 		LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1780,7 +1777,7 @@
 
 	for (i = 0; ; i++) {
 		sv = &sfw_services[i];
-		if (sv->sv_name == NULL)
+		if (!sv->sv_name)
 			break;
 
 		srpc_wait_service_shutdown(sv);
@@ -1788,7 +1785,7 @@
 
 	while (!list_empty(&sfw_data.fw_tests)) {
 		tsc = list_entry(sfw_data.fw_tests.next,
-				     sfw_test_case_t, tsc_list);
+				 sfw_test_case_t, tsc_list);
 
 		srpc_wait_service_shutdown(tsc->tsc_srv_service);
 
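The framework.c changes are almost entirely mechanical checkpatch cleanups: explicit comparisons against NULL and 0 become boolean tests, continuation arguments are re-aligned under the opening parenthesis, and multi-line comments move to the usual block form. A hedged before/after sketch of the comparison rewrite, using made-up names rather than driver code:

#include <stddef.h>

struct node {
	int refcount;
};

/*
 * Old style: the explicit NULL/zero comparisons checkpatch warns about.
 */
static int node_busy_old(const struct node *n)
{
	if (n == NULL)
		return 0;
	return n->refcount != 0;
}

/*
 * New style: identical logic written as boolean tests, the way the
 * hunks above rewrite the sfw_* and srpc_* checks.
 */
static int node_busy_new(const struct node *n)
{
	return n && n->refcount;
}

Both functions behave identically; the rewrite is purely about matching kernel coding style.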
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 46cbdf0..cbb7884 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -37,6 +37,7 @@
 #define DEBUG_SUBSYSTEM S_LNET
 
 #include "selftest.h"
+#include "console.h"
 
 enum {
 	LST_INIT_NONE = 0,
@@ -47,9 +48,6 @@
 	LST_INIT_CONSOLE
 };
 
-extern int lstcon_console_init(void);
-extern int lstcon_console_fini(void);
-
 static int lst_init_step = LST_INIT_NONE;
 
 struct cfs_wi_sched *lst_sched_serial;
@@ -70,7 +68,7 @@
 	case LST_INIT_WI_TEST:
 		for (i = 0;
 		     i < cfs_cpt_number(lnet_cpt_table()); i++) {
-			if (lst_sched_test[i] == NULL)
+			if (!lst_sched_test[i])
 				continue;
 			cfs_wi_sched_destroy(lst_sched_test[i]);
 		}
@@ -98,7 +96,7 @@
 
 	rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY,
 				 1, &lst_sched_serial);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Failed to create serial WI scheduler for LST\n");
 		return rc;
 	}
@@ -106,7 +104,7 @@
 
 	nscheds = cfs_cpt_number(lnet_cpt_table());
 	LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds);
-	if (lst_sched_test == NULL)
+	if (!lst_sched_test)
 		goto error;
 
 	lst_init_step = LST_INIT_WI_TEST;
@@ -117,7 +115,7 @@
 		nthrs = max(nthrs - 1, 1);
 		rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
 					 nthrs, &lst_sched_test[i]);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
 			       i);
 			goto error;
@@ -125,21 +123,21 @@
 	}
 
 	rc = srpc_startup();
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LST can't startup rpc\n");
 		goto error;
 	}
 	lst_init_step = LST_INIT_RPC;
 
 	rc = sfw_startup();
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LST can't startup framework\n");
 		goto error;
 	}
 	lst_init_step = LST_INIT_FW;
 
 	rc = lstcon_console_init();
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LST can't startup console\n");
 		goto error;
 	}
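
module.c now pulls in console.h instead of carrying its own extern declarations of lstcon_console_init()/lstcon_console_fini(), mirroring the removal of the ping/brw extern block from framework.c above. Declaring an interface once, in a shared header, lets the compiler catch any drift between declaration and definition. A generic sketch of the pattern with invented file and symbol names:

/* demo_console.h - the single home for the cross-file prototypes. */
#ifndef DEMO_CONSOLE_H
#define DEMO_CONSOLE_H

int demo_console_init(void);
int demo_console_fini(void);

#endif

/*
 * demo_module.c - consumers include the header rather than writing
 * their own "extern int demo_console_init(void);" copies, so a change
 * to the real signature becomes a compile error instead of a silent
 * mismatch.
 */
#include "demo_console.h"

static int demo_startup(void)
{
	return demo_console_init();
}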
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index d426536..9d27e395 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -61,7 +61,7 @@
 	sfw_session_t *sn = tsi->tsi_batch->bat_session;
 
 	LASSERT(tsi->tsi_is_client);
-	LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0);
+	LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
 
 	spin_lock_init(&lst_ping_data.pnd_lock);
 	lst_ping_data.pnd_counter = 0;
@@ -75,7 +75,7 @@
 	sfw_session_t *sn = tsi->tsi_batch->bat_session;
 	int errors;
 
-	LASSERT(sn != NULL);
+	LASSERT(sn);
 	LASSERT(tsi->tsi_is_client);
 
 	errors = atomic_read(&sn->sn_ping_errors);
@@ -95,11 +95,11 @@
 	struct timespec64 ts;
 	int rc;
 
-	LASSERT(sn != NULL);
-	LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+	LASSERT(sn);
+	LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
 
 	rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst;
@@ -126,14 +126,14 @@
 	srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
 	struct timespec64 ts;
 
-	LASSERT(sn != NULL);
+	LASSERT(sn);
 
-	if (rpc->crpc_status != 0) {
+	if (rpc->crpc_status) {
 		if (!tsi->tsi_stopping) /* rpc could have been aborted */
 			atomic_inc(&sn->sn_ping_errors);
 		CERROR("Unable to ping %s (%d): %d\n",
-			libcfs_id2str(rpc->crpc_dest),
-			reqst->pnr_seq, rpc->crpc_status);
+		       libcfs_id2str(rpc->crpc_dest),
+		       reqst->pnr_seq, rpc->crpc_status);
 		return;
 	}
 
@@ -147,8 +147,8 @@
 		rpc->crpc_status = -EBADMSG;
 		atomic_inc(&sn->sn_ping_errors);
 		CERROR("Bad magic %u from %s, %u expected.\n",
-			reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
-			LST_PING_TEST_MAGIC);
+		       reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
+		       LST_PING_TEST_MAGIC);
 		return;
 	}
 
@@ -156,8 +156,8 @@
 		rpc->crpc_status = -EBADMSG;
 		atomic_inc(&sn->sn_ping_errors);
 		CERROR("Bad seq %u from %s, %u expected.\n",
-			reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
-			reqst->pnr_seq);
+		       reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
+		       reqst->pnr_seq);
 		return;
 	}
 
@@ -191,14 +191,14 @@
 
 	if (req->pnr_magic != LST_PING_TEST_MAGIC) {
 		CERROR("Unexpected magic %08x from %s\n",
-			req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
+		       req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
 		return -EINVAL;
 	}
 
 	rep->pnr_seq   = req->pnr_seq;
 	rep->pnr_magic = LST_PING_TEST_MAGIC;
 
-	if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+	if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
 		replymsg->msg_ses_feats = LST_FEATS_MASK;
 		rep->pnr_status = EPROTO;
 		return 0;
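
ping_test.c keeps the same session-feature guard as the framework code: any bit outside LST_FEATS_MASK means the peer is asking for something this side does not understand, so the reply advertises the supported mask and carries EPROTO. The hunk only drops the redundant "!= 0". A standalone sketch of that mask check, with the mask value and names invented for illustration:

#include <stdint.h>

#define DEMO_FEATS_MASK 0x0003u	/* illustrative: only bits 0-1 are known */

/*
 * Return nonzero when the request carries feature bits we do not
 * recognise (the selftest code answers such requests with EPROTO).
 */
static int demo_unknown_feats(uint32_t requested)
{
	return !!(requested & ~DEMO_FEATS_MASK);
}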
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 2acf6ec..1b76933 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -107,11 +107,11 @@
 	int i;
 	struct page *pg;
 
-	LASSERT(bk != NULL);
+	LASSERT(bk);
 
 	for (i = 0; i < bk->bk_niov; i++) {
 		pg = bk->bk_iovs[i].kiov_page;
-		if (pg == NULL)
+		if (!pg)
 			break;
 
 		__free_page(pg);
@@ -131,7 +131,7 @@
 
 	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
 			 offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
-	if (bk == NULL) {
+	if (!bk) {
 		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
 		return NULL;
 	}
@@ -147,7 +147,7 @@
 
 		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
 				      GFP_KERNEL, 0);
-		if (pg == NULL) {
+		if (!pg) {
 			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
 			srpc_free_bulk(bk);
 			return NULL;
@@ -199,7 +199,7 @@
 	struct list_head *q;
 	int i;
 
-	if (svc->sv_cpt_data == NULL)
+	if (!svc->sv_cpt_data)
 		return;
 
 	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
@@ -212,9 +212,8 @@
 				break;
 
 			while (!list_empty(q)) {
-				buf = list_entry(q->next,
-						     struct srpc_buffer,
-						     buf_list);
+				buf = list_entry(q->next, struct srpc_buffer,
+						 buf_list);
 				list_del(&buf->buf_list);
 				LIBCFS_FREE(buf, sizeof(*buf));
 			}
@@ -224,8 +223,8 @@
 
 		while (!list_empty(&scd->scd_rpc_free)) {
 			rpc = list_entry(scd->scd_rpc_free.next,
-					     struct srpc_server_rpc,
-					     srpc_list);
+					 struct srpc_server_rpc,
+					 srpc_list);
 			list_del(&rpc->srpc_list);
 			LIBCFS_FREE(rpc, sizeof(*rpc));
 		}
@@ -259,7 +258,7 @@
 
 	svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
 					    sizeof(struct srpc_service_cd));
-	if (svc->sv_cpt_data == NULL)
+	if (!svc->sv_cpt_data)
 		return -ENOMEM;
 
 	svc->sv_ncpts = srpc_serv_is_framework(svc) ?
@@ -278,23 +277,27 @@
 		scd->scd_ev.ev_data = scd;
 		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
 
-		/* NB: don't use lst_sched_serial for adding buffer,
-		 * see details in srpc_service_add_buffers() */
+		/*
+		 * NB: don't use lst_sched_serial for adding buffer,
+		 * see details in srpc_service_add_buffers()
+		 */
 		swi_init_workitem(&scd->scd_buf_wi, scd,
 				  srpc_add_buffer, lst_sched_test[i]);
 
-		if (i != 0 && srpc_serv_is_framework(svc)) {
-			/* NB: framework service only needs srpc_service_cd for
+		if (i && srpc_serv_is_framework(svc)) {
+			/*
+			 * NB: framework service only needs srpc_service_cd for
 			 * one partition, but we allocate for all to make
 			 * it easier to implement, it will waste a little
-			 * memory but nobody should care about this */
+			 * memory but nobody should care about this
+			 */
 			continue;
 		}
 
 		for (j = 0; j < nrpcs; j++) {
 			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
 					 i, sizeof(*rpc));
-			if (rpc == NULL) {
+			if (!rpc) {
 				srpc_service_fini(svc);
 				return -ENOMEM;
 			}
@@ -312,14 +315,14 @@
 
 	LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
 
-	if (srpc_service_init(sv) != 0)
+	if (srpc_service_init(sv))
 		return -ENOMEM;
 
 	spin_lock(&srpc_data.rpc_glock);
 
 	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
 
-	if (srpc_data.rpc_services[id] != NULL) {
+	if (srpc_data.rpc_services[id]) {
 		spin_unlock(&srpc_data.rpc_glock);
 		goto failed;
 	}
@@ -363,7 +366,7 @@
 
 	rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
 			  local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LNetMEAttach failed: %d\n", rc);
 		LASSERT(rc == -ENOMEM);
 		return -ENOMEM;
@@ -377,18 +380,17 @@
 	md.eq_handle = srpc_data.rpc_lnet_eq;
 
 	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LNetMDAttach failed: %d\n", rc);
 		LASSERT(rc == -ENOMEM);
 
 		rc = LNetMEUnlink(meh);
-		LASSERT(rc == 0);
+		LASSERT(!rc);
 		return -ENOMEM;
 	}
 
-	CDEBUG(D_NET,
-		"Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
-		libcfs_id2str(peer), portal, matchbits);
+	CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
+	       libcfs_id2str(peer), portal, matchbits);
 	return 0;
 }
 
@@ -404,42 +406,44 @@
 	md.start     = buf;
 	md.length    = len;
 	md.eq_handle = srpc_data.rpc_lnet_eq;
-	md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
+	md.threshold = options & LNET_MD_OP_GET ? 2 : 1;
 	md.options   = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
 
 	rc = LNetMDBind(md, LNET_UNLINK, mdh);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LNetMDBind failed: %d\n", rc);
 		LASSERT(rc == -ENOMEM);
 		return -ENOMEM;
 	}
 
-	/* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
+	/*
+	 * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
 	 * they're only meaningful for MDs attached to an ME (i.e. passive
-	 * buffers... */
-	if ((options & LNET_MD_OP_PUT) != 0) {
+	 * buffers...
+	 */
+	if (options & LNET_MD_OP_PUT) {
 		rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
 			     portal, matchbits, 0, 0);
 	} else {
-		LASSERT((options & LNET_MD_OP_GET) != 0);
+		LASSERT(options & LNET_MD_OP_GET);
 
 		rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
 	}
 
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
-			((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
-			libcfs_id2str(peer), portal, matchbits, rc);
+		       options & LNET_MD_OP_PUT ? "Put" : "Get",
+		       libcfs_id2str(peer), portal, matchbits, rc);
 
-		/* The forthcoming unlink event will complete this operation
+		/*
+		 * The forthcoming unlink event will complete this operation
 		 * with failure, so fall through and return success here.
 		 */
 		rc = LNetMDUnlink(*mdh);
-		LASSERT(rc == 0);
+		LASSERT(!rc);
 	} else {
-		CDEBUG(D_NET,
-			"Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
-			libcfs_id2str(peer), portal, matchbits);
+		CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
+		       libcfs_id2str(peer), portal, matchbits);
 	}
 	return 0;
 }
@@ -476,19 +480,22 @@
 				      msg, sizeof(*msg), &buf->buf_mdh,
 				      &scd->scd_ev);
 
-	/* At this point, a RPC (new or delayed) may have arrived in
+	/*
+	 * At this point, a RPC (new or delayed) may have arrived in
 	 * msg and its event handler has been called. So we must add
-	 * buf to scd_buf_posted _before_ dropping scd_lock */
-
+	 * buf to scd_buf_posted _before_ dropping scd_lock
+	 */
 	spin_lock(&scd->scd_lock);
 
-	if (rc == 0) {
+	if (!rc) {
 		if (!sv->sv_shuttingdown)
 			return 0;
 
 		spin_unlock(&scd->scd_lock);
-		/* srpc_shutdown_service might have tried to unlink me
-		 * when my buf_mdh was still invalid */
+		/*
+		 * srpc_shutdown_service might have tried to unlink me
+		 * when my buf_mdh was still invalid
+		 */
 		LNetMDUnlink(buf->buf_mdh);
 		spin_lock(&scd->scd_lock);
 		return 0;
@@ -514,9 +521,11 @@
 	struct srpc_buffer *buf;
 	int rc = 0;
 
-	/* it's called by workitem scheduler threads, these threads
+	/*
+	 * it's called by workitem scheduler threads, these threads
 	 * should have been set CPT affinity, so buffers will be posted
-	 * on CPT local list of Portal */
+	 * on CPT local list of Portal
+	 */
 	spin_lock(&scd->scd_lock);
 
 	while (scd->scd_buf_adjust > 0 &&
@@ -527,7 +536,7 @@
 		spin_unlock(&scd->scd_lock);
 
 		LIBCFS_ALLOC(buf, sizeof(*buf));
-		if (buf == NULL) {
+		if (!buf) {
 			CERROR("Failed to add new buf to service: %s\n",
 			       scd->scd_svc->sv_name);
 			spin_lock(&scd->scd_lock);
@@ -546,7 +555,7 @@
 		}
 
 		rc = srpc_service_post_buffer(scd, buf);
-		if (rc != 0)
+		if (rc)
 			break; /* buf has been freed inside */
 
 		LASSERT(scd->scd_buf_posting > 0);
@@ -555,7 +564,7 @@
 		scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
 	}
 
-	if (rc != 0) {
+	if (rc) {
 		scd->scd_buf_err_stamp = ktime_get_real_seconds();
 		scd->scd_buf_err = rc;
 
@@ -607,12 +616,12 @@
 		 * block all WIs pending on lst_sched_serial for a moment
 		 * which is not good but not fatal.
 		 */
-		lst_wait_until(scd->scd_buf_err != 0 ||
-			       (scd->scd_buf_adjust == 0 &&
-				scd->scd_buf_posting == 0),
+		lst_wait_until(scd->scd_buf_err ||
+			       (!scd->scd_buf_adjust &&
+				!scd->scd_buf_posting),
 			       scd->scd_lock, "waiting for adding buffer\n");
 
-		if (scd->scd_buf_err != 0 && rc == 0)
+		if (scd->scd_buf_err && !rc)
 			rc = scd->scd_buf_err;
 
 		spin_unlock(&scd->scd_lock);
@@ -670,7 +679,7 @@
 		}
 
 		rpc = list_entry(scd->scd_rpc_active.next,
-				     struct srpc_server_rpc, srpc_list);
+				 struct srpc_server_rpc, srpc_list);
 		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
 			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
 			swi_state2str(rpc->srpc_wi.swi_state),
@@ -693,7 +702,7 @@
 	__must_hold(&scd->scd_lock)
 {
 	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
-		if (srpc_service_post_buffer(scd, buf) != 0) {
+		if (srpc_service_post_buffer(scd, buf)) {
 			CWARN("Failed to post %s buffer\n",
 			      scd->scd_svc->sv_name);
 		}
@@ -706,7 +715,7 @@
 	if (scd->scd_buf_adjust < 0) {
 		scd->scd_buf_adjust++;
 		if (scd->scd_buf_adjust < 0 &&
-		    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
+		    !scd->scd_buf_total && !scd->scd_buf_posting) {
 			CDEBUG(D_INFO,
 			       "Try to recycle %d buffers but nothing left\n",
 			       scd->scd_buf_adjust);
@@ -732,9 +741,11 @@
 	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
 		spin_lock(&scd->scd_lock);
 
-		/* schedule in-flight RPCs to notice the abort, NB:
+		/*
+		 * schedule in-flight RPCs to notice the abort, NB:
 		 * racing with incoming RPCs; complete fix should make test
-		 * RPCs carry session ID in its headers */
+		 * RPCs carry session ID in its headers
+		 */
 		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
 			rpc->srpc_aborted = 1;
 			swi_schedule_workitem(&rpc->srpc_wi);
@@ -772,8 +783,10 @@
 
 		spin_unlock(&scd->scd_lock);
 
-		/* OK to traverse scd_buf_posted without lock, since no one
-		 * touches scd_buf_posted now */
+		/*
+		 * OK to traverse scd_buf_posted without lock, since no one
+		 * touches scd_buf_posted now
+		 */
 		list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
 			LNetMDUnlink(buf->buf_mdh);
 	}
@@ -794,7 +807,7 @@
 				    sizeof(srpc_msg_t), LNET_MD_OP_PUT,
 				    rpc->crpc_dest, LNET_NID_ANY,
 				    &rpc->crpc_reqstmdh, ev);
-	if (rc != 0) {
+	if (rc) {
 		LASSERT(rc == -ENOMEM);
 		ev->ev_fired = 1;  /* no more event expected */
 	}
@@ -818,7 +831,7 @@
 				    &rpc->crpc_replymsg, sizeof(srpc_msg_t),
 				    LNET_MD_OP_PUT, rpc->crpc_dest,
 				    &rpc->crpc_replymdh, ev);
-	if (rc != 0) {
+	if (rc) {
 		LASSERT(rc == -ENOMEM);
 		ev->ev_fired = 1;  /* no more event expected */
 	}
@@ -836,7 +849,7 @@
 
 	LASSERT(bk->bk_niov <= LNET_MAX_IOV);
 
-	if (bk->bk_niov == 0)
+	if (!bk->bk_niov)
 		return 0; /* nothing to do */
 
 	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
@@ -851,7 +864,7 @@
 	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
 				    &bk->bk_iovs[0], bk->bk_niov, opt,
 				    rpc->crpc_dest, &bk->bk_mdh, ev);
-	if (rc != 0) {
+	if (rc) {
 		LASSERT(rc == -ENOMEM);
 		ev->ev_fired = 1;  /* no more event expected */
 	}
@@ -867,7 +880,7 @@
 	int rc;
 	int opt;
 
-	LASSERT(bk != NULL);
+	LASSERT(bk);
 
 	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
 	opt |= LNET_MD_KIOV;
@@ -880,7 +893,7 @@
 				   &bk->bk_iovs[0], bk->bk_niov, opt,
 				   rpc->srpc_peer, rpc->srpc_self,
 				   &bk->bk_mdh, ev);
-	if (rc != 0)
+	if (rc)
 		ev->ev_fired = 1;  /* no more event expected */
 	return rc;
 }
@@ -893,30 +906,32 @@
 	struct srpc_service *sv  = scd->scd_svc;
 	srpc_buffer_t *buffer;
 
-	LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
+	LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
 
 	rpc->srpc_status = status;
 
-	CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
-		"Server RPC %p done: service %s, peer %s, status %s:%d\n",
-		rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
-		swi_state2str(rpc->srpc_wi.swi_state), status);
+	CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
+		     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
+		     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+		     swi_state2str(rpc->srpc_wi.swi_state), status);
 
-	if (status != 0) {
+	if (status) {
 		spin_lock(&srpc_data.rpc_glock);
 		srpc_data.rpc_counters.rpcs_dropped++;
 		spin_unlock(&srpc_data.rpc_glock);
 	}
 
-	if (rpc->srpc_done != NULL)
+	if (rpc->srpc_done)
 		(*rpc->srpc_done) (rpc);
-	LASSERT(rpc->srpc_bulk == NULL);
+	LASSERT(!rpc->srpc_bulk);
 
 	spin_lock(&scd->scd_lock);
 
-	if (rpc->srpc_reqstbuf != NULL) {
-		/* NB might drop sv_lock in srpc_service_recycle_buffer, but
-		 * sv won't go away for scd_rpc_active must not be empty */
+	if (rpc->srpc_reqstbuf) {
+		/*
+		 * NB might drop sv_lock in srpc_service_recycle_buffer, but
+		 * sv won't go away for scd_rpc_active must not be empty
+		 */
 		srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
 		rpc->srpc_reqstbuf = NULL;
 	}
@@ -934,7 +949,7 @@
 
 	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
 		buffer = list_entry(scd->scd_buf_blocked.next,
-					srpc_buffer_t, buf_list);
+				    srpc_buffer_t, buf_list);
 		list_del(&buffer->buf_list);
 
 		srpc_init_server_rpc(rpc, scd, buffer);
@@ -965,7 +980,7 @@
 	if (sv->sv_shuttingdown || rpc->srpc_aborted) {
 		spin_unlock(&scd->scd_lock);
 
-		if (rpc->srpc_bulk != NULL)
+		if (rpc->srpc_bulk)
 			LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
 		LNetMDUnlink(rpc->srpc_replymdh);
 
@@ -988,7 +1003,7 @@
 		msg = &rpc->srpc_reqstbuf->buf_msg;
 		reply = &rpc->srpc_replymsg.msg_body.reply;
 
-		if (msg->msg_magic == 0) {
+		if (!msg->msg_magic) {
 			/* moaned already in srpc_lnet_ev_handler */
 			srpc_server_rpc_done(rpc, EBADMSG);
 			return 1;
@@ -1004,8 +1019,8 @@
 		} else {
 			reply->status = 0;
 			rc = (*sv->sv_handler)(rpc);
-			LASSERT(reply->status == 0 || !rpc->srpc_bulk);
-			if (rc != 0) {
+			LASSERT(!reply->status || !rpc->srpc_bulk);
+			if (rc) {
 				srpc_server_rpc_done(rpc, rc);
 				return 1;
 			}
@@ -1013,9 +1028,9 @@
 
 		wi->swi_state = SWI_STATE_BULK_STARTED;
 
-		if (rpc->srpc_bulk != NULL) {
+		if (rpc->srpc_bulk) {
 			rc = srpc_do_bulk(rpc);
-			if (rc == 0)
+			if (!rc)
 				return 0; /* wait for bulk */
 
 			LASSERT(ev->ev_fired);
@@ -1023,15 +1038,15 @@
 		}
 	}
 	case SWI_STATE_BULK_STARTED:
-		LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);
+		LASSERT(!rpc->srpc_bulk || ev->ev_fired);
 
-		if (rpc->srpc_bulk != NULL) {
+		if (rpc->srpc_bulk) {
 			rc = ev->ev_status;
 
-			if (sv->sv_bulk_ready != NULL)
+			if (sv->sv_bulk_ready)
 				rc = (*sv->sv_bulk_ready) (rpc, rc);
 
-			if (rc != 0) {
+			if (rc) {
 				srpc_server_rpc_done(rpc, rc);
 				return 1;
 			}
@@ -1039,7 +1054,7 @@
 
 		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
 		rc = srpc_send_reply(rpc);
-		if (rc == 0)
+		if (!rc)
 			return 0; /* wait for reply */
 		srpc_server_rpc_done(rpc, rc);
 		return 1;
@@ -1067,8 +1082,8 @@
 	srpc_client_rpc_t *rpc = data;
 
 	CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
-	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
-	       rpc->crpc_timeout);
+	      rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+	      rpc->crpc_timeout);
 
 	spin_lock(&rpc->crpc_lock);
 
@@ -1082,12 +1097,12 @@
 	spin_unlock(&srpc_data.rpc_glock);
 }
 
-inline void
+static void
 srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
 {
 	stt_timer_t *timer = &rpc->crpc_timer;
 
-	if (rpc->crpc_timeout == 0)
+	if (!rpc->crpc_timeout)
 		return;
 
 	INIT_LIST_HEAD(&timer->stt_list);
@@ -1102,12 +1117,13 @@
  * Called with rpc->crpc_lock held.
  *
  * Upon exit the RPC expiry timer is not queued and the handler is not
- * running on any CPU. */
+ * running on any CPU.
+ */
 static void
 srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
 {
 	/* timer not planted or already exploded */
-	if (rpc->crpc_timeout == 0)
+	if (!rpc->crpc_timeout)
 		return;
 
 	/* timer successfully defused */
@@ -1115,7 +1131,7 @@
 		return;
 
 	/* timer detonated, wait for it to explode */
-	while (rpc->crpc_timeout != 0) {
+	while (rpc->crpc_timeout) {
 		spin_unlock(&rpc->crpc_lock);
 
 		schedule();
@@ -1129,20 +1145,20 @@
 {
 	swi_workitem_t *wi = &rpc->crpc_wi;
 
-	LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
+	LASSERT(status || wi->swi_state == SWI_STATE_DONE);
 
 	spin_lock(&rpc->crpc_lock);
 
 	rpc->crpc_closed = 1;
-	if (rpc->crpc_status == 0)
+	if (!rpc->crpc_status)
 		rpc->crpc_status = status;
 
 	srpc_del_client_rpc_timer(rpc);
 
-	CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
-		"Client RPC done: service %d, peer %s, status %s:%d:%d\n",
-		rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
-		swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
+	CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
+		     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
+		     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+		     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
 
 	/*
 	 * No one can schedule me now since:
@@ -1170,11 +1186,11 @@
 	srpc_msg_t *reply;
 	int do_bulk;
 
-	LASSERT(wi != NULL);
+	LASSERT(wi);
 
 	rpc = wi->swi_workitem.wi_data;
 
-	LASSERT(rpc != NULL);
+	LASSERT(rpc);
 	LASSERT(wi == &rpc->crpc_wi);
 
 	reply = &rpc->crpc_replymsg;
@@ -1196,13 +1212,13 @@
 		LASSERT(!srpc_event_pending(rpc));
 
 		rc = srpc_prepare_reply(rpc);
-		if (rc != 0) {
+		if (rc) {
 			srpc_client_rpc_done(rpc, rc);
 			return 1;
 		}
 
 		rc = srpc_prepare_bulk(rpc);
-		if (rc != 0)
+		if (rc)
 			break;
 
 		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
@@ -1210,14 +1226,16 @@
 		break;
 
 	case SWI_STATE_REQUEST_SUBMITTED:
-		/* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
+		/*
+		 * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
 		 * order; however, they're processed in a strict order:
-		 * rqt, rpy, and bulk. */
+		 * rqt, rpy, and bulk.
+		 */
 		if (!rpc->crpc_reqstev.ev_fired)
 			break;
 
 		rc = rpc->crpc_reqstev.ev_status;
-		if (rc != 0)
+		if (rc)
 			break;
 
 		wi->swi_state = SWI_STATE_REQUEST_SENT;
@@ -1229,7 +1247,7 @@
 			break;
 
 		rc = rpc->crpc_replyev.ev_status;
-		if (rc != 0)
+		if (rc)
 			break;
 
 		srpc_unpack_msg_hdr(reply);
@@ -1244,7 +1262,7 @@
 			break;
 		}
 
-		if (do_bulk && reply->msg_body.reply.status != 0) {
+		if (do_bulk && reply->msg_body.reply.status) {
 			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
 			      reply->msg_body.reply.status,
 			      libcfs_id2str(rpc->crpc_dest));
@@ -1259,12 +1277,14 @@
 
 		rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
 
-		/* Bulk buffer was unlinked due to remote error. Clear error
+		/*
+		 * Bulk buffer was unlinked due to remote error. Clear error
 		 * since reply buffer still contains valid data.
 		 * NB rpc->crpc_done shouldn't look into bulk data in case of
-		 * remote error. */
+		 * remote error.
+		 */
 		if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
-		    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
+		    !rpc->crpc_status && reply->msg_body.reply.status)
 			rc = 0;
 
 		wi->swi_state = SWI_STATE_DONE;
@@ -1272,7 +1292,7 @@
 		return 1;
 	}
 
-	if (rc != 0) {
+	if (rc) {
 		spin_lock(&rpc->crpc_lock);
 		srpc_abort_rpc(rpc, rc);
 		spin_unlock(&rpc->crpc_lock);
@@ -1294,15 +1314,15 @@
 
 srpc_client_rpc_t *
 srpc_create_client_rpc(lnet_process_id_t peer, int service,
-			int nbulkiov, int bulklen,
-			void (*rpc_done)(srpc_client_rpc_t *),
-			void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+		       int nbulkiov, int bulklen,
+		       void (*rpc_done)(srpc_client_rpc_t *),
+		       void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
 {
 	srpc_client_rpc_t *rpc;
 
 	LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
 				   crpc_bulk.bk_iovs[nbulkiov]));
-	if (rpc == NULL)
+	if (!rpc)
 		return NULL;
 
 	srpc_init_client_rpc(rpc, peer, service, nbulkiov,
@@ -1314,16 +1334,15 @@
 void
 srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
 {
-	LASSERT(why != 0);
+	LASSERT(why);
 
 	if (rpc->crpc_aborted || /* already aborted */
 	    rpc->crpc_closed)    /* callback imminent */
 		return;
 
-	CDEBUG(D_NET,
-		"Aborting RPC: service %d, peer %s, state %s, why %d\n",
-		rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
-		swi_state2str(rpc->crpc_wi.swi_state), why);
+	CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
+	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+	       swi_state2str(rpc->crpc_wi.swi_state), why);
 
 	rpc->crpc_aborted = 1;
 	rpc->crpc_status  = why;
@@ -1339,8 +1358,8 @@
 	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
 
 	CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
-		libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
-		rpc->crpc_timeout);
+	       libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
+	       rpc->crpc_timeout);
 
 	srpc_add_client_rpc_timer(rpc);
 	swi_schedule_workitem(&rpc->crpc_wi);
@@ -1358,15 +1377,17 @@
 	__u64 rpyid;
 	int rc;
 
-	LASSERT(buffer != NULL);
+	LASSERT(buffer);
 	rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
 
 	spin_lock(&scd->scd_lock);
 
 	if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
-		/* Repost buffer before replying since test client
-		 * might send me another RPC once it gets the reply */
-		if (srpc_service_post_buffer(scd, buffer) != 0)
+		/*
+		 * Repost buffer before replying since test client
+		 * might send me another RPC once it gets the reply
+		 */
+		if (srpc_service_post_buffer(scd, buffer))
 			CWARN("Failed to repost %s buffer\n", sv->sv_name);
 		rpc->srpc_reqstbuf = NULL;
 	}
@@ -1385,7 +1406,7 @@
 				   sizeof(*msg), LNET_MD_OP_PUT,
 				   rpc->srpc_peer, rpc->srpc_self,
 				   &rpc->srpc_replymdh, ev);
-	if (rc != 0)
+	if (rc)
 		ev->ev_fired = 1;  /* no more event expected */
 	return rc;
 }
@@ -1405,7 +1426,7 @@
 
 	LASSERT(!in_interrupt());
 
-	if (ev->status != 0) {
+	if (ev->status) {
 		spin_lock(&srpc_data.rpc_glock);
 		srpc_data.rpc_counters.errors++;
 		spin_unlock(&srpc_data.rpc_glock);
@@ -1419,7 +1440,7 @@
 		       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
 		LBUG();
 	case SRPC_REQUEST_SENT:
-		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+		if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
 			spin_lock(&srpc_data.rpc_glock);
 			srpc_data.rpc_counters.rpcs_sent++;
 			spin_unlock(&srpc_data.rpc_glock);
@@ -1441,7 +1462,7 @@
 
 		spin_lock(&crpc->crpc_lock);
 
-		LASSERT(rpcev->ev_fired == 0);
+		LASSERT(!rpcev->ev_fired);
 		rpcev->ev_fired  = 1;
 		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
 						-EINTR : ev->status;
@@ -1460,9 +1481,9 @@
 
 		LASSERT(ev->unlinked);
 		LASSERT(ev->type == LNET_EVENT_PUT ||
-			 ev->type == LNET_EVENT_UNLINK);
+			ev->type == LNET_EVENT_UNLINK);
 		LASSERT(ev->type != LNET_EVENT_UNLINK ||
-			 sv->sv_shuttingdown);
+			sv->sv_shuttingdown);
 
 		buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
 		buffer->buf_peer = ev->initiator;
@@ -1472,21 +1493,23 @@
 		scd->scd_buf_nposted--;
 
 		if (sv->sv_shuttingdown) {
-			/* Leave buffer on scd->scd_buf_nposted since
-			 * srpc_finish_service needs to traverse it. */
+			/*
+			 * Leave buffer on scd->scd_buf_nposted since
+			 * srpc_finish_service needs to traverse it.
+			 */
 			spin_unlock(&scd->scd_lock);
 			break;
 		}
 
-		if (scd->scd_buf_err_stamp != 0 &&
+		if (scd->scd_buf_err_stamp &&
 		    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
 			/* re-enable adding buffer */
 			scd->scd_buf_err_stamp = 0;
 			scd->scd_buf_err = 0;
 		}
 
-		if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
-		    scd->scd_buf_adjust == 0 &&
+		if (!scd->scd_buf_err && /* adding buffer is enabled */
+		    !scd->scd_buf_adjust &&
 		    scd->scd_buf_nposted < scd->scd_buf_low) {
 			scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
 						  SFW_TEST_WI_MIN);
@@ -1497,7 +1520,7 @@
 		msg = &buffer->buf_msg;
 		type = srpc_service2request(sv->sv_id);
 
-		if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
+		if (ev->status || ev->mlength != sizeof(*msg) ||
 		    (msg->msg_type != type &&
 		     msg->msg_type != __swab32(type)) ||
 		    (msg->msg_magic != SRPC_MSG_MAGIC &&
@@ -1507,25 +1530,27 @@
 			       ev->status, ev->mlength,
 			       msg->msg_type, msg->msg_magic);
 
-			/* NB can't call srpc_service_recycle_buffer here since
+			/*
+			 * NB can't call srpc_service_recycle_buffer here since
 			 * it may call LNetM[DE]Attach. The invalid magic tells
-			 * srpc_handle_rpc to drop this RPC */
+			 * srpc_handle_rpc to drop this RPC
+			 */
 			msg->msg_magic = 0;
 		}
 
 		if (!list_empty(&scd->scd_rpc_free)) {
 			srpc = list_entry(scd->scd_rpc_free.next,
-					      struct srpc_server_rpc,
-					      srpc_list);
+					  struct srpc_server_rpc,
+					  srpc_list);
 			list_del(&srpc->srpc_list);
 
 			srpc_init_server_rpc(srpc, scd, buffer);
 			list_add_tail(&srpc->srpc_list,
-					  &scd->scd_rpc_active);
+				      &scd->scd_rpc_active);
 			swi_schedule_workitem(&srpc->srpc_wi);
 		} else {
 			list_add_tail(&buffer->buf_list,
-					  &scd->scd_buf_blocked);
+				      &scd->scd_buf_blocked);
 		}
 
 		spin_unlock(&scd->scd_lock);
@@ -1537,14 +1562,14 @@
 
 	case SRPC_BULK_GET_RPLD:
 		LASSERT(ev->type == LNET_EVENT_SEND ||
-			 ev->type == LNET_EVENT_REPLY ||
-			 ev->type == LNET_EVENT_UNLINK);
+			ev->type == LNET_EVENT_REPLY ||
+			ev->type == LNET_EVENT_UNLINK);
 
 		if (!ev->unlinked)
 			break; /* wait for final event */
 
 	case SRPC_BULK_PUT_SENT:
-		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+		if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
 			spin_lock(&srpc_data.rpc_glock);
 
 			if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
@@ -1587,7 +1612,7 @@
 
 	srpc_data.rpc_state = SRPC_STATE_NONE;
 
-	rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+	rc = LNetNIInit(LNET_PID_LUSTRE);
 	if (rc < 0) {
 		CERROR("LNetNIInit() has failed: %d\n", rc);
 		return rc;
@@ -1597,22 +1622,22 @@
 
 	LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
 	rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("LNetEQAlloc() has failed: %d\n", rc);
 		goto bail;
 	}
 
 	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 	rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
-	LASSERT(rc == 0);
+	LASSERT(!rc);
 
 	srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
 
 	rc = stt_startup();
 
 bail:
-	if (rc != 0)
+	if (rc)
 		srpc_shutdown();
 	else
 		srpc_data.rpc_state = SRPC_STATE_RUNNING;
@@ -1639,9 +1664,8 @@
 		for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
 			srpc_service_t *sv = srpc_data.rpc_services[i];
 
-			LASSERTF(sv == NULL,
-				  "service not empty: id %d, name %s\n",
-				  i, sv->sv_name);
+			LASSERTF(!sv, "service not empty: id %d, name %s\n",
+				 i, sv->sv_name);
 		}
 
 		spin_unlock(&srpc_data.rpc_glock);
@@ -1651,9 +1675,9 @@
 	case SRPC_STATE_EQ_INIT:
 		rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
 		rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
-		LASSERT(rc == 0);
+		LASSERT(!rc);
 		rc = LNetEQFree(srpc_data.rpc_lnet_eq);
-		LASSERT(rc == 0); /* the EQ should have no user by now */
+		LASSERT(!rc); /* the EQ should have no user by now */
 
 	case SRPC_STATE_NI_INIT:
 		LNetNIFini();
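
The srpc_shutdown() path above tears the module down with a switch whose cases fall through, so whatever SRPC_STATE_* was reached determines how many init stages get undone. A minimal userspace sketch of that staged-teardown pattern (stage names here are invented, not Lustre's):

#include <stdio.h>

enum srv_state { ST_NONE, ST_NI_INIT, ST_EQ_INIT, ST_RUNNING };

/* Undo only the stages that were actually reached, newest first. */
static void teardown(enum srv_state state)
{
	switch (state) {
	default:
		return;
	case ST_RUNNING:
		printf("stopping services\n");
		/* fall through */
	case ST_EQ_INIT:
		printf("freeing event queue\n");
		/* fall through */
	case ST_NI_INIT:
		printf("shutting down network interface\n");
	}
}

int main(void)
{
	teardown(ST_EQ_INIT);	/* init failed after EQ setup: skips the service stop */
	return 0;
}
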
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 6b4a32a..9dfb366 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -281,8 +281,10 @@
 	if (msg->msg_magic == SRPC_MSG_MAGIC)
 		return; /* no flipping needed */
 
-	/* We do not swap the magic number here as it is needed to
-	   determine whether the body needs to be swapped. */
+	/*
+	 * We do not swap the magic number here as it is needed to
+	 * determine whether the body needs to be swapped.
+	 */
 	/* __swab32s(&msg->msg_magic); */
 	__swab32s(&msg->msg_type);
 	__swab32s(&msg->msg_version);
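
The rpc.h hunk only reflows the comment, but the rule it documents is worth spelling out: the sender's magic is deliberately left unswapped so the receiver can use it to decide whether the remaining header fields need byte-swapping. A small standalone sketch under that assumption (struct and names are illustrative, not the Lustre wire format):

#include <stdint.h>
#include <stdio.h>

#define MSG_MAGIC 0x11223344u

struct msg_hdr {
	uint32_t magic;
	uint32_t type;
	uint32_t version;
};

static uint32_t swab32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

/* Return 0 on success, -1 if the magic is not recognized in either byte order. */
static int msg_unpack(struct msg_hdr *hdr)
{
	if (hdr->magic == MSG_MAGIC)
		return 0;			/* same endianness, nothing to do */
	if (hdr->magic != swab32(MSG_MAGIC))
		return -1;			/* corrupt or unknown sender */
	/* Do not swap the magic itself; it records the sender's byte order. */
	hdr->type = swab32(hdr->type);
	hdr->version = swab32(hdr->version);
	return 0;
}

int main(void)
{
	struct msg_hdr h = { swab32(MSG_MAGIC), swab32(7u), swab32(1u) };

	if (!msg_unpack(&h))
		printf("type %u version %u\n", h.type, h.version);
	return 0;
}
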
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 8704983..f6c8244 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -94,11 +94,11 @@
 #define SRPC_RDMA_PORTAL              52
 
 static inline srpc_msg_type_t
-srpc_service2request (int service)
+srpc_service2request(int service)
 {
 	switch (service) {
 	default:
-		LBUG ();
+		LBUG();
 	case SRPC_SERVICE_DEBUG:
 		return SRPC_MSG_DEBUG_REQST;
 
@@ -129,7 +129,7 @@
 }
 
 static inline srpc_msg_type_t
-srpc_service2reply (int service)
+srpc_service2reply(int service)
 {
 	return srpc_service2request(service) + 1;
 }
@@ -255,9 +255,9 @@
 		srpc_destroy_client_rpc(rpc);                            \
 } while (0)
 
-#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||   \
-				   (rpc)->crpc_reqstev.ev_fired == 0 ||  \
-				   (rpc)->crpc_replyev.ev_fired == 0)
+#define srpc_event_pending(rpc)   (!(rpc)->crpc_bulkev.ev_fired ||	\
+				   !(rpc)->crpc_reqstev.ev_fired ||	\
+				   !(rpc)->crpc_replyev.ev_fired)
 
 /* CPU partition data of srpc service */
 struct srpc_service_cd {
@@ -427,7 +427,7 @@
 void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
 int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
 		    int sink);
-int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
 
 srpc_client_rpc_t *
 srpc_create_client_rpc(lnet_process_id_t peer, int service,
@@ -502,26 +502,25 @@
 void srpc_shutdown(void);
 
 static inline void
-srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
 {
-	LASSERT(rpc != NULL);
+	LASSERT(rpc);
 	LASSERT(!srpc_event_pending(rpc));
-	LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+	LASSERT(!atomic_read(&rpc->crpc_refcount));
 
-	if (rpc->crpc_fini == NULL) {
+	if (!rpc->crpc_fini)
 		LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
-	} else {
+	else
 		(*rpc->crpc_fini) (rpc);
-	}
 
 	return;
 }
 
 static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
-		      int service, int nbulkiov, int bulklen,
-		      void (*rpc_done)(srpc_client_rpc_t *),
-		      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+		     int service, int nbulkiov, int bulklen,
+		     void (*rpc_done)(srpc_client_rpc_t *),
+		     void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
 {
 	LASSERT(nbulkiov <= LNET_MAX_IOV);
 
@@ -546,8 +545,8 @@
 	LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
 
 	/* no event is expected at this point */
-	rpc->crpc_bulkev.ev_fired  =
-	rpc->crpc_reqstev.ev_fired =
+	rpc->crpc_bulkev.ev_fired = 1;
+	rpc->crpc_reqstev.ev_fired = 1;
 	rpc->crpc_replyev.ev_fired = 1;
 
 	rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
@@ -557,7 +556,7 @@
 }
 
 static inline const char *
-swi_state2str (int state)
+swi_state2str(int state)
 {
 #define STATE2STR(x) case x: return #x
 	switch (state) {
@@ -602,11 +601,11 @@
 
 	LASSERT(sv->sv_shuttingdown);
 
-	while (srpc_finish_service(sv) == 0) {
+	while (!srpc_finish_service(sv)) {
 		i++;
-		CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
-			"Waiting for %s service to shutdown...\n",
-			sv->sv_name);
+		CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
+		       "Waiting for %s service to shutdown...\n",
+		       sv->sv_name);
 		selftest_wait_events();
 	}
 }
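
The shutdown wait loop above picks its log level with ((i & -i) == i), which for a positive counter is true only at powers of two, so the "Waiting for %s service to shutdown..." message is promoted to a warning on iterations 1, 2, 4, 8, ... and stays at debug level otherwise. A quick standalone check of that property:

#include <stdio.h>

/* True only when i is a power of two (i > 0): i & -i isolates the lowest
 * set bit, which equals i exactly when i has a single bit set. */
static int is_pow2(unsigned int i)
{
	return i && (i & -i) == i;
}

int main(void)
{
	unsigned int i;

	for (i = 1; i <= 20; i++)
		printf("iteration %2u -> %s\n", i,
		       is_pow2(i) ? "warn" : "debug");
	return 0;
}
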
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index b98c08a..c891371 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -75,7 +75,7 @@
 
 	LASSERT(stt_data.stt_nthreads > 0);
 	LASSERT(!stt_data.stt_shuttingdown);
-	LASSERT(timer->stt_func != NULL);
+	LASSERT(timer->stt_func);
 	LASSERT(list_empty(&timer->stt_list));
 	LASSERT(timer->stt_expires > ktime_get_real_seconds());
 
@@ -218,7 +218,7 @@
 	stt_data.stt_nthreads = 0;
 	init_waitqueue_head(&stt_data.stt_waitq);
 	rc = stt_start_timer_thread();
-	if (rc != 0)
+	if (rc)
 		CERROR("Can't spawn timer thread: %d\n", rc);
 
 	return rc;
@@ -237,7 +237,7 @@
 	stt_data.stt_shuttingdown = 1;
 
 	wake_up(&stt_data.stt_waitq);
-	lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+	lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock,
 		       "waiting for %d threads to terminate\n",
 		       stt_data.stt_nthreads);
 
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index ff8f38d..70400aa 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -68,7 +68,7 @@
 
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
 					LUSTRE_MDS_VERSION, SEQ_QUERY);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	/* Init operation code */
@@ -95,7 +95,8 @@
 		 * precreating objects on this OST), and it will send the
 		 * request to MDT0 here, so we can not keep resending the
 		 * request here, otherwise if MDT0 is failed(umounted),
-		 * it can not release the export of MDT0 */
+		 * it can not release the export of MDT0
+		 */
 		if (seq->lcs_type == LUSTRE_SEQ_DATA)
 			req->rq_no_delay = req->rq_no_resend = 1;
 		debug_mask = D_CONSOLE;
@@ -152,7 +153,8 @@
 		/* If meta server return -EINPROGRESS or EAGAIN,
 		 * it means meta server might not be ready to
 		 * allocate super sequence from sequence controller
-		 * (MDT0)yet */
+		 * (MDT0) yet
+		 */
 		rc = seq_client_rpc(seq, &seq->lcs_space,
 				    SEQ_ALLOC_META, "meta");
 	} while (rc == -EINPROGRESS || rc == -EAGAIN);
@@ -226,8 +228,8 @@
 	wait_queue_t link;
 	int rc;
 
-	LASSERT(seq != NULL);
-	LASSERT(fid != NULL);
+	LASSERT(seq);
+	LASSERT(fid);
 
 	init_waitqueue_entry(&link, current);
 	mutex_lock(&seq->lcs_mutex);
@@ -292,7 +294,7 @@
 {
 	wait_queue_t link;
 
-	LASSERT(seq != NULL);
+	LASSERT(seq);
 	init_waitqueue_entry(&link, current);
 	mutex_lock(&seq->lcs_mutex);
 
@@ -375,8 +377,8 @@
 {
 	int rc;
 
-	LASSERT(seq != NULL);
-	LASSERT(prefix != NULL);
+	LASSERT(seq);
+	LASSERT(prefix);
 
 	seq->lcs_type = type;
 
@@ -438,7 +440,7 @@
 {
 	struct client_obd *cli = &obd->u.cli;
 
-	if (cli->cl_seq != NULL) {
+	if (cli->cl_seq) {
 		seq_client_fini(cli->cl_seq);
 		kfree(cli->cl_seq);
 		cli->cl_seq = NULL;
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 39f2aa3..1f0e786 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -66,7 +66,7 @@
 	int rc;
 	char kernbuf[MAX_FID_RANGE_STRLEN];
 
-	LASSERT(range != NULL);
+	LASSERT(range);
 
 	if (count >= sizeof(kernbuf))
 		return -EINVAL;
@@ -85,6 +85,8 @@
 	rc = sscanf(kernbuf, "[%llx - %llx]\n",
 		    (unsigned long long *)&tmp.lsr_start,
 		    (unsigned long long *)&tmp.lsr_end);
+	if (rc != 2)
+		return -EINVAL;
 	if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
 	    tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
 		return -EINVAL;
@@ -102,7 +104,6 @@
 	int rc;
 
 	seq = ((struct seq_file *)file->private_data)->private;
-	LASSERT(seq != NULL);
 
 	mutex_lock(&seq->lcs_mutex);
 	rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space);
@@ -122,8 +123,6 @@
 {
 	struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 
-	LASSERT(seq != NULL);
-
 	mutex_lock(&seq->lcs_mutex);
 	seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space));
 	mutex_unlock(&seq->lcs_mutex);
@@ -141,7 +140,6 @@
 	int rc, val;
 
 	seq = ((struct seq_file *)file->private_data)->private;
-	LASSERT(seq != NULL);
 
 	rc = lprocfs_write_helper(buffer, count, &val);
 	if (rc)
@@ -170,8 +168,6 @@
 {
 	struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 
-	LASSERT(seq != NULL);
-
 	mutex_lock(&seq->lcs_mutex);
 	seq_printf(m, "%llu\n", seq->lcs_width);
 	mutex_unlock(&seq->lcs_mutex);
@@ -184,8 +180,6 @@
 {
 	struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 
-	LASSERT(seq != NULL);
-
 	mutex_lock(&seq->lcs_mutex);
 	seq_printf(m, DFID "\n", PFID(&seq->lcs_fid));
 	mutex_unlock(&seq->lcs_mutex);
@@ -199,9 +193,7 @@
 	struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 	struct client_obd *cli;
 
-	LASSERT(seq != NULL);
-
-	if (seq->lcs_exp != NULL) {
+	if (seq->lcs_exp) {
 		cli = &seq->lcs_exp->exp_obd->u.cli;
 		seq_printf(m, "%s\n", cli->cl_target_uuid.uuid);
 	}
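
The lproc_fid.c hunk adds the missing check of the sscanf() return count, so a malformed write can no longer reach the range validation with an unparsed, stack-garbage tmp. A userspace sketch of the same parse-then-validate shape (the range struct here is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct range {
	uint64_t start;
	uint64_t end;
};

/* Parse "[start - end]" in hex; return 0 on success, -1 on bad input. */
static int parse_range(const char *buf, struct range *r)
{
	struct range tmp;
	int rc;

	rc = sscanf(buf, "[%" SCNx64 " - %" SCNx64 "]", &tmp.start, &tmp.end);
	if (rc != 2)		/* reject the input before looking at tmp */
		return -1;
	if (tmp.start >= tmp.end)
		return -1;
	*r = tmp;
	return 0;
}

int main(void)
{
	struct range r;

	printf("good: %d\n", parse_range("[400 - 4ff]", &r));
	printf("bad:  %d\n", parse_range("garbage", &r));
	return 0;
}
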
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index d9459e5..2b09b76 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -65,7 +65,7 @@
 {
 	struct fld_cache *cache;
 
-	LASSERT(name != NULL);
+	LASSERT(name);
 	LASSERT(cache_threshold < cache_size);
 
 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
@@ -100,7 +100,7 @@
 {
 	__u64 pct;
 
-	LASSERT(cache != NULL);
+	LASSERT(cache);
 	fld_cache_flush(cache);
 
 	if (cache->fci_stat.fst_count > 0) {
@@ -183,7 +183,8 @@
 			}
 
 			/* we could have overlap over next
-			 * range too. better restart. */
+			 * range too. better restart.
+			 */
 			goto restart_fixup;
 		}
 
@@ -218,8 +219,6 @@
 	struct list_head *curr;
 	int num = 0;
 
-	LASSERT(cache != NULL);
-
 	if (cache->fci_cache_count < cache->fci_cache_size)
 		return 0;
 
@@ -304,7 +303,8 @@
 	const u32 mdt = range->lsr_index;
 
 	/* this is overlap case, these case are checking overlapping with
-	 * prev range only. fixup will handle overlapping with next range. */
+	 * prev range only. fixup will handle overlapping with next range.
+	 */
 
 	if (f_curr->fce_range.lsr_index == mdt) {
 		f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
@@ -319,7 +319,8 @@
 	} else if (new_start <= f_curr->fce_range.lsr_start &&
 			f_curr->fce_range.lsr_end <= new_end) {
 		/* case 1: new range completely overshadowed existing range.
-		 *	 e.g. whole range migrated. update fld cache entry */
+		 *	 e.g. whole range migrated. update fld cache entry
+		 */
 
 		f_curr->fce_range = *range;
 		kfree(f_new);
@@ -414,7 +415,7 @@
 		}
 	}
 
-	if (prev == NULL)
+	if (!prev)
 		prev = head;
 
 	CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
@@ -499,7 +500,7 @@
 	cache->fci_stat.fst_count++;
 	list_for_each_entry(flde, head, fce_list) {
 		if (flde->fce_range.lsr_start > seq) {
-			if (prev != NULL)
+			if (prev)
 				*range = prev->fce_range;
 			break;
 		}
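
fld_cache keeps its entries sorted on lsr_start, so the lookup above walks the list while remembering the previous entry and stops at the first entry that starts beyond the sequence being resolved. The same idea on a plain sorted array (a sketch, not the Lustre list or locking):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct range {
	uint64_t start;		/* inclusive */
	uint64_t end;		/* exclusive */
	unsigned int index;	/* e.g. which server owns the range */
};

/* Entries are sorted on .start; return the entry covering seq, or NULL. */
static const struct range *range_lookup(const struct range *tab, size_t n,
					uint64_t seq)
{
	const struct range *prev = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (tab[i].start > seq)
			break;
		prev = &tab[i];
	}
	if (prev && seq < prev->end)
		return prev;
	return NULL;
}

int main(void)
{
	static const struct range tab[] = {
		{ 0x100, 0x200, 0 },
		{ 0x200, 0x300, 1 },
		{ 0x400, 0x500, 2 },
	};
	const struct range *r = range_lookup(tab, 3, 0x250);

	if (r)
		printf("seq 0x250 -> index %u\n", r->index);
	return 0;
}
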
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 12eb164..e8a3caf 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -58,22 +58,16 @@
 	__u64   fst_inflight;
 };
 
-typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64);
-
-typedef struct lu_fld_target *
-(*fld_scan_func_t) (struct lu_client_fld *, __u64);
-
 struct lu_fld_hash {
 	const char	      *fh_name;
-	fld_hash_func_t	  fh_hash_func;
-	fld_scan_func_t	  fh_scan_func;
+	int (*fh_hash_func)(struct lu_client_fld *, __u64);
+	struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *, __u64);
 };
 
 struct fld_cache_entry {
 	struct list_head	       fce_lru;
 	struct list_head	       fce_list;
-	/**
-	 * fld cache entries are sorted on range->lsr_start field. */
+	/** fld cache entries are sorted on range->lsr_start field. */
 	struct lu_seq_range      fce_range;
 };
 
@@ -84,32 +78,25 @@
 	 */
 	rwlock_t		 fci_lock;
 
-	/**
-	 * Cache shrink threshold */
+	/** Cache shrink threshold */
 	int		      fci_threshold;
 
-	/**
-	 * Preferred number of cached entries */
+	/** Preferred number of cached entries */
 	int		      fci_cache_size;
 
-	/**
-	 * Current number of cached entries. Protected by \a fci_lock */
+	/** Current number of cached entries. Protected by \a fci_lock */
 	int		      fci_cache_count;
 
-	/**
-	 * LRU list fld entries. */
+	/** LRU list fld entries. */
 	struct list_head	       fci_lru;
 
-	/**
-	 * sorted fld entries. */
+	/** sorted fld entries. */
 	struct list_head	       fci_entries_head;
 
-	/**
-	 * Cache statistics. */
+	/** Cache statistics. */
 	struct fld_stats	 fci_stat;
 
-	/**
-	 * Cache name used for debug and messages. */
+	/** Cache name used for debug and messages. */
 	char		     fci_name[LUSTRE_MDT_MAXNAMELEN];
 	unsigned int		 fci_no_shrink:1;
 };
@@ -169,7 +156,7 @@
 static inline const char *
 fld_target_name(struct lu_fld_target *tar)
 {
-	if (tar->ft_srv != NULL)
+	if (tar->ft_srv)
 		return tar->ft_srv->lsf_name;
 
 	return (const char *)tar->ft_exp->exp_obd->obd_name;
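
The fld_internal.h hunk replaces the fld_hash_func_t/fld_scan_func_t typedefs with function pointers declared directly inside struct lu_fld_hash. A small self-contained example of the resulting shape, with a toy ops table (not Lustre's) terminated by a NULL name the way the real fld_hash[] array is:

#include <stdio.h>

struct hash_ops {
	const char *name;
	unsigned int (*hash)(unsigned int nbuckets, unsigned long long key);
};

static unsigned int rrb_hash(unsigned int nbuckets, unsigned long long key)
{
	return nbuckets ? (unsigned int)(key % nbuckets) : 0;
}

static const struct hash_ops hash_tab[] = {
	{ .name = "RRB", .hash = rrb_hash },
	{ .name = NULL }	/* terminator, like the fld_hash[] table */
};

int main(void)
{
	int i;

	for (i = 0; hash_tab[i].name; i++)
		printf("%s: key 42 -> bucket %u\n",
		       hash_tab[i].name, hash_tab[i].hash(4, 42ULL));
	return 0;
}
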
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index d92c01b..b50a57b 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -58,7 +58,8 @@
 #include "fld_internal.h"
 
 /* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
- * It should be common thing. The same about mdc RPC lock */
+ * It should be a common thing. The same applies to the mdc RPC lock
+ */
 static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
 {
 	int rc;
@@ -124,7 +125,8 @@
 	 * it should go to index 0 directly, instead of calculating
 	 * hash again, and also if other MDTs is not being connected,
 	 * the fld lookup requests(for seq on MDT0) should not be
-	 * blocked because of other MDTs */
+	 * blocked because of other MDTs
+	 */
 	if (fid_seq_is_norm(seq))
 		hash = fld_rrb_hash(fld, seq);
 	else
@@ -139,7 +141,8 @@
 	if (hash != 0) {
 		/* It is possible the remote target(MDT) are not connected to
 		 * with client yet, so we will refer this to MDT0, which should
-		 * be connected during mount */
+		 * be connected during mount
+		 */
 		hash = 0;
 		goto again;
 	}
@@ -148,9 +151,9 @@
 		fld->lcf_name, hash, seq, fld->lcf_count);
 
 	list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
-		const char *srv_name = target->ft_srv != NULL  ?
+		const char *srv_name = target->ft_srv ?
 			target->ft_srv->lsf_name : "<null>";
-		const char *exp_name = target->ft_exp != NULL ?
+		const char *exp_name = target->ft_exp ?
 			(char *)target->ft_exp->exp_obd->obd_uuid.uuid :
 			"<null>";
 
@@ -183,13 +186,13 @@
 {
 	struct lu_fld_target *target;
 
-	LASSERT(fld->lcf_hash != NULL);
+	LASSERT(fld->lcf_hash);
 
 	spin_lock(&fld->lcf_lock);
 	target = fld->lcf_hash->fh_scan_func(fld, seq);
 	spin_unlock(&fld->lcf_lock);
 
-	if (target != NULL) {
+	if (target) {
 		CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n",
 		       fld->lcf_name, target->ft_idx, seq);
 	}
@@ -207,10 +210,10 @@
 	const char *name;
 	struct lu_fld_target *target, *tmp;
 
-	LASSERT(tar != NULL);
+	LASSERT(tar);
 	name = fld_target_name(tar);
-	LASSERT(name != NULL);
-	LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
+	LASSERT(name);
+	LASSERT(tar->ft_srv || tar->ft_exp);
 
 	if (fld->lcf_flags != LUSTRE_FLD_INIT) {
 		CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
@@ -236,7 +239,7 @@
 	}
 
 	target->ft_exp = tar->ft_exp;
-	if (target->ft_exp != NULL)
+	if (target->ft_exp)
 		class_export_get(target->ft_exp);
 	target->ft_srv = tar->ft_srv;
 	target->ft_idx = tar->ft_idx;
@@ -264,7 +267,7 @@
 			list_del(&target->ft_chain);
 			spin_unlock(&fld->lcf_lock);
 
-			if (target->ft_exp != NULL)
+			if (target->ft_exp)
 				class_export_put(target->ft_exp);
 
 			kfree(target);
@@ -326,8 +329,6 @@
 	int cache_size, cache_threshold;
 	int rc;
 
-	LASSERT(fld != NULL);
-
 	snprintf(fld->lcf_name, sizeof(fld->lcf_name),
 		 "cli-%s", prefix);
 
@@ -379,13 +380,13 @@
 				     &fld->lcf_targets, ft_chain) {
 		fld->lcf_count--;
 		list_del(&target->ft_chain);
-		if (target->ft_exp != NULL)
+		if (target->ft_exp)
 			class_export_put(target->ft_exp);
 		kfree(target);
 	}
 	spin_unlock(&fld->lcf_lock);
 
-	if (fld->lcf_cache != NULL) {
+	if (fld->lcf_cache) {
 		if (!IS_ERR(fld->lcf_cache))
 			fld_cache_fini(fld->lcf_cache);
 		fld->lcf_cache = NULL;
@@ -402,12 +403,12 @@
 	int		    rc;
 	struct obd_import     *imp;
 
-	LASSERT(exp != NULL);
+	LASSERT(exp);
 
 	imp = class_exp2cliimp(exp);
 	req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
 					FLD_QUERY);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
@@ -436,7 +437,7 @@
 		goto out_req;
 
 	prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
-	if (prange == NULL) {
+	if (!prange) {
 		rc = -EFAULT;
 		goto out_req;
 	}
@@ -463,7 +464,7 @@
 
 	/* Can not find it in the cache */
 	target = fld_client_get_target(fld, seq);
-	LASSERT(target != NULL);
+	LASSERT(target);
 
 	CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
 			fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
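
fld_rrb_scan() above falls back to index 0 when the hashed target is not connected yet: if the scan finds nothing and the hash was non-zero, it retries with hash 0 so MDT0 serves the lookup. A compact sketch of that retry-with-fallback shape (the target table is invented):

#include <stdio.h>
#include <stddef.h>

struct target {
	unsigned int idx;
	int connected;
};

static const struct target *scan(const struct target *tab, size_t n,
				 unsigned int hash)
{
	size_t i;

again:
	for (i = 0; i < n; i++)
		if (tab[i].idx == hash && tab[i].connected)
			return &tab[i];

	if (hash) {
		/* hashed target not connected yet: fall back to target 0 */
		hash = 0;
		goto again;
	}
	return NULL;
}

int main(void)
{
	static const struct target tab[] = {
		{ 0, 1 }, { 1, 1 }, { 2, 0 },	/* target 2 not connected */
	};
	const struct target *t = scan(tab, 3, 2);

	printf("picked target %u\n", t ? t->idx : ~0u);
	return 0;
}
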
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 41ceaa8..da31281 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -60,8 +60,6 @@
 	struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
 	struct lu_fld_target *target;
 
-	LASSERT(fld != NULL);
-
 	spin_lock(&fld->lcf_lock);
 	list_for_each_entry(target,
 				&fld->lcf_targets, ft_chain)
@@ -76,8 +74,6 @@
 {
 	struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
 
-	LASSERT(fld != NULL);
-
 	spin_lock(&fld->lcf_lock);
 	seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
 	spin_unlock(&fld->lcf_lock);
@@ -102,9 +98,8 @@
 		return -EFAULT;
 
 	fld = ((struct seq_file *)file->private_data)->private;
-	LASSERT(fld != NULL);
 
-	for (i = 0; fld_hash[i].fh_name != NULL; i++) {
+	for (i = 0; fld_hash[i].fh_name; i++) {
 		if (count != strlen(fld_hash[i].fh_name))
 			continue;
 
@@ -114,7 +109,7 @@
 		}
 	}
 
-	if (hash != NULL) {
+	if (hash) {
 		spin_lock(&fld->lcf_lock);
 		fld->lcf_hash = hash;
 		spin_unlock(&fld->lcf_lock);
@@ -132,8 +127,6 @@
 {
 	struct lu_client_fld *fld = file->private_data;
 
-	LASSERT(fld != NULL);
-
 	fld_cache_flush(fld->lcf_cache);
 
 	CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index bd7acc2..2a77ea2f8 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -157,7 +157,8 @@
 };
 
 /** \addtogroup cl_object cl_object
- * @{ */
+ * @{
+ */
 /**
  * "Data attributes" of cl_object. Data attributes can be updated
  * independently for a sub-object, and top-object's attributes are calculated
@@ -288,13 +289,14 @@
 
 enum {
 	/** configure layout, set up a new stripe, must be called while
-	 * holding layout lock. */
+	 * holding layout lock.
+	 */
 	OBJECT_CONF_SET = 0,
 	/** invalidate the current stripe configuration due to losing
-	 * layout lock. */
+	 * layout lock.
+	 */
 	OBJECT_CONF_INVALIDATE = 1,
-	/** wait for old layout to go away so that new layout can be
-	 * set up. */
+	/** wait for old layout to go away so that new layout can be set up. */
 	OBJECT_CONF_WAIT = 2
 };
 
@@ -393,7 +395,8 @@
  */
 struct cl_object_header {
 	/** Standard lu_object_header. cl_object::co_lu::lo_header points
-	 * here. */
+	 * here.
+	 */
 	struct lu_object_header  coh_lu;
 	/** \name locks
 	 * \todo XXX move locks below to the separate cache-lines, they are
@@ -464,7 +467,8 @@
 #define CL_PAGE_EOF ((pgoff_t)~0ull)
 
 /** \addtogroup cl_page cl_page
- * @{ */
+ * @{
+ */
 
 /** \struct cl_page
  * Layered client page.
@@ -687,12 +691,14 @@
 
 enum cl_page_type {
 	/** Host page, the page is from the host inode which the cl_page
-	 * belongs to. */
+	 * belongs to.
+	 */
 	CPT_CACHEABLE = 1,
 
 	/** Transient page, the transient cl_page is used to bind a cl_page
 	 *  to vmpage which is not belonging to the same object of cl_page.
-	 *  it is used in DirectIO, lockless IO and liblustre. */
+	 *  it is used in DirectIO, lockless IO and liblustre.
+	 */
 	CPT_TRANSIENT,
 };
 
@@ -728,7 +734,8 @@
 	/** Parent page, NULL for top-level page. Immutable after creation. */
 	struct cl_page	  *cp_parent;
 	/** Lower-layer page. NULL for bottommost page. Immutable after
-	 * creation. */
+	 * creation.
+	 */
 	struct cl_page	  *cp_child;
 	/**
 	 * Page state. This field is const to avoid accidental update, it is
@@ -1126,7 +1133,8 @@
 /** @} cl_page */
 
 /** \addtogroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
 /** \struct cl_lock
  *
  * Extent locking on the client.
@@ -1641,7 +1649,8 @@
 struct cl_lock_slice {
 	struct cl_lock		  *cls_lock;
 	/** Object slice corresponding to this lock slice. Immutable after
-	 * creation. */
+	 * creation.
+	 */
 	struct cl_object		*cls_obj;
 	const struct cl_lock_operations *cls_ops;
 	/** Linkage into cl_lock::cll_layers. Immutable after creation. */
@@ -1885,7 +1894,8 @@
 /** @} cl_page_list */
 
 /** \addtogroup cl_io cl_io
- * @{ */
+ * @{
+ */
 /** \struct cl_io
  * I/O
  *
@@ -2284,7 +2294,8 @@
 	/** discard all of dirty pages in a specific file range */
 	CL_FSYNC_DISCARD = 2,
 	/** start writeback and make sure they have reached storage before
-	 * return. OST_SYNC RPC must be issued and finished */
+	 * return. OST_SYNC RPC must be issued and finished
+	 */
 	CL_FSYNC_ALL   = 3
 };
 
@@ -2403,7 +2414,8 @@
 /** @} cl_io */
 
 /** \addtogroup cl_req cl_req
- * @{ */
+ * @{
+ */
 /** \struct cl_req
  * Transfer.
  *
@@ -2582,7 +2594,8 @@
 	/** how many entities are in the cache right now */
 	CS_total,
 	/** how many entities in the cache are actively used (and cannot be
-	 * evicted) right now */
+	 * evicted) right now
+	 */
 	CS_busy,
 	/** how many entities were created at all */
 	CS_create,
@@ -2613,7 +2626,7 @@
 	 * Statistical counters. Atomics do not scale, something better like
 	 * per-cpu counters is needed.
 	 *
-	 * These are exported as /proc/fs/lustre/llite/.../site
+	 * These are exported as /sys/kernel/debug/lustre/llite/.../site
 	 *
 	 * When interpreting keep in mind that both sub-locks (and sub-pages)
 	 * and top-locks (and top-pages) are accounted here.
@@ -2653,7 +2666,7 @@
 
 static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
 {
-	LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+	LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d));
 	return container_of0(d, struct cl_device, cd_lu_dev);
 }
 
@@ -2664,7 +2677,7 @@
 
 static inline struct cl_object *lu2cl(const struct lu_object *o)
 {
-	LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+	LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
 	return container_of0(o, struct cl_object, co_lu);
 }
 
@@ -2681,7 +2694,7 @@
 
 static inline struct cl_device *cl_object_device(const struct cl_object *o)
 {
-	LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
+	LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
 	return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
 }
 
@@ -2725,7 +2738,8 @@
 /** @} helpers */
 
 /** \defgroup cl_object cl_object
- * @{ */
+ * @{
+ */
 struct cl_object *cl_object_top (struct cl_object *o);
 struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
 				 const struct lu_fid *fid,
@@ -2770,7 +2784,8 @@
 /** @} cl_object */
 
 /** \defgroup cl_page cl_page
- * @{ */
+ * @{
+ */
 enum {
 	CLP_GANG_OKAY = 0,
 	CLP_GANG_RESCHED,
@@ -2888,7 +2903,8 @@
 /** @} cl_page */
 
 /** \defgroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
 
 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
 			     const struct cl_lock_descr *need,
@@ -2966,7 +2982,8 @@
  *
  * cl_use_try()     NONE	 cl_lock_operations::clo_use()     CLS_HELD
  *
- * @{ */
+ * @{
+ */
 
 int   cl_wait       (const struct lu_env *env, struct cl_lock *lock);
 void  cl_unuse      (const struct lu_env *env, struct cl_lock *lock);
@@ -3019,7 +3036,8 @@
 /** @} cl_lock */
 
 /** \defgroup cl_io cl_io
- * @{ */
+ * @{
+ */
 
 int   cl_io_init	 (const struct lu_env *env, struct cl_io *io,
 			  enum cl_io_type iot, struct cl_object *obj);
@@ -3094,7 +3112,8 @@
 /** @} cl_io */
 
 /** \defgroup cl_page_list cl_page_list
- * @{ */
+ * @{
+ */
 
 /**
  * Last page in the page list.
@@ -3137,7 +3156,8 @@
 /** @} cl_page_list */
 
 /** \defgroup cl_req cl_req
- * @{ */
+ * @{
+ */
 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
 			    enum cl_req_type crt, int nr_objects);
 
@@ -3214,7 +3234,8 @@
  *       - cl_env_reexit(cl_env_reenter had to be called priorly)
  *
  * \see lu_env, lu_context, lu_context_key
- * @{ */
+ * @{
+ */
 
 struct cl_env_nest {
 	int   cen_refcheck;
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index 36e7a67..5d839a9 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -127,7 +127,7 @@
 	struct ccc_thread_info      *info;
 
 	info = lu_context_key_get(&env->le_ctx, &ccc_key);
-	LASSERT(info != NULL);
+	LASSERT(info);
 	return info;
 }
 
@@ -156,7 +156,7 @@
 	struct ccc_session *ses;
 
 	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
-	LASSERT(ses != NULL);
+	LASSERT(ses);
 	return ses;
 }
 
@@ -383,7 +383,8 @@
  *
  * NB: If you find you have to use these interfaces for your new code, please
  * think about it again. These interfaces may be removed in the future for
- * better layering. */
+ * better layering.
+ */
 struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
 void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
 int lov_read_and_clear_async_rc(struct cl_object *clob);
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 468bc28..3907bf4 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -57,23 +57,23 @@
 
 #define CLIENT_OBD_LIST_LOCK_DEBUG 1
 
-typedef struct {
+struct client_obd_lock {
 	spinlock_t		lock;
 
 	unsigned long       time;
 	struct task_struct *task;
 	const char	 *func;
 	int		 line;
-} client_obd_lock_t;
+};
 
-static inline void __client_obd_list_lock(client_obd_lock_t *lock,
+static inline void __client_obd_list_lock(struct client_obd_lock *lock,
 					  const char *func, int line)
 {
 	unsigned long cur = jiffies;
 
 	while (1) {
 		if (spin_trylock(&lock->lock)) {
-			LASSERT(lock->task == NULL);
+			LASSERT(!lock->task);
 			lock->task = current;
 			lock->func = func;
 			lock->line = line;
@@ -85,7 +85,7 @@
 		    time_before(lock->time + 5 * HZ, jiffies)) {
 			struct task_struct *task = lock->task;
 
-			if (task == NULL)
+			if (!task)
 				continue;
 
 			LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
@@ -106,20 +106,20 @@
 #define client_obd_list_lock(lock) \
 	__client_obd_list_lock(lock, __func__, __LINE__)
 
-static inline void client_obd_list_unlock(client_obd_lock_t *lock)
+static inline void client_obd_list_unlock(struct client_obd_lock *lock)
 {
-	LASSERT(lock->task != NULL);
+	LASSERT(lock->task);
 	lock->task = NULL;
 	lock->time = jiffies;
 	spin_unlock(&lock->lock);
 }
 
-static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
+static inline void client_obd_list_lock_init(struct client_obd_lock *lock)
 {
 	spin_lock_init(&lock->lock);
 }
 
-static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
+static inline void client_obd_list_lock_done(struct client_obd_lock *lock)
 {}
 
 #endif /* __LINUX_OBD_H */
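
The obd.h hunk renames client_obd_lock_t to struct client_obd_lock but keeps the debugging idea: the lock remembers who holds it (task, function, line, time), and a waiter that spins too long prints the current owner. A rough pthreads analogue of that idea (illustrative only; the kernel version uses spin_trylock and LCONSOLE_WARN, and the owner fields are read racily by design):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct dbg_lock {
	pthread_mutex_t lock;
	const char *func;	/* where the current owner took it */
	int line;
	time_t since;
};

#define dbg_lock_acquire(l) __dbg_lock_acquire(l, __func__, __LINE__)

static void __dbg_lock_acquire(struct dbg_lock *l, const char *func, int line)
{
	time_t start = time(NULL);

	while (pthread_mutex_trylock(&l->lock) != 0) {
		if (time(NULL) - start > 5) {
			fprintf(stderr, "waited >5s, held by %s:%d since %ld\n",
				l->func ? l->func : "?", l->line,
				(long)l->since);
			start = time(NULL);	/* warn again in another 5s */
		}
	}
	l->func = func;
	l->line = line;
	l->since = time(NULL);
}

static void dbg_lock_release(struct dbg_lock *l)
{
	l->func = NULL;
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	struct dbg_lock l = { PTHREAD_MUTEX_INITIALIZER, NULL, 0, 0 };

	dbg_lock_acquire(&l);
	printf("holding lock taken at %s:%d\n", l.func, l.line);
	dbg_lock_release(&l);
	return 0;
}
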
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 0ac8e0e..0d98111 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -54,7 +54,7 @@
 	struct file_operations	*fops;
 	void			*data;
 	/**
-	 * /proc file mode.
+	 * sysfs file mode.
 	 */
 	umode_t			proc_mode;
 };
@@ -175,7 +175,8 @@
 enum lprocfs_stats_flags {
 	LPROCFS_STATS_FLAG_NONE     = 0x0000, /* per cpu counter */
 	LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
-					       * area and need locking */
+					       * area and need locking
+					       */
 	LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
 };
 
@@ -196,7 +197,8 @@
 	unsigned short			ls_biggest_alloc_num;
 	enum lprocfs_stats_flags	ls_flags;
 	/* Lock used when there are no percpu stats areas; For percpu stats,
-	 * it is used to protect ls_biggest_alloc_num change */
+	 * it is used to protect ls_biggest_alloc_num change
+	 */
 	spinlock_t			ls_lock;
 
 	/* has ls_num of counter headers */
@@ -274,20 +276,7 @@
 			OPC_RANGE(OST));
 	} else if (opc < FLD_LAST_OPC) {
 		/* FLD opcode */
-		 return (opc - FLD_FIRST_OPC +
-			OPC_RANGE(SEC) +
-			OPC_RANGE(SEQ) +
-			OPC_RANGE(QUOTA) +
-			OPC_RANGE(LLOG) +
-			OPC_RANGE(OBD) +
-			OPC_RANGE(MGS) +
-			OPC_RANGE(LDLM) +
-			OPC_RANGE(MDS) +
-			OPC_RANGE(OST));
-	} else if (opc < UPDATE_LAST_OPC) {
-		/* update opcode */
-		return (opc - UPDATE_FIRST_OPC +
-			OPC_RANGE(FLD) +
+		return (opc - FLD_FIRST_OPC +
 			OPC_RANGE(SEC) +
 			OPC_RANGE(SEQ) +
 			OPC_RANGE(QUOTA) +
@@ -312,8 +301,7 @@
 			    OPC_RANGE(SEC)  + \
 			    OPC_RANGE(SEQ)  + \
 			    OPC_RANGE(SEC)  + \
-			    OPC_RANGE(FLD)  + \
-			    OPC_RANGE(UPDATE))
+			    OPC_RANGE(FLD))
 
 #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR)  + \
 			    OPC_RANGE(EXTRA))
@@ -407,7 +395,7 @@
 		} else {
 			unsigned int cpuid = get_cpu();
 
-			if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
+			if (unlikely(!stats->ls_percpu[cpuid])) {
 				rc = lprocfs_stats_alloc_one(stats, cpuid);
 				if (rc < 0) {
 					put_cpu();
@@ -521,11 +509,11 @@
 	unsigned long flags	= 0;
 	__u64	      ret	= 0;
 
-	LASSERT(stats != NULL);
+	LASSERT(stats);
 
 	num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
 	for (i = 0; i < num_cpu; i++) {
-		if (stats->ls_percpu[i] == NULL)
+		if (!stats->ls_percpu[i])
 			continue;
 		ret += lprocfs_read_helper(
 				lprocfs_stats_counter_get(stats, i, idx),
@@ -608,7 +596,7 @@
 			 int *val);
 int lprocfs_write_u64_helper(const char __user *buffer,
 			     unsigned long count, __u64 *val);
-int lprocfs_write_frac_u64_helper(const char *buffer,
+int lprocfs_write_frac_u64_helper(const char __user *buffer,
 				  unsigned long count,
 				  __u64 *val, int mult);
 char *lprocfs_find_named_value(const char *buffer, const char *name,
@@ -625,9 +613,10 @@
 int lprocfs_seq_release(struct inode *, struct file *);
 
 /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
-  proc entries; otherwise, you will define name##_seq_write function also for
-  a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
-  call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */
+ * proc entries; otherwise, you will define name##_seq_write function also for
+ * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
+ * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data);
+ */
 #define __LPROC_SEQ_FOPS(name, custom_seq_write)			\
 static int name##_single_open(struct inode *inode, struct file *file)	\
 {									\
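
The lprocfs hunk above drops the UPDATE opcode range from the cumulative offset chain. The scheme itself maps several disjoint opcode ranges onto one dense counter array: subtract the range's first opcode, then add the widths of every range that precedes it. A toy version with two invented ranges:

#include <stdio.h>

/* Two disjoint opcode ranges mapped onto one dense counter array. */
#define OST_FIRST_OPC	0
#define OST_LAST_OPC	21
#define MDS_FIRST_OPC	33
#define MDS_LAST_OPC	61

#define OPC_RANGE(seg)	(seg##_LAST_OPC - seg##_FIRST_OPC)

static int opcode_offset(int opc)
{
	if (opc >= OST_FIRST_OPC && opc < OST_LAST_OPC)
		return opc - OST_FIRST_OPC;
	if (opc >= MDS_FIRST_OPC && opc < MDS_LAST_OPC)
		return opc - MDS_FIRST_OPC + OPC_RANGE(OST);
	return -1;	/* unknown opcode */
}

int main(void)
{
	printf("opc 3  -> slot %d\n", opcode_offset(3));	/* 3 */
	printf("opc 40 -> slot %d\n", opcode_offset(40));	/* 7 + 21 = 28 */
	return 0;
}
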
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 1d79341..b5088b1 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -164,11 +164,12 @@
 /**
  * For lu_object_conf flags
  */
-typedef enum {
+enum loc_flags {
 	/* This is a new object to be allocated, or the file
-	 * corresponding to the object does not exists. */
+	 * corresponding to the object does not exist.
+	 */
 	LOC_F_NEW	= 0x00000001,
-} loc_flags_t;
+};
 
 /**
  * Object configuration, describing particulars of object being created. On
@@ -179,7 +180,7 @@
 	/**
 	 * Some hints for obj find and alloc.
 	 */
-	loc_flags_t     loc_flags;
+	enum loc_flags     loc_flags;
 };
 
 /**
@@ -392,7 +393,7 @@
 
 static inline int lu_device_is_md(const struct lu_device *d)
 {
-	return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
+	return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD);
 }
 
 /**
@@ -488,7 +489,7 @@
 	/**
 	 * Mark this object has already been taken out of cache.
 	 */
-	LU_OBJECT_UNHASHED = 1
+	LU_OBJECT_UNHASHED = 1,
 };
 
 enum lu_object_header_attr {
@@ -756,7 +757,7 @@
 /**
  * return device operations vector for this object
  */
-static const inline struct lu_device_operations *
+static inline const struct lu_device_operations *
 lu_object_ops(const struct lu_object *o)
 {
 	return o->lo_dev->ld_ops;
@@ -895,7 +896,8 @@
 /** @} helpers */
 
 /** \name lu_context
- * @{ */
+ * @{
+ */
 
 /** For lu_context health-checks */
 enum lu_context_state {
@@ -1119,7 +1121,7 @@
 		CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value));       \
 								  \
 		value = kzalloc(sizeof(*value), GFP_NOFS);	\
-		if (value == NULL)				\
+		if (!value)				\
 			value = ERR_PTR(-ENOMEM);		 \
 								  \
 		return value;				     \
@@ -1174,7 +1176,7 @@
 		do {						    \
 			LU_CONTEXT_KEY_INIT(key);		       \
 			key = va_arg(args, struct lu_context_key *);    \
-		} while (key != NULL);				  \
+		} while (key);				  \
 		va_end(args);					   \
 	}
 
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
index 97cd157..f7dfd83 100644
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ b/drivers/staging/lustre/lustre/include/lu_ref.h
@@ -17,10 +17,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #ifndef __LUSTRE_LU_REF_H
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index 09088f4..07d45de 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -47,9 +47,11 @@
 
 struct ll_fiemap_extent {
 	__u64 fe_logical;  /* logical offset in bytes for the start of
-			    * the extent from the beginning of the file */
+			    * the extent from the beginning of the file
+			    */
 	__u64 fe_physical; /* physical offset in bytes for the start
-			    * of the extent from the beginning of the disk */
+			    * of the extent from the beginning of the disk
+			    */
 	__u64 fe_length;   /* length in bytes for this extent */
 	__u64 fe_reserved64[2];
 	__u32 fe_flags;    /* FIEMAP_EXTENT_* flags for this extent */
@@ -59,9 +61,11 @@
 
 struct ll_user_fiemap {
 	__u64 fm_start;  /* logical offset (inclusive) at
-			  * which to start mapping (in) */
+			  * which to start mapping (in)
+			  */
 	__u64 fm_length; /* logical length of mapping which
-			  * userspace wants (in) */
+			  * userspace wants (in)
+			  */
 	__u32 fm_flags;  /* FIEMAP_FLAG_* flags for request (in/out) */
 	__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
 	__u32 fm_extent_count;  /* size of fm_extents array (in) */
@@ -71,28 +75,38 @@
 
 #define FIEMAP_MAX_OFFSET      (~0ULL)
 
-#define FIEMAP_FLAG_SYNC	 0x00000001 /* sync file data before map */
-#define FIEMAP_FLAG_XATTR	0x00000002 /* map extended attribute tree */
-
-#define FIEMAP_EXTENT_LAST	      0x00000001 /* Last extent in file. */
-#define FIEMAP_EXTENT_UNKNOWN	   0x00000002 /* Data location unknown. */
-#define FIEMAP_EXTENT_DELALLOC	  0x00000004 /* Location still pending.
-						    * Sets EXTENT_UNKNOWN. */
-#define FIEMAP_EXTENT_ENCODED	   0x00000008 /* Data can not be read
-						    * while fs is unmounted */
-#define FIEMAP_EXTENT_DATA_ENCRYPTED    0x00000080 /* Data is encrypted by fs.
-						    * Sets EXTENT_NO_DIRECT. */
+#define FIEMAP_FLAG_SYNC		0x00000001 /* sync file data before
+						    * map
+						    */
+#define FIEMAP_FLAG_XATTR		0x00000002 /* map extended attribute
+						    * tree
+						    */
+#define FIEMAP_EXTENT_LAST		0x00000001 /* Last extent in file. */
+#define FIEMAP_EXTENT_UNKNOWN		0x00000002 /* Data location unknown. */
+#define FIEMAP_EXTENT_DELALLOC		0x00000004 /* Location still pending.
+						    * Sets EXTENT_UNKNOWN.
+						    */
+#define FIEMAP_EXTENT_ENCODED		0x00000008 /* Data can not be read
+						    * while fs is unmounted
+						    */
+#define FIEMAP_EXTENT_DATA_ENCRYPTED	0x00000080 /* Data is encrypted by fs.
+						    * Sets EXTENT_NO_DIRECT.
+						    */
 #define FIEMAP_EXTENT_NOT_ALIGNED       0x00000100 /* Extent offsets may not be
-						    * block aligned. */
+						    * block aligned.
+						    */
 #define FIEMAP_EXTENT_DATA_INLINE       0x00000200 /* Data mixed with metadata.
 						    * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_DATA_TAIL	 0x00000400 /* Multiple files in block.
-						    * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_UNWRITTEN	 0x00000800 /* Space allocated, but
-						    * no data (i.e. zero). */
-#define FIEMAP_EXTENT_MERGED	    0x00001000 /* File does not natively
+#define FIEMAP_EXTENT_DATA_TAIL		0x00000400 /* Multiple files in block.
+						    * Sets EXTENT_NOT_ALIGNED.
+						    */
+#define FIEMAP_EXTENT_UNWRITTEN		0x00000800 /* Space allocated, but
+						    * no data (i.e. zero).
+						    */
+#define FIEMAP_EXTENT_MERGED		0x00001000 /* File does not natively
 						    * support extents. Result
-						    * merged for efficiency. */
+						    * merged for efficiency.
+						    */
 
 static inline size_t fiemap_count_to_size(size_t extent_count)
 {
@@ -114,7 +128,8 @@
 
 /* Lustre specific flags - use a high bit, don't conflict with upstream flag */
 #define FIEMAP_EXTENT_NO_DIRECT	 0x40000000 /* Data mapping undefined */
-#define FIEMAP_EXTENT_NET	       0x80000000 /* Data stored remotely.
-						    * Sets NO_DIRECT flag */
+#define FIEMAP_EXTENT_NET	 0x80000000 /* Data stored remotely.
+					     * Sets NO_DIRECT flag
+					     */
 
 #endif /* _LUSTRE_FIEMAP_H */
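
ll_fiemap.h mirrors the kernel's FIEMAP ABI, and the hunk above only realigns the flag comments. For context, the usual way to consume that ABI from user space is to size the buffer for a chosen extent count (what fiemap_count_to_size() computes) and call FS_IOC_FIEMAP; a hedged sketch using the stock <linux/fiemap.h> definitions, with error handling trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	unsigned int count = 32, i;
	struct fiemap *fm;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	/* header plus room for 'count' extents, as fiemap_count_to_size() sizes it */
	fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data first */
	fm->fm_extent_count = count;

	if (!ioctl(fd, FS_IOC_FIEMAP, fm))
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("extent %u: logical %llu physical %llu length %llu\n",
			       i,
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length);

	free(fm);
	close(fd);
	return 0;
}
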
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
deleted file mode 100644
index 93a3d7d..0000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0"
-#define LUSTRE_RELEASE 3.9.0_g6e62c21
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index b064b58..284affb 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -113,25 +113,25 @@
 
 #define CONNMGR_REQUEST_PORTAL	  1
 #define CONNMGR_REPLY_PORTAL	    2
-//#define OSC_REQUEST_PORTAL	    3
+/*#define OSC_REQUEST_PORTAL	    3 */
 #define OSC_REPLY_PORTAL		4
-//#define OSC_BULK_PORTAL	       5
+/*#define OSC_BULK_PORTAL	       5 */
 #define OST_IO_PORTAL		   6
 #define OST_CREATE_PORTAL	       7
 #define OST_BULK_PORTAL		 8
-//#define MDC_REQUEST_PORTAL	    9
+/*#define MDC_REQUEST_PORTAL	    9 */
 #define MDC_REPLY_PORTAL	       10
-//#define MDC_BULK_PORTAL	      11
+/*#define MDC_BULK_PORTAL	      11 */
 #define MDS_REQUEST_PORTAL	     12
-//#define MDS_REPLY_PORTAL	     13
+/*#define MDS_REPLY_PORTAL	     13 */
 #define MDS_BULK_PORTAL		14
 #define LDLM_CB_REQUEST_PORTAL	 15
 #define LDLM_CB_REPLY_PORTAL	   16
 #define LDLM_CANCEL_REQUEST_PORTAL     17
 #define LDLM_CANCEL_REPLY_PORTAL       18
-//#define PTLBD_REQUEST_PORTAL	   19
-//#define PTLBD_REPLY_PORTAL	     20
-//#define PTLBD_BULK_PORTAL	      21
+/*#define PTLBD_REQUEST_PORTAL	   19 */
+/*#define PTLBD_REPLY_PORTAL	     20 */
+/*#define PTLBD_BULK_PORTAL	      21 */
 #define MDS_SETATTR_PORTAL	     22
 #define MDS_READPAGE_PORTAL	    23
 #define OUT_PORTAL		    24
@@ -146,7 +146,9 @@
 #define SEQ_CONTROLLER_PORTAL	  32
 #define MGS_BULK_PORTAL		33
 
-/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
+/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
+ *						n8851@cray.com
+ */
 
 /* packet types */
 #define PTL_RPC_MSG_REQUEST 4711
@@ -295,7 +297,8 @@
 	fld_range_is_mdt(range) ? "mdt" : "ost"
 
 /** \defgroup lu_fid lu_fid
- * @{ */
+ * @{
+ */
 
 /**
  * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
@@ -307,7 +310,8 @@
 	LMAC_SOM	= 0x00000002,
 	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
 	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
-				       * under /O/<seq>/d<x>. */
+				       * under /O/<seq>/d<x>.
+				       */
 };
 
 /**
@@ -319,7 +323,8 @@
 	LMAI_RELEASED		= 0x00000001, /* file is released */
 	LMAI_AGENT		= 0x00000002, /* agent inode */
 	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
-						 is on the remote MDT */
+					       * is on the remote MDT
+					       */
 };
 
 #define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)
@@ -395,12 +400,14 @@
 	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
 	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
 	/* sequence is used for local named objects FIDs generated
-	 * by local_object_storage library */
+	 * by local_object_storage library
+	 */
 	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
 	/* Because current FLD will only cache the fid sequence, instead
 	 * of oid on the client side, if the FID needs to be exposed to
 	 * clients sides, it needs to make sure all of fids under one
-	 * sequence will be located in one MDT. */
+	 * sequence will be located in one MDT.
+	 */
 	FID_SEQ_SPECIAL		= 0x200000004ULL,
 	FID_SEQ_QUOTA		= 0x200000005ULL,
 	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
@@ -601,7 +608,8 @@
 		oi->oi_fid.f_seq = seq;
 		/* Note: if f_oid + f_ver is zero, we need init it
 		 * to be 1, otherwise, ostid_seq will treat this
-		 * as old ostid (oi_seq == 0) */
+		 * as old ostid (oi_seq == 0)
+		 */
 		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
 			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
 	}
@@ -689,11 +697,12 @@
 		 * that we map into the IDIF namespace.  It allows up to 2^48
 		 * objects per OST, as this is the object namespace that has
 		 * been in production for years.  This can handle create rates
-		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
+		 * of 1M objects/s/OST for 9 years, or combinations thereof.
+		 */
 		if (ostid_id(ostid) >= IDIF_MAX_OID) {
-			 CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
-				POSTID(ostid), ost_idx);
-			 return -EBADF;
+			CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
+			       POSTID(ostid), ost_idx);
+			return -EBADF;
 		}
 		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
 		/* truncate to 32 bits by assignment */
@@ -704,7 +713,8 @@
 	       /* This is either an IDIF object, which identifies objects across
 		* all OSTs, or a regular FID.  The IDIF namespace maps legacy
 		* OST objects into the FID namespace.  In both cases, we just
-		* pass the FID through, no conversion needed. */
+		* pass the FID through, no conversion needed.
+		*/
 		if (ostid->oi_fid.f_ver != 0) {
 			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
 				POSTID(ostid), ost_idx);
@@ -807,7 +817,7 @@
 
 static inline int fid_is_sane(const struct lu_fid *fid)
 {
-	return fid != NULL &&
+	return fid &&
 	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
 		fid_is_igif(fid) || fid_is_idif(fid) ||
 		fid_seq_is_rsvd(fid_seq(fid)));
@@ -868,7 +878,8 @@
 /** @} lu_fid */
 
 /** \defgroup lu_dir lu_dir
- * @{ */
+ * @{
+ */
 
 /**
  * Enumeration of possible directory entry attributes.
@@ -880,24 +891,8 @@
 	LUDA_FID		= 0x0001,
 	LUDA_TYPE		= 0x0002,
 	LUDA_64BITHASH		= 0x0004,
-
-	/* The following attrs are used for MDT internal only,
-	 * not visible to client */
-
-	/* Verify the dirent consistency */
-	LUDA_VERIFY		= 0x8000,
-	/* Only check but not repair the dirent inconsistency */
-	LUDA_VERIFY_DRYRUN	= 0x4000,
-	/* The dirent has been repaired, or to be repaired (dryrun). */
-	LUDA_REPAIR		= 0x2000,
-	/* The system is upgraded, has beed or to be repaired (dryrun). */
-	LUDA_UPGRADE		= 0x1000,
-	/* Ignore this record, go to next directly. */
-	LUDA_IGNORE		= 0x0800,
 };
 
-#define LU_DIRENT_ATTRS_MASK	0xf800
-
 /**
  * Layout of readdir pages, as transmitted on wire.
  */
@@ -1128,7 +1123,8 @@
 	__u32 pb_conn_cnt;
 	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
 	__u32 pb_service_time; /* for rep, actual service time, also used for
-				  net_latency of req */
+				* net_latency of req
+				*/
 	__u32 pb_limit;
 	__u64 pb_slv;
 	/* VBR: pre-versions */
@@ -1174,7 +1170,8 @@
 /* #define MSG_AT_SUPPORT	 0x0008
  * This was used in early prototypes of adaptive timeouts, and while there
  * shouldn't be any users of that code there also isn't a need for using this
- * bits. Defer usage until at least 1.10 to avoid potential conflict. */
+ * bits. Defer usage until at least 1.10 to avoid potential conflict.
+ */
 #define MSG_DELAY_REPLAY	  0x0010
 #define MSG_VERSION_REPLAY	0x0020
 #define MSG_REQ_REPLAY_DONE       0x0040
@@ -1187,7 +1184,7 @@
 #define MSG_CONNECT_RECOVERING  0x00000001
 #define MSG_CONNECT_RECONNECT   0x00000002
 #define MSG_CONNECT_REPLAYABLE  0x00000004
-//#define MSG_CONNECT_PEER	0x8
+/*#define MSG_CONNECT_PEER	0x8 */
 #define MSG_CONNECT_LIBCLIENT   0x00000010
 #define MSG_CONNECT_INITIAL     0x00000020
 #define MSG_CONNECT_ASYNC       0x00000040
@@ -1195,60 +1192,65 @@
 #define MSG_CONNECT_TRANSNO     0x00000100 /* report transno */
 
 /* Connect flags */
-#define OBD_CONNECT_RDONLY		0x1ULL /*client has read-only access*/
-#define OBD_CONNECT_INDEX		 0x2ULL /*connect specific LOV idx */
-#define OBD_CONNECT_MDS		   0x4ULL /*connect from MDT to OST */
-#define OBD_CONNECT_GRANT		 0x8ULL /*OSC gets grant at connect */
-#define OBD_CONNECT_SRVLOCK	      0x10ULL /*server takes locks for cli */
-#define OBD_CONNECT_VERSION	      0x20ULL /*Lustre versions in ocd */
-#define OBD_CONNECT_REQPORTAL	    0x40ULL /*Separate non-IO req portal */
-#define OBD_CONNECT_ACL		  0x80ULL /*access control lists */
-#define OBD_CONNECT_XATTR	       0x100ULL /*client use extended attr */
+#define OBD_CONNECT_RDONLY		  0x1ULL /*client has read-only access*/
+#define OBD_CONNECT_INDEX		  0x2ULL /*connect specific LOV idx */
+#define OBD_CONNECT_MDS			  0x4ULL /*connect from MDT to OST */
+#define OBD_CONNECT_GRANT		  0x8ULL /*OSC gets grant at connect */
+#define OBD_CONNECT_SRVLOCK		 0x10ULL /*server takes locks for cli */
+#define OBD_CONNECT_VERSION		 0x20ULL /*Lustre versions in ocd */
+#define OBD_CONNECT_REQPORTAL		 0x40ULL /*Separate non-IO req portal */
+#define OBD_CONNECT_ACL			 0x80ULL /*access control lists */
+#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
 #define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
-#define OBD_CONNECT_TRUNCLOCK	   0x400ULL /*locks on server for punch */
-#define OBD_CONNECT_TRANSNO	     0x800ULL /*replay sends init transno */
-#define OBD_CONNECT_IBITS	      0x1000ULL /*support for inodebits locks*/
+#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
+#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
+#define OBD_CONNECT_IBITS	       0x1000ULL /*support for inodebits locks*/
 #define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
 						  *We do not support JOIN FILE
 						  *anymore, reserve this flags
 						  *just for preventing such bit
-						  *to be reused.*/
-#define OBD_CONNECT_ATTRFID	    0x4000ULL /*Server can GetAttr By Fid*/
-#define OBD_CONNECT_NODEVOH	    0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT	0x10000ULL /*Remote client */
+						  *to be reused.
+						  */
+#define OBD_CONNECT_ATTRFID	       0x4000ULL /*Server can GetAttr By Fid*/
+#define OBD_CONNECT_NODEVOH	       0x8000ULL /*No open hndl on specl nodes*/
+#define OBD_CONNECT_RMT_CLIENT	      0x10000ULL /*Remote client */
 #define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /*Remote client by force */
-#define OBD_CONNECT_BRW_SIZE	  0x40000ULL /*Max bytes per rpc */
-#define OBD_CONNECT_QUOTA64	   0x80000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_MDS_CAPA	 0x100000ULL /*MDS capability */
-#define OBD_CONNECT_OSS_CAPA	 0x200000ULL /*OSS capability */
-#define OBD_CONNECT_CANCELSET	0x400000ULL /*Early batched cancels. */
-#define OBD_CONNECT_SOM	      0x800000ULL /*Size on MDS */
-#define OBD_CONNECT_AT	      0x1000000ULL /*client uses AT */
+#define OBD_CONNECT_BRW_SIZE	      0x40000ULL /*Max bytes per rpc */
+#define OBD_CONNECT_QUOTA64	      0x80000ULL /*Not used since 2.4 */
+#define OBD_CONNECT_MDS_CAPA	     0x100000ULL /*MDS capability */
+#define OBD_CONNECT_OSS_CAPA	     0x200000ULL /*OSS capability */
+#define OBD_CONNECT_CANCELSET	     0x400000ULL /*Early batched cancels. */
+#define OBD_CONNECT_SOM		     0x800000ULL /*Size on MDS */
+#define OBD_CONNECT_AT		    0x1000000ULL /*client uses AT */
 #define OBD_CONNECT_LRU_RESIZE      0x2000000ULL /*LRU resize feature. */
-#define OBD_CONNECT_MDS_MDS	 0x4000000ULL /*MDS-MDS connection */
+#define OBD_CONNECT_MDS_MDS	    0x4000000ULL /*MDS-MDS connection */
 #define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
 #define OBD_CONNECT_CHANGE_QS      0x10000000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_CKSUM	  0x20000000ULL /*support several cksum algos*/
-#define OBD_CONNECT_FID	    0x40000000ULL /*FID is supported by server */
-#define OBD_CONNECT_VBR	    0x80000000ULL /*version based recovery */
-#define OBD_CONNECT_LOV_V3	0x100000000ULL /*client supports LOV v3 EA */
+#define OBD_CONNECT_CKSUM	   0x20000000ULL /*support several cksum algos*/
+#define OBD_CONNECT_FID		   0x40000000ULL /*FID is supported by server */
+#define OBD_CONNECT_VBR		   0x80000000ULL /*version based recovery */
+#define OBD_CONNECT_LOV_V3	  0x100000000ULL /*client supports LOV v3 EA */
 #define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
 #define OBD_CONNECT_SKIP_ORPHAN   0x400000000ULL /* don't reuse orphan objids */
 #define OBD_CONNECT_MAX_EASIZE    0x800000000ULL /* preserved for large EA */
 #define OBD_CONNECT_FULL20       0x1000000000ULL /* it is 2.0 client */
 #define OBD_CONNECT_LAYOUTLOCK   0x2000000000ULL /* client uses layout lock */
 #define OBD_CONNECT_64BITHASH    0x4000000000ULL /* client supports 64-bits
-						  * directory hash */
+						  * directory hash
+						  */
 #define OBD_CONNECT_MAXBYTES     0x8000000000ULL /* max stripe size */
 #define OBD_CONNECT_IMP_RECOV   0x10000000000ULL /* imp recovery support */
 #define OBD_CONNECT_JOBSTATS    0x20000000000ULL /* jobid in ptlrpc_body */
 #define OBD_CONNECT_UMASK       0x40000000000ULL /* create uses client umask */
 #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
-						  * RPC error properly */
+						  * RPC error properly
+						  */
 #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
-						  * finer space reservation */
+						  * finer space reservation
+						  */
 #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
-						   * policy and 2.x server */
+						   * policy and 2.x server
+						   */
 #define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
 #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
 #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
@@ -1264,61 +1266,19 @@
  * submit a small patch against EVERY branch that ONLY adds the new flag,
  * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
  * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
- * can be approved and landed easily to reserve the flag for future use. */
+ * can be approved and landed easily to reserve the flag for future use.
+ */
 
 /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
  * connection.  It is a temporary bug fix for Imperative Recovery interop
  * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
- * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644. */
+ * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644.
+ */
 #define OBD_CONNECT_MNE_SWAB		 OBD_CONNECT_MDS_MDS
 
 #define OCD_HAS_FLAG(ocd, flg)  \
 	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
 
-#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
-
-#define MDT_CONNECT_SUPPORTED  (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
-				OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
-				OBD_CONNECT_IBITS | \
-				OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
-				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
-				OBD_CONNECT_RMT_CLIENT | \
-				OBD_CONNECT_RMT_CLIENT_FORCE | \
-				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
-				OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
-				OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
-				OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
-				OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
-				OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
-				OBD_CONNECT_EINPROGRESS | \
-				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
-				OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
-				OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
-				OBD_CONNECT_FLOCK_DEAD | \
-				OBD_CONNECT_DISP_STRIPE)
-
-#define OST_CONNECT_SUPPORTED  (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
-				OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
-				OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
-				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
-				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
-				LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
-				OBD_CONNECT_RMT_CLIENT | \
-				OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
-				OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
-				OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
-				OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
-				OBD_CONNECT_MAX_EASIZE | \
-				OBD_CONNECT_EINPROGRESS | \
-				OBD_CONNECT_JOBSTATS | \
-				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
-				OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
-				OBD_CONNECT_PINGLESS)
-#define ECHO_CONNECT_SUPPORTED (0)
-#define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
-				OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
-				OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
-
 /* Features required for this version of the client to work with server */
 #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
 				 OBD_CONNECT_FULL20)
@@ -1334,7 +1294,8 @@
 /* This structure is used for both request and reply.
  *
  * If we eventually have separate connect data for different types, which we
- * almost certainly will, then perhaps we stick a union in here. */
+ * almost certainly will, then perhaps we stick a union in here.
+ */
 struct obd_connect_data_v1 {
 	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
 	__u32 ocd_version;	 /* lustre release version number */
@@ -1364,7 +1325,7 @@
 	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
 	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
 	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
-	__u32 ocd_unused;	/* also fix lustre_swab_connect */
+	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
 	__u64 ocd_transno;       /* first transno from client to be replayed */
 	__u32 ocd_group;	 /* MDS group on OST */
 	__u32 ocd_cksum_types;   /* supported checksum algorithms */
@@ -1374,7 +1335,8 @@
 	/* Fields after ocd_maxbytes are only accessible by the receiver
 	 * if the corresponding flag in ocd_connect_flags is set. Accessing
 	 * any field after ocd_maxbytes on the receiver without a valid flag
-	 * may result in out-of-bound memory access and kernel oops. */
+	 * may result in out-of-bound memory access and kernel oops.
+	 */
 	__u64 padding1;	  /* added 2.1.0. also fix lustre_swab_connect */
 	__u64 padding2;	  /* added 2.1.0. also fix lustre_swab_connect */
 	__u64 padding3;	  /* added 2.1.0. also fix lustre_swab_connect */
@@ -1398,7 +1360,8 @@
  * with senior engineers before starting to use a new field.  Then, submit
  * a small patch against EVERY branch that ONLY adds the new field along with
  * the matching OBD_CONNECT flag, so that can be approved and landed easily to
- * reserve the flag for future use. */
+ * reserve the flag for future use.
+ */
 
 void lustre_swab_connect(struct obd_connect_data *ocd);
 
@@ -1408,18 +1371,18 @@
  * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
  * algorithm and also the OBD_FL_CKSUM* flags.
  */
-typedef enum {
+enum cksum_type {
 	OBD_CKSUM_CRC32  = 0x00000001,
 	OBD_CKSUM_ADLER  = 0x00000002,
 	OBD_CKSUM_CRC32C = 0x00000004,
-} cksum_type_t;
+};
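With the typedef removed, callers that used to declare cksum_type_t variables now spell the enum tag explicitly; a minimal sketch with a hypothetical helper name:

	static inline bool cksum_is_supported(enum cksum_type type)
	{
		/* the three values above are the only checksum algorithms on the wire */
		return type == OBD_CKSUM_CRC32 || type == OBD_CKSUM_ADLER ||
		       type == OBD_CKSUM_CRC32C;
	}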
 
 /*
  *   OST requests: OBDO & OBD request records
  */
 
 /* opcodes */
-typedef enum {
+enum ost_cmd {
 	OST_REPLY      =  0,       /* reply ? */
 	OST_GETATTR    =  1,
 	OST_SETATTR    =  2,
@@ -1440,14 +1403,14 @@
 	OST_QUOTACTL   = 19,
 	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
 	OST_LAST_OPC
-} ost_cmd_t;
+};
 #define OST_FIRST_OPC  OST_REPLY
 
 enum obdo_flags {
 	OBD_FL_INLINEDATA   = 0x00000001,
 	OBD_FL_OBDMDEXISTS  = 0x00000002,
 	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
-	OBD_FL_NORPC	= 0x00000008, /* set in o_flags do in OSC not OST */
+	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
 	OBD_FL_IDONLY       = 0x00000010, /* set in o_flags only adjust obj id*/
 	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
 	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
@@ -1461,14 +1424,16 @@
 	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
 	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
 	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
-	OBD_FL_MMAP	 = 0x00040000, /* object is mmapped on the client.
+	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
 					   * XXX: obsoleted - reserved for old
-					   * clients prior than 2.2 */
+					   * clients prior to 2.2
+					   */
 	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
 	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */
 
 	/* Note that while these checksum values are currently separate bits,
-	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
+	 * in 2.x we can actually allow all values from 1-31 if we wanted.
+	 */
 	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
 			      OBD_FL_CKSUM_CRC32C,
 
@@ -1657,7 +1622,7 @@
 	}
 }
 
-#define OBD_MD_FLID	(0x00000001ULL) /* object ID */
+#define OBD_MD_FLID	   (0x00000001ULL) /* object ID */
 #define OBD_MD_FLATIME     (0x00000002ULL) /* access time */
 #define OBD_MD_FLMTIME     (0x00000004ULL) /* data modification time */
 #define OBD_MD_FLCTIME     (0x00000008ULL) /* change time */
@@ -1683,22 +1648,23 @@
 #define OBD_MD_FLGROUP     (0x01000000ULL) /* group */
 #define OBD_MD_FLFID       (0x02000000ULL) /* ->ost write inline fid */
 #define OBD_MD_FLEPOCH     (0x04000000ULL) /* ->ost write with ioepoch */
-					   /* ->mds if epoch opens or closes */
+					   /* ->mds if epoch opens or closes
+					    */
 #define OBD_MD_FLGRANT     (0x08000000ULL) /* ost preallocation space grant */
 #define OBD_MD_FLDIREA     (0x10000000ULL) /* dir's extended attribute data */
 #define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
 #define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
 
-#define OBD_MD_MDS	 (0x0000000100000000ULL) /* where an inode lives on */
+#define OBD_MD_MDS	   (0x0000000100000000ULL) /* where an inode lives on */
 #define OBD_MD_REINT       (0x0000000200000000ULL) /* reintegrate oa */
-#define OBD_MD_MEA	 (0x0000000400000000ULL) /* CMD split EA  */
+#define OBD_MD_MEA	   (0x0000000400000000ULL) /* CMD split EA  */
 #define OBD_MD_TSTATE      (0x0000000800000000ULL) /* transient state field */
 
 #define OBD_MD_FLXATTR       (0x0000001000000000ULL) /* xattr */
 #define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
 #define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
-#define OBD_MD_FLACL	 (0x0000008000000000ULL) /* ACL */
+#define OBD_MD_FLACL	     (0x0000008000000000ULL) /* ACL */
 #define OBD_MD_FLRMTPERM     (0x0000010000000000ULL) /* remote permission */
 #define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
 #define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
@@ -1707,7 +1673,8 @@
 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
 						      * under lock; for xattr
 						      * requests means the
-						      * client holds the lock */
+						      * client holds the lock
+						      */
 #define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */
 
 #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
@@ -1727,7 +1694,8 @@
 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
 
 /* don't forget obdo_fid which is way down at the bottom so it can
- * come after the definition of llog_cookie */
+ * come after the definition of llog_cookie
+ */
 
 enum hss_valid {
 	HSS_SETMASK	= 0x01,
@@ -1749,19 +1717,20 @@
 
 /* ost_body.data values for OST_BRW */
 
-#define OBD_BRW_READ	    0x01
-#define OBD_BRW_WRITE	   0x02
-#define OBD_BRW_RWMASK	  (OBD_BRW_READ | OBD_BRW_WRITE)
-#define OBD_BRW_SYNC	    0x08 /* this page is a part of synchronous
+#define OBD_BRW_READ		0x01
+#define OBD_BRW_WRITE		0x02
+#define OBD_BRW_RWMASK		(OBD_BRW_READ | OBD_BRW_WRITE)
+#define OBD_BRW_SYNC		0x08 /* this page is a part of synchronous
 				      * transfer and is not accounted in
-				      * the grant. */
-#define OBD_BRW_CHECK	   0x10
+				      * the grant.
+				      */
+#define OBD_BRW_CHECK		0x10
 #define OBD_BRW_FROM_GRANT      0x20 /* the osc manages this under llite */
-#define OBD_BRW_GRANTED	 0x40 /* the ost manages this */
-#define OBD_BRW_NOCACHE	 0x80 /* this page is a part of non-cached IO */
-#define OBD_BRW_NOQUOTA	0x100
-#define OBD_BRW_SRVLOCK	0x200 /* Client holds no lock over this page */
-#define OBD_BRW_ASYNC	  0x400 /* Server may delay commit to disk */
+#define OBD_BRW_GRANTED		0x40 /* the ost manages this */
+#define OBD_BRW_NOCACHE		0x80 /* this page is a part of non-cached IO */
+#define OBD_BRW_NOQUOTA	       0x100
+#define OBD_BRW_SRVLOCK	       0x200 /* Client holds no lock over this page */
+#define OBD_BRW_ASYNC	       0x400 /* Server may delay commit to disk */
 #define OBD_BRW_MEMALLOC       0x800 /* Client runs in the "kswapd" context */
 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
@@ -1775,7 +1744,8 @@
 	struct ost_id	ioo_oid;	/* object ID, if multi-obj BRW */
 	__u32		ioo_max_brw;	/* low 16 bits were o_mode before 2.4,
 					 * now (PTLRPC_BULK_OPS_COUNT - 1) in
-					 * high 16 bits in 2.4 and later */
+					 * high 16 bits in 2.4 and later
+					 */
 	__u32		ioo_bufcnt;	/* number of niobufs for this object */
 };
 
@@ -1799,7 +1769,8 @@
 /* lock value block communicated between the filter and llite */
 
 /* OST_LVB_ERR_INIT is needed because the return code in rc is
- * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
+ * negative, i.e. because ((MASK + rc) & MASK) != MASK.
+ */
 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
 #define OST_LVB_IS_ERR(blocks)					  \
@@ -1836,23 +1807,12 @@
  *   lquota data structures
  */
 
-#ifndef QUOTABLOCK_BITS
-#define QUOTABLOCK_BITS 10
-#endif
-
-#ifndef QUOTABLOCK_SIZE
-#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
-#endif
-
-#ifndef toqb
-#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
-#endif
-
 /* The lquota_id structure is an union of all the possible identifier types that
  * can be used with quota, this includes:
  * - 64-bit user ID
  * - 64-bit group ID
- * - a FID which can be used for per-directory quota in the future */
+ * - a FID which can be used for per-directory quota in the future
+ */
 union lquota_id {
 	struct lu_fid	qid_fid; /* FID for per-directory quota */
 	__u64		qid_uid; /* user identifier */
@@ -1889,89 +1849,6 @@
 	Q_COPY(out, in, qc_dqblk);	\
 } while (0)
 
-/* Body of quota request used for quota acquire/release RPCs between quota
- * master (aka QMT) and slaves (ak QSD). */
-struct quota_body {
-	struct lu_fid	qb_fid;     /* FID of global index packing the pool ID
-				      * and type (data or metadata) as well as
-				      * the quota type (user or group). */
-	union lquota_id	qb_id;      /* uid or gid or directory FID */
-	__u32		qb_flags;   /* see below */
-	__u32		qb_padding;
-	__u64		qb_count;   /* acquire/release count (kbytes/inodes) */
-	__u64		qb_usage;   /* current slave usage (kbytes/inodes) */
-	__u64		qb_slv_ver; /* slave index file version */
-	struct lustre_handle	qb_lockh;     /* per-ID lock handle */
-	struct lustre_handle	qb_glb_lockh; /* global lock handle */
-	__u64		qb_padding1[4];
-};
-
-/* When the quota_body is used in the reply of quota global intent
- * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
-#define qb_slv_fid	qb_fid
-/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
- * quota reply */
-#define qb_qunit	qb_usage
-
-#define QUOTA_DQACQ_FL_ACQ	0x1  /* acquire quota */
-#define QUOTA_DQACQ_FL_PREACQ	0x2  /* pre-acquire */
-#define QUOTA_DQACQ_FL_REL	0x4  /* release quota */
-#define QUOTA_DQACQ_FL_REPORT	0x8  /* report usage */
-
-void lustre_swab_quota_body(struct quota_body *b);
-
-/* Quota types currently supported */
-enum {
-	LQUOTA_TYPE_USR	= 0x00, /* maps to USRQUOTA */
-	LQUOTA_TYPE_GRP	= 0x01, /* maps to GRPQUOTA */
-	LQUOTA_TYPE_MAX
-};
-
-/* There are 2 different resource types on which a quota limit can be enforced:
- * - inodes on the MDTs
- * - blocks on the OSTs */
-enum {
-	LQUOTA_RES_MD		= 0x01, /* skip 0 to avoid null oid in FID */
-	LQUOTA_RES_DT		= 0x02,
-	LQUOTA_LAST_RES,
-	LQUOTA_FIRST_RES	= LQUOTA_RES_MD
-};
-
-#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
-
-/*
- * Space accounting support
- * Format of an accounting record, providing disk usage information for a given
- * user or group
- */
-struct lquota_acct_rec { /* 16 bytes */
-	__u64 bspace;  /* current space in use */
-	__u64 ispace;  /* current # inodes in use */
-};
-
-/*
- * Global quota index support
- * Format of a global record, providing global quota settings for a given quota
- * identifier
- */
-struct lquota_glb_rec { /* 32 bytes */
-	__u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
-	__u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
-	__u64 qbr_time;      /* grace time, in seconds */
-	__u64 qbr_granted;   /* how much is granted to slaves, in #inodes or
-			      * kbytes */
-};
-
-/*
- * Slave index support
- * Format of a slave record, recording how much space is granted to a given
- * slave
- */
-struct lquota_slv_rec { /* 8 bytes */
-	__u64 qsr_granted; /* space granted to the slave for the key=ID,
-			    * in #inodes or kbytes */
-};
-
 /* Data structures associated with the quota locks */
 
 /* Glimpse descriptor used for the index & per-ID quota locks */
@@ -1985,9 +1862,6 @@
 	__u64		gl_pad2;
 };
 
-#define gl_qunit	gl_hardlimit /* current qunit value used when
-				      * glimpsing per-ID quota locks */
-
 /* quota glimpse flags */
 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
 
@@ -2002,15 +1876,12 @@
 
 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
 
-/* LVB used with global quota lock */
-#define lvb_glb_ver  lvb_id_may_rel /* current version of the global index */
-
 /* op codes */
-typedef enum {
+enum quota_cmd {
 	QUOTA_DQACQ	= 601,
 	QUOTA_DQREL	= 602,
 	QUOTA_LAST_OPC
-} quota_cmd_t;
+};
 #define QUOTA_FIRST_OPC	QUOTA_DQACQ
 
 /*
@@ -2018,7 +1889,7 @@
  */
 
 /* opcodes */
-typedef enum {
+enum mds_cmd {
 	MDS_GETATTR		= 33,
 	MDS_GETATTR_NAME	= 34,
 	MDS_CLOSE		= 35,
@@ -2049,23 +1920,15 @@
 	MDS_HSM_CT_UNREGISTER	= 60,
 	MDS_SWAP_LAYOUTS	= 61,
 	MDS_LAST_OPC
-} mds_cmd_t;
+};
 
 #define MDS_FIRST_OPC    MDS_GETATTR
 
-/* opcodes for object update */
-typedef enum {
-	UPDATE_OBJ	= 1000,
-	UPDATE_LAST_OPC
-} update_cmd_t;
-
-#define UPDATE_FIRST_OPC    UPDATE_OBJ
-
 /*
  * Do not exceed 63
  */
 
-typedef enum {
+enum mdt_reint_cmd {
 	REINT_SETATTR  = 1,
 	REINT_CREATE   = 2,
 	REINT_LINK     = 3,
@@ -2074,9 +1937,9 @@
 	REINT_OPEN     = 6,
 	REINT_SETXATTR = 7,
 	REINT_RMENTRY  = 8,
-//      REINT_WRITE    = 9,
+/*      REINT_WRITE    = 9, */
 	REINT_MAX
-} mds_reint_t, mdt_reint_t;
+};
 
 void lustre_swab_generic_32s(__u32 *val);
 
@@ -2097,7 +1960,8 @@
 /* INODE LOCK PARTS */
 #define MDS_INODELOCK_LOOKUP 0x000001	/* For namespace, dentry etc, and also
 					 * was used to protect permission (mode,
-					 * owner, group etc) before 2.4. */
+					 * owner, group etc) before 2.4.
+					 */
 #define MDS_INODELOCK_UPDATE 0x000002	/* size, links, timestamps */
 #define MDS_INODELOCK_OPEN   0x000004	/* For opened files */
 #define MDS_INODELOCK_LAYOUT 0x000008	/* for layout */
@@ -2110,7 +1974,8 @@
  * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together.
  * For Remote directory, the master MDT, where the remote directory is, will
  * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
- * will grant LOOKUP_LOCK. */
+ * will grant LOOKUP_LOCK.
+ */
 #define MDS_INODELOCK_PERM   0x000010
 #define MDS_INODELOCK_XATTR  0x000020	/* extended attributes */
 
@@ -2120,7 +1985,8 @@
 
 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
  * but was moved into name[1] along with the OID to avoid consuming the
- * name[2,3] fields that need to be used for the quota id (also a FID). */
+ * name[2,3] fields that need to be used for the quota id (also a FID).
+ */
 enum {
 	LUSTRE_RES_ID_SEQ_OFF = 0,
 	LUSTRE_RES_ID_VER_OID_OFF = 1,
@@ -2156,7 +2022,8 @@
 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES   0x1
 
 /* these should be identical to their EXT4_*_FL counterparts, they are
- * redefined here only to avoid dragging in fs/ext4/ext4.h */
+ * redefined here only to avoid dragging in fs/ext4/ext4.h
+ */
 #define LUSTRE_SYNC_FL	 0x00000008 /* Synchronous updates */
 #define LUSTRE_IMMUTABLE_FL    0x00000010 /* Immutable file */
 #define LUSTRE_APPEND_FL       0x00000020 /* writes to file may only append */
@@ -2168,15 +2035,14 @@
  * protocol equivalents of LDISKFS_*_FL values stored on disk, while
  * the S_* flags are kernel-internal values that change between kernel
  * versions.  These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
- * See b=16526 for a full history. */
+ * See b=16526 for a full history.
+ */
 static inline int ll_ext_to_inode_flags(int flags)
 {
 	return (((flags & LUSTRE_SYNC_FL)      ? S_SYNC      : 0) |
 		((flags & LUSTRE_NOATIME_FL)   ? S_NOATIME   : 0) |
 		((flags & LUSTRE_APPEND_FL)    ? S_APPEND    : 0) |
-#if defined(S_DIRSYNC)
 		((flags & LUSTRE_DIRSYNC_FL)   ? S_DIRSYNC   : 0) |
-#endif
 		((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
 }
 
@@ -2185,9 +2051,7 @@
 	return (((iflags & S_SYNC)      ? LUSTRE_SYNC_FL      : 0) |
 		((iflags & S_NOATIME)   ? LUSTRE_NOATIME_FL   : 0) |
 		((iflags & S_APPEND)    ? LUSTRE_APPEND_FL    : 0) |
-#if defined(S_DIRSYNC)
 		((iflags & S_DIRSYNC)   ? LUSTRE_DIRSYNC_FL   : 0) |
-#endif
 		((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
 }
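The two helpers above are inverses for the flag bits they handle; the round trip below is an illustrative sketch only (the function name is hypothetical):

	static inline int ll_flags_roundtrip(int ext_flags)
	{
		/* LUSTRE_*_FL -> S_* -> LUSTRE_*_FL; bits not handled above are dropped */
		return ll_inode_to_ext_flags(ll_ext_to_inode_flags(ext_flags));
	}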
 
@@ -2207,9 +2071,10 @@
 	__s64	  ctime;
 	__u64	  blocks; /* XID, in the case of MDS_READPAGE */
 	__u64	  ioepoch;
-	__u64	       t_state; /* transient file state defined in
-				 * enum md_transient_state
-				 * was "ino" until 2.4.0 */
+	__u64	  t_state; /* transient file state defined in
+			    * enum md_transient_state
+			    * was "ino" until 2.4.0
+			    */
 	__u32	  fsuid;
 	__u32	  fsgid;
 	__u32	  capability;
@@ -2219,7 +2084,7 @@
 	__u32	  flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
 	__u32	  rdev;
 	__u32	  nlink; /* #bytes to read in the case of MDS_READPAGE */
-	__u32	       unused2; /* was "generation" until 2.4.0 */
+	__u32	  unused2; /* was "generation" until 2.4.0 */
 	__u32	  suppgid;
 	__u32	  eadatasize;
 	__u32	  aclsize;
@@ -2256,7 +2121,8 @@
 };
 
 /* inode access permission for remote user, the inode info are omitted,
- * for client knows them. */
+ * since the client already knows them.
+ */
 struct mdt_remote_perm {
 	__u32	   rp_uid;
 	__u32	   rp_gid;
@@ -2306,13 +2172,13 @@
  * since the client and MDS may run different kernels (see bug 13828)
  * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
  */
-#define MDS_ATTR_MODE	  0x1ULL /* = 1 */
-#define MDS_ATTR_UID	   0x2ULL /* = 2 */
-#define MDS_ATTR_GID	   0x4ULL /* = 4 */
-#define MDS_ATTR_SIZE	  0x8ULL /* = 8 */
-#define MDS_ATTR_ATIME	0x10ULL /* = 16 */
-#define MDS_ATTR_MTIME	0x20ULL /* = 32 */
-#define MDS_ATTR_CTIME	0x40ULL /* = 64 */
+#define MDS_ATTR_MODE	       0x1ULL /* = 1 */
+#define MDS_ATTR_UID	       0x2ULL /* = 2 */
+#define MDS_ATTR_GID	       0x4ULL /* = 4 */
+#define MDS_ATTR_SIZE	       0x8ULL /* = 8 */
+#define MDS_ATTR_ATIME	      0x10ULL /* = 16 */
+#define MDS_ATTR_MTIME	      0x20ULL /* = 32 */
+#define MDS_ATTR_CTIME	      0x40ULL /* = 64 */
 #define MDS_ATTR_ATIME_SET    0x80ULL /* = 128 */
 #define MDS_ATTR_MTIME_SET   0x100ULL /* = 256 */
 #define MDS_ATTR_FORCE       0x200ULL /* = 512, Not a change, but a change it */
@@ -2320,14 +2186,11 @@
 #define MDS_ATTR_KILL_SUID   0x800ULL /* = 2048 */
 #define MDS_ATTR_KILL_SGID  0x1000ULL /* = 4096 */
 #define MDS_ATTR_CTIME_SET  0x2000ULL /* = 8192 */
-#define MDS_ATTR_FROM_OPEN  0x4000ULL /* = 16384, called from open path, ie O_TRUNC */
+#define MDS_ATTR_FROM_OPEN  0x4000ULL /* = 16384, called from open path,
+				       * i.e. O_TRUNC
+				       */
 #define MDS_ATTR_BLOCKS     0x8000ULL /* = 32768 */
 
-#ifndef FMODE_READ
-#define FMODE_READ	       00000001
-#define FMODE_WRITE	      00000002
-#endif
-
 #define MDS_FMODE_CLOSED	 00000000
 #define MDS_FMODE_EXEC	   00000004
 /* IO Epoch is opened on a closed file. */
@@ -2354,9 +2217,10 @@
 					   * We do not support JOIN FILE
 					   * anymore, reserve this flags
 					   * just for preventing such bit
-					   * to be reused. */
+					   * to be reused.
+					   */
 
-#define MDS_OPEN_LOCK	 04000000000 /* This open requires open lock */
+#define MDS_OPEN_LOCK	      04000000000 /* This open requires open lock */
 #define MDS_OPEN_HAS_EA      010000000000 /* specify object create pattern */
 #define MDS_OPEN_HAS_OBJS    020000000000 /* Just set the EA the obj exist */
 #define MDS_OPEN_NORESTORE  0100000000000ULL /* Do not restore file at open */
@@ -2409,7 +2273,8 @@
 	__u32	   cr_bias;
 	/* use of helpers set/get_mrc_cr_flags() is needed to access
 	 * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
-	 * extend cr_flags size without breaking 1.8 compat */
+	 * extend cr_flags size without breaking 1.8 compat
+	 */
 	__u32	   cr_flags_l;     /* for use with open, low  32 bits  */
 	__u32	   cr_flags_h;     /* for use with open, high 32 bits */
 	__u32	   cr_umask;       /* umask for create */
@@ -2630,7 +2495,8 @@
 #define LOV_MAX_UUID_BUFFER_SIZE  8192
 /* The size of the buffer the lov/mdc reserves for the
  * array of UUIDs returned by the MDS.  With the current
- * protocol, this will limit the max number of OSTs per LOV */
+ * protocol, this will limit the max number of OSTs per LOV
+ */
 
 #define LOV_DESC_MAGIC 0xB0CCDE5C
 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5  /* Seconds */
@@ -2639,13 +2505,13 @@
 /* LOV settings descriptor (should only contain static info) */
 struct lov_desc {
 	__u32 ld_tgt_count;		/* how many OBD's */
-	__u32 ld_active_tgt_count;	 /* how many active */
-	__u32 ld_default_stripe_count;     /* how many objects are used */
-	__u32 ld_pattern;		  /* default PATTERN_RAID0 */
-	__u64 ld_default_stripe_size;      /* in bytes */
-	__u64 ld_default_stripe_offset;    /* in bytes */
+	__u32 ld_active_tgt_count;	/* how many active */
+	__u32 ld_default_stripe_count;  /* how many objects are used */
+	__u32 ld_pattern;		/* default PATTERN_RAID0 */
+	__u64 ld_default_stripe_size;   /* in bytes */
+	__u64 ld_default_stripe_offset; /* in bytes */
 	__u32 ld_padding_0;		/* unused */
-	__u32 ld_qos_maxage;	       /* in second */
+	__u32 ld_qos_maxage;		/* in second */
 	__u32 ld_padding_1;		/* also fix lustre_swab_lov_desc */
 	__u32 ld_padding_2;		/* also fix lustre_swab_lov_desc */
 	struct obd_uuid ld_uuid;
@@ -2659,7 +2525,7 @@
  *   LDLM requests:
  */
 /* opcodes -- MUST be distinct from OST/MDS opcodes */
-typedef enum {
+enum ldlm_cmd {
 	LDLM_ENQUEUE     = 101,
 	LDLM_CONVERT     = 102,
 	LDLM_CANCEL      = 103,
@@ -2668,7 +2534,7 @@
 	LDLM_GL_CALLBACK = 106,
 	LDLM_SET_INFO    = 107,
 	LDLM_LAST_OPC
-} ldlm_cmd_t;
+};
 #define LDLM_FIRST_OPC LDLM_ENQUEUE
 
 #define RES_NAME_SIZE 4
@@ -2687,7 +2553,7 @@
 }
 
 /* lock types */
-typedef enum {
+enum ldlm_mode {
 	LCK_MINMODE = 0,
 	LCK_EX      = 1,
 	LCK_PW      = 2,
@@ -2698,17 +2564,17 @@
 	LCK_GROUP   = 64,
 	LCK_COS     = 128,
 	LCK_MAXMODE
-} ldlm_mode_t;
+};
 
 #define LCK_MODE_NUM    8
 
-typedef enum {
+enum ldlm_type {
 	LDLM_PLAIN     = 10,
 	LDLM_EXTENT    = 11,
 	LDLM_FLOCK     = 12,
 	LDLM_IBITS     = 13,
 	LDLM_MAX_TYPE
-} ldlm_type_t;
+};
 
 #define LDLM_MIN_TYPE LDLM_PLAIN
 
@@ -2747,7 +2613,8 @@
  * the first fields of the ldlm_flock structure because there is only
  * one ldlm_swab routine to process the ldlm_policy_data_t union. if
  * this ever changes we will need to swab the union differently based
- * on the resource type. */
+ * on the resource type.
+ */
 
 typedef union {
 	struct ldlm_extent l_extent;
@@ -2768,15 +2635,15 @@
 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
 
 struct ldlm_resource_desc {
-	ldlm_type_t lr_type;
+	enum ldlm_type lr_type;
 	__u32 lr_padding;       /* also fix lustre_swab_ldlm_resource_desc */
 	struct ldlm_res_id lr_name;
 };
 
 struct ldlm_lock_desc {
 	struct ldlm_resource_desc l_resource;
-	ldlm_mode_t l_req_mode;
-	ldlm_mode_t l_granted_mode;
+	enum ldlm_mode l_req_mode;
+	enum ldlm_mode l_granted_mode;
 	ldlm_wire_policy_data_t l_policy_data;
 };
 
@@ -2793,7 +2660,8 @@
 void lustre_swab_ldlm_request(struct ldlm_request *rq);
 
 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
- * Otherwise, 2 are available. */
+ * Otherwise, 2 are available.
+ */
 #define ldlm_request_bufsize(count, type)				\
 ({								      \
 	int _avail = LDLM_LOCKREQ_HANDLES;			      \
@@ -2820,7 +2688,7 @@
 /*
  * Opcodes for mountconf (mgs and mgc)
  */
-typedef enum {
+enum mgs_cmd {
 	MGS_CONNECT = 250,
 	MGS_DISCONNECT,
 	MGS_EXCEPTION,	 /* node died, etc. */
@@ -2829,7 +2697,7 @@
 	MGS_SET_INFO,
 	MGS_CONFIG_READ,
 	MGS_LAST_OPC
-} mgs_cmd_t;
+};
 #define MGS_FIRST_OPC MGS_CONNECT
 
 #define MGS_PARAM_MAXLEN 1024
@@ -2918,13 +2786,13 @@
  * Opcodes for multiple servers.
  */
 
-typedef enum {
+enum obd_cmd {
 	OBD_PING = 400,
 	OBD_LOG_CANCEL,
 	OBD_QC_CALLBACK,
 	OBD_IDX_READ,
 	OBD_LAST_OPC
-} obd_cmd_t;
+};
 #define OBD_FIRST_OPC OBD_PING
 
 /* catalog of log objects */
@@ -2933,7 +2801,7 @@
 struct llog_logid {
 	struct ost_id		lgl_oi;
 	__u32		   lgl_ogen;
-} __attribute__((packed));
+} __packed;
 
 /** Records written to the CATALOGS list */
 #define CATLIST "CATALOGS"
@@ -2942,7 +2810,7 @@
 	__u32		   lci_padding1;
 	__u32		   lci_padding2;
 	__u32		   lci_padding3;
-} __attribute__((packed));
+} __packed;
 
 /* Log data record types - there is no specific reason that these need to
  * be related to the RPC opcodes, but no reason not to (may be handy later?)
@@ -2950,7 +2818,7 @@
 #define LLOG_OP_MAGIC 0x10600000
 #define LLOG_OP_MASK  0xfff00000
 
-typedef enum {
+enum llog_op_type {
 	LLOG_PAD_MAGIC		= LLOG_OP_MAGIC | 0x00000,
 	OST_SZ_REC		= LLOG_OP_MAGIC | 0x00f00,
 	/* OST_RAID1_REC	= LLOG_OP_MAGIC | 0x01000, never used */
@@ -2970,7 +2838,7 @@
 	HSM_AGENT_REC		= LLOG_OP_MAGIC | 0x80000,
 	LLOG_HDR_MAGIC		= LLOG_OP_MAGIC | 0x45539,
 	LLOG_LOGID_MAGIC	= LLOG_OP_MAGIC | 0x4553b,
-} llog_op_type;
+};
 
 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
 	(((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
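LLOG_REC_HDR_NEEDS_SWABBING() compares the record type against the byte-swapped magic; a minimal usage sketch (the wrapper name is hypothetical):

	static inline bool llog_rec_needs_swab(const struct llog_rec_hdr *rec)
	{
		/* true when the on-disk record was written with the other endianness */
		return LLOG_REC_HDR_NEEDS_SWABBING(rec);
	}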
@@ -3006,7 +2874,7 @@
 	__u64			lid_padding2;
 	__u64			lid_padding3;
 	struct llog_rec_tail	lid_tail;
-} __attribute__((packed));
+} __packed;
 
 struct llog_unlink_rec {
 	struct llog_rec_hdr	lur_hdr;
@@ -3014,7 +2882,7 @@
 	__u32			lur_oseq;
 	__u32			lur_count;
 	struct llog_rec_tail	lur_tail;
-} __attribute__((packed));
+} __packed;
 
 struct llog_unlink64_rec {
 	struct llog_rec_hdr	lur_hdr;
@@ -3024,7 +2892,7 @@
 	__u64			lur_padding2;
 	__u64			lur_padding3;
 	struct llog_rec_tail    lur_tail;
-} __attribute__((packed));
+} __packed;
 
 struct llog_setattr64_rec {
 	struct llog_rec_hdr	lsr_hdr;
@@ -3035,7 +2903,7 @@
 	__u32			lsr_gid_h;
 	__u64			lsr_padding;
 	struct llog_rec_tail    lsr_tail;
-} __attribute__((packed));
+} __packed;
 
 struct llog_size_change_rec {
 	struct llog_rec_hdr	lsc_hdr;
@@ -3045,16 +2913,7 @@
 	__u64			lsc_padding2;
 	__u64			lsc_padding3;
 	struct llog_rec_tail	lsc_tail;
-} __attribute__((packed));
-
-#define CHANGELOG_MAGIC 0xca103000
-
-/** \a changelog_rec_type's that can't be masked */
-#define CHANGELOG_MINMASK (1 << CL_MARK)
-/** bits covering all \a changelog_rec_type's */
-#define CHANGELOG_ALLMASK 0XFFFFFFFF
-/** default \a changelog_rec_type mask */
-#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE)
+} __packed;
 
 /* changelog llog name, needed by client replicators */
 #define CHANGELOG_CATALOG "changelog_catalog"
@@ -3062,22 +2921,20 @@
 struct changelog_setinfo {
 	__u64 cs_recno;
 	__u32 cs_id;
-} __attribute__((packed));
+} __packed;
 
 /** changelog record */
 struct llog_changelog_rec {
 	struct llog_rec_hdr  cr_hdr;
 	struct changelog_rec cr;
 	struct llog_rec_tail cr_tail; /**< for_sizezof_only */
-} __attribute__((packed));
+} __packed;
 
 struct llog_changelog_ext_rec {
 	struct llog_rec_hdr      cr_hdr;
 	struct changelog_ext_rec cr;
 	struct llog_rec_tail     cr_tail; /**< for_sizezof_only */
-} __attribute__((packed));
-
-#define CHANGELOG_USER_PREFIX "cl"
+} __packed;
 
 struct llog_changelog_user_rec {
 	struct llog_rec_hdr   cur_hdr;
@@ -3085,7 +2942,7 @@
 	__u32		 cur_padding;
 	__u64		 cur_endrec;
 	struct llog_rec_tail  cur_tail;
-} __attribute__((packed));
+} __packed;
 
 enum agent_req_status {
 	ARS_WAITING,
@@ -3123,21 +2980,22 @@
 	struct llog_rec_hdr	arr_hdr;	/**< record header */
 	__u32			arr_status;	/**< status of the request */
 						/* must match enum
-						 * agent_req_status */
+						 * agent_req_status
+						 */
 	__u32			arr_archive_id;	/**< backend archive number */
 	__u64			arr_flags;	/**< req flags */
-	__u64			arr_compound_id;	/**< compound cookie */
+	__u64			arr_compound_id;/**< compound cookie */
 	__u64			arr_req_create;	/**< req. creation time */
 	__u64			arr_req_change;	/**< req. status change time */
 	struct hsm_action_item	arr_hai;	/**< req. to the agent */
-	struct llog_rec_tail	arr_tail; /**< record tail for_sizezof_only */
-} __attribute__((packed));
+	struct llog_rec_tail	arr_tail;   /**< record tail for_sizezof_only */
+} __packed;
 
 /* Old llog gen for compatibility */
 struct llog_gen {
 	__u64 mnt_cnt;
 	__u64 conn_cnt;
-} __attribute__((packed));
+} __packed;
 
 struct llog_gen_rec {
 	struct llog_rec_hdr	lgr_hdr;
@@ -3175,19 +3033,21 @@
 	__u32		   llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
 	__u32		   llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
 	struct llog_rec_tail    llh_tail;
-} __attribute__((packed));
+} __packed;
 
 #define LLOG_BITMAP_SIZE(llh)  (__u32)((llh->llh_hdr.lrh_len -		\
 					llh->llh_bitmap_offset -	\
 					sizeof(llh->llh_tail)) * 8)
 
-/** log cookies are used to reference a specific log file and a record therein */
+/** log cookies are used to reference a specific log file and a record
+ * therein
+ */
 struct llog_cookie {
 	struct llog_logid       lgc_lgl;
 	__u32		   lgc_subsys;
 	__u32		   lgc_index;
 	__u32		   lgc_padding;
-} __attribute__((packed));
+} __packed;
 
 /** llog protocol */
 enum llogd_rpc_ops {
@@ -3196,7 +3056,7 @@
 	LLOG_ORIGIN_HANDLE_READ_HEADER  = 503,
 	LLOG_ORIGIN_HANDLE_WRITE_REC    = 504,
 	LLOG_ORIGIN_HANDLE_CLOSE	= 505,
-	LLOG_ORIGIN_CONNECT	     = 506,
+	LLOG_ORIGIN_CONNECT		= 506,
 	LLOG_CATINFO			= 507,  /* deprecated */
 	LLOG_ORIGIN_HANDLE_PREV_BLOCK   = 508,
 	LLOG_ORIGIN_HANDLE_DESTROY      = 509,  /* for destroy llog object*/
@@ -3212,13 +3072,13 @@
 	__u32 lgd_saved_index;
 	__u32 lgd_len;
 	__u64 lgd_cur_offset;
-} __attribute__((packed));
+} __packed;
 
 struct llogd_conn_body {
 	struct llog_gen	 lgdc_gen;
 	struct llog_logid       lgdc_logid;
 	__u32		   lgdc_ctxt_idx;
-} __attribute__((packed));
+} __packed;
 
 /* Note: 64-bit types are 64-bit aligned in structure */
 struct obdo {
@@ -3245,17 +3105,18 @@
 	__u64		   o_ioepoch;      /* epoch in ost writes */
 	__u32		   o_stripe_idx;   /* holds stripe idx */
 	__u32		   o_parent_ver;
-	struct lustre_handle    o_handle;       /* brw: lock handle to prolong
-						 * locks */
-	struct llog_cookie      o_lcookie;      /* destroy: unlink cookie from
-						 * MDS */
+	struct lustre_handle    o_handle;  /* brw: lock handle to prolong locks
+					    */
+	struct llog_cookie      o_lcookie; /* destroy: unlink cookie from MDS
+					    */
 	__u32			o_uid_h;
 	__u32			o_gid_h;
 
 	__u64			o_data_version; /* getattr: sum of iversion for
 						 * each stripe.
 						 * brw: grant space consumed on
-						 * the client for the write */
+						 * the client for the write
+						 */
 	__u64			o_padding_4;
 	__u64			o_padding_5;
 	__u64			o_padding_6;
@@ -3273,13 +3134,14 @@
 {
 	*wobdo = *lobdo;
 	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
-	if (ocd == NULL)
+	if (!ocd)
 		return;
 
 	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
 	    fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
 		/* Currently OBD_FL_OSTID will only be used when 2.4 echo
-		 * client communicate with pre-2.4 server */
+		 * client communicates with a pre-2.4 server
+		 */
 		wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
 		wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
 	}
@@ -3292,7 +3154,7 @@
 	__u32 local_flags = 0;
 
 	if (lobdo->o_valid & OBD_MD_FLFLAGS)
-		 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
+		local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
 
 	*lobdo = *wobdo;
 	if (local_flags != 0) {
@@ -3300,7 +3162,7 @@
 		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
 		lobdo->o_flags |= local_flags;
 	}
-	if (ocd == NULL)
+	if (!ocd)
 		return;
 
 	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
@@ -3349,100 +3211,14 @@
 void dump_ost_body(struct ost_body *ob);
 void dump_rcs(__u32 *rc);
 
-#define IDX_INFO_MAGIC 0x3D37CC37
-
-/* Index file transfer through the network. The server serializes the index into
- * a byte stream which is sent to the client via a bulk transfer */
-struct idx_info {
-	__u32		ii_magic;
-
-	/* reply: see idx_info_flags below */
-	__u32		ii_flags;
-
-	/* request & reply: number of lu_idxpage (to be) transferred */
-	__u16		ii_count;
-	__u16		ii_pad0;
-
-	/* request: requested attributes passed down to the iterator API */
-	__u32		ii_attrs;
-
-	/* request & reply: index file identifier (FID) */
-	struct lu_fid	ii_fid;
-
-	/* reply: version of the index file before starting to walk the index.
-	 * Please note that the version can be modified at any time during the
-	 * transfer */
-	__u64		ii_version;
-
-	/* request: hash to start with:
-	 * reply: hash of the first entry of the first lu_idxpage and hash
-	 *	of the entry to read next if any */
-	__u64		ii_hash_start;
-	__u64		ii_hash_end;
-
-	/* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
-	 * set */
-	__u16		ii_keysize;
-
-	/* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
-	 * is set */
-	__u16		ii_recsize;
-
-	__u32		ii_pad1;
-	__u64		ii_pad2;
-	__u64		ii_pad3;
-};
-
-void lustre_swab_idx_info(struct idx_info *ii);
-
-#define II_END_OFF	MDS_DIR_END_OFF /* all entries have been read */
-
-/* List of flags used in idx_info::ii_flags */
-enum idx_info_flags {
-	II_FL_NOHASH	= 1 << 0, /* client doesn't care about hash value */
-	II_FL_VARKEY	= 1 << 1, /* keys can be of variable size */
-	II_FL_VARREC	= 1 << 2, /* records can be of variable size */
-	II_FL_NONUNQ	= 1 << 3, /* index supports non-unique keys */
-};
-
-#define LIP_MAGIC 0x8A6D6B6C
-
-/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
-struct lu_idxpage {
-	/* 16-byte header */
-	__u32	lip_magic;
-	__u16	lip_flags;
-	__u16	lip_nr;   /* number of entries in the container */
-	__u64	lip_pad0; /* additional padding for future use */
-
-	/* key/record pairs are stored in the remaining 4080 bytes.
-	 * depending upon the flags in idx_info::ii_flags, each key/record
-	 * pair might be preceded by:
-	 * - a hash value
-	 * - the key size (II_FL_VARKEY is set)
-	 * - the record size (II_FL_VARREC is set)
-	 *
-	 * For the time being, we only support fixed-size key & record. */
-	char	lip_entries[0];
-};
-
-#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
-
-/* Gather all possible type associated with a 4KB container */
-union lu_page {
-	struct lu_dirpage	lp_dir; /* for MDS_READPAGE */
-	struct lu_idxpage	lp_idx; /* for OBD_IDX_READ */
-	char			lp_array[LU_PAGE_SIZE];
-};
-
 /* security opcodes */
-typedef enum {
+enum sec_cmd {
 	SEC_CTX_INIT	    = 801,
 	SEC_CTX_INIT_CONT       = 802,
 	SEC_CTX_FINI	    = 803,
 	SEC_LAST_OPC,
 	SEC_FIRST_OPC	   = SEC_CTX_INIT
-} sec_cmd_t;
+};
 
 /*
  * capa related definitions
@@ -3451,7 +3227,8 @@
 #define CAPA_HMAC_KEY_MAX_LEN   56
 
 /* NB take care when changing the sequence of elements this struct,
- * because the offset info is used in find_capa() */
+ * because the offset info is used in find_capa()
+ */
 struct lustre_capa {
 	struct lu_fid   lc_fid;	 /** fid */
 	__u64	   lc_opc;	 /** operations allowed */
@@ -3463,7 +3240,7 @@
 /* FIXME: y2038 time_t overflow: */
 	__u32	   lc_expiry;      /** expiry time (sec) */
 	__u8	    lc_hmac[CAPA_HMAC_MAX_LEN];   /** HMAC */
-} __attribute__((packed));
+} __packed;
 
 void lustre_swab_lustre_capa(struct lustre_capa *c);
 
@@ -3497,7 +3274,7 @@
 	__u32   lk_keyid;     /**< key# */
 	__u32   lk_padding;
 	__u8    lk_key[CAPA_HMAC_KEY_MAX_LEN];    /**< key */
-} __attribute__((packed));
+} __packed;
 
 /** The link ea holds 1 \a link_ea_entry for each hardlink */
 #define LINK_EA_MAGIC 0x11EAF1DFUL
@@ -3518,7 +3295,7 @@
 	unsigned char      lee_reclen[2];
 	unsigned char      lee_parent_fid[sizeof(struct lu_fid)];
 	char	       lee_name[0];
-} __attribute__((packed));
+} __packed;
 
 /** fid2path request/reply structure */
 struct getinfo_fid2path {
@@ -3527,7 +3304,7 @@
 	__u32	   gf_linkno;
 	__u32	   gf_pathlen;
 	char	    gf_path[0];
-} __attribute__((packed));
+} __packed;
 
 void lustre_swab_fid2path (struct getinfo_fid2path *gf);
 
@@ -3558,7 +3335,7 @@
  */
 struct hsm_progress_kernel {
 	/* Field taken from struct hsm_progress */
-	lustre_fid		hpk_fid;
+	struct lu_fid		hpk_fid;
 	__u64			hpk_cookie;
 	struct hsm_extent	hpk_extent;
 	__u16			hpk_flags;
@@ -3567,7 +3344,7 @@
 	/* Additional fields */
 	__u64			hpk_data_version;
 	__u64			hpk_padding2;
-} __attribute__((packed));
+} __packed;
 
 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
 void lustre_swab_hsm_current_action(struct hsm_current_action *action);
@@ -3576,92 +3353,6 @@
 void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
 void lustre_swab_hsm_request(struct hsm_request *hr);
 
-/**
- * These are object update opcode under UPDATE_OBJ, which is currently
- * being used by cross-ref operations between MDT.
- *
- * During the cross-ref operation, the Master MDT, which the client send the
- * request to, will disassembly the operation into object updates, then OSP
- * will send these updates to the remote MDT to be executed.
- *
- *   Update request format
- *   magic:  UPDATE_BUFFER_MAGIC_V1
- *   Count:  How many updates in the req.
- *   bufs[0] : following are packets of object.
- *   update[0]:
- *		type: object_update_op, the op code of update
- *		fid: The object fid of the update.
- *		lens/bufs: other parameters of the update.
- *   update[1]:
- *		type: object_update_op, the op code of update
- *		fid: The object fid of the update.
- *		lens/bufs: other parameters of the update.
- *   ..........
- *   update[7]:	type: object_update_op, the op code of update
- *		fid: The object fid of the update.
- *		lens/bufs: other parameters of the update.
- *   Current 8 maxim updates per object update request.
- *
- *******************************************************************
- *   update reply format:
- *
- *   ur_version: UPDATE_REPLY_V1
- *   ur_count:   The count of the reply, which is usually equal
- *		 to the number of updates in the request.
- *   ur_lens:    The reply lengths of each object update.
- *
- *   replies:    1st update reply  [4bytes_ret: other body]
- *		 2nd update reply  [4bytes_ret: other body]
- *		 .....
- *		 nth update reply  [4bytes_ret: other body]
- *
- *   For each reply of the update, the format would be
- *	 result(4 bytes):Other stuff
- */
-
-#define UPDATE_MAX_OPS		10
-#define UPDATE_BUFFER_MAGIC_V1	0xBDDE0001
-#define UPDATE_BUFFER_MAGIC	UPDATE_BUFFER_MAGIC_V1
-#define UPDATE_BUF_COUNT	8
-enum object_update_op {
-	OBJ_CREATE		= 1,
-	OBJ_DESTROY		= 2,
-	OBJ_REF_ADD		= 3,
-	OBJ_REF_DEL		= 4,
-	OBJ_ATTR_SET		= 5,
-	OBJ_ATTR_GET		= 6,
-	OBJ_XATTR_SET		= 7,
-	OBJ_XATTR_GET		= 8,
-	OBJ_INDEX_LOOKUP	= 9,
-	OBJ_INDEX_INSERT	= 10,
-	OBJ_INDEX_DELETE	= 11,
-	OBJ_LAST
-};
-
-struct update {
-	__u32		u_type;
-	__u32		u_batchid;
-	struct lu_fid	u_fid;
-	__u32		u_lens[UPDATE_BUF_COUNT];
-	__u32		u_bufs[0];
-};
-
-struct update_buf {
-	__u32	ub_magic;
-	__u32	ub_count;
-	__u32	ub_bufs[0];
-};
-
-#define UPDATE_REPLY_V1		0x00BD0001
-struct update_reply {
-	__u32	ur_version;
-	__u32	ur_count;
-	__u32	ur_lens[0];
-};
-
-void lustre_swab_update_buf(struct update_buf *ub);
-void lustre_swab_update_reply_buf(struct update_reply *ur);
-
 /** layout swap request structure
  * fid1 and fid2 are in mdt_body
  */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 2b4dd65..2e9f025 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -85,9 +85,8 @@
 	__u32	   os_namelen;
 	__u64	   os_maxbytes;
 	__u32	   os_state;       /**< obd_statfs_state OS_STATE_* flag */
-	__u32	   os_fprecreated;	/* objs available now to the caller */
-					/* used in QoS code to find preferred
-					 * OSTs */
+	__u32	   os_fprecreated; /* objs available now to the caller */
+				   /* used in QoS code to find preferred OSTs */
 	__u32	   os_spare2;
 	__u32	   os_spare3;
 	__u32	   os_spare4;
@@ -135,8 +134,9 @@
 
 /* Userspace should treat lu_fid as opaque, and only use the following methods
  * to print or parse them.  Other functions (e.g. compare, swab) could be moved
- * here from lustre_idl.h if needed. */
-typedef struct lu_fid lustre_fid;
+ * here from lustre_idl.h if needed.
+ */
+struct lu_fid;
 
 /**
  * Following struct for object attributes, that will be kept inode's EA.
@@ -266,7 +266,8 @@
 /* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular
  * files, but are unlikely to be used in practice and are not harmful if
  * used incorrectly.  O_NOCTTY and FASYNC are only meaningful for character
- * devices and are safe for use on new files (See LU-812, LU-4209). */
+ * devices and are safe for use on new files (See LU-812, LU-4209).
+ */
 #define O_LOV_DELAY_CREATE	(O_NOCTTY | FASYNC)
 
 #define LL_FILE_IGNORE_LOCK     0x00000001
@@ -302,7 +303,8 @@
  * The limit of 12 pages is somewhat arbitrary, but is a reasonably large
  * allocation that is sufficient for the current generation of systems.
  *
- * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */
+ * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1)
+ */
 #define LOV_MAX_STRIPE_COUNT 2000  /* ((12 * 4096 - 256) / 24) */
 #define LOV_ALL_STRIPES       0xffff /* only valid for directories */
 #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
@@ -323,9 +325,11 @@
 	__u16 lmm_stripe_count;   /* num stripes in use for this object */
 	union {
 		__u16 lmm_stripe_offset;  /* starting stripe offset in
-					   * lmm_objects, use when writing */
+					   * lmm_objects, use when writing
+					   */
 		__u16 lmm_layout_gen;     /* layout generation number
-					   * used when reading */
+					   * used when reading
+					   */
 	};
 	struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
 } __attribute__((packed,  __may_alias__));
@@ -338,9 +342,11 @@
 	__u16 lmm_stripe_count;   /* num stripes in use for this object */
 	union {
 		__u16 lmm_stripe_offset;  /* starting stripe offset in
-					   * lmm_objects, use when writing */
+					   * lmm_objects, use when writing
+					   */
 		__u16 lmm_layout_gen;     /* layout generation number
-					   * used when reading */
+					   * used when reading
+					   */
 	};
 	char  lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */
 	struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
@@ -444,7 +450,8 @@
 {
 	if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
 		/* Obviously not safe, but for printfs, no real harm done...
-		   we're always null-terminated, even in a race. */
+		 * we're always null-terminated, even in a race.
+		 */
 		static char temp[sizeof(*uuid)];
 
 		memcpy(temp, uuid->uuid, sizeof(*uuid) - 1);
@@ -455,8 +462,9 @@
 }
 
 /* Extract fsname from uuid (or target name) of a target
-   e.g. (myfs-OST0007_UUID -> myfs)
-   see also deuuidify. */
+ * e.g. (myfs-OST0007_UUID -> myfs)
+ * see also deuuidify.
+ */
 static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
 {
 	char *p;
@@ -465,11 +473,12 @@
 	buf[buflen - 1] = '\0';
 	p = strrchr(buf, '-');
 	if (p)
-	   *p = '\0';
+		*p = '\0';
 }
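A minimal usage sketch for obd_uuid2fsname(), matching the myfs-OST0007_UUID example above; the local buffers are hypothetical:

	char uuid[] = "myfs-OST0007_UUID";
	char fsname[16];

	obd_uuid2fsname(fsname, uuid, sizeof(fsname));
	/* fsname now holds "myfs": the tail after the last '-' is stripped */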
 
 /* printf display format
-   e.g. printf("file FID is "DFID"\n", PFID(fid)); */
+ * e.g. printf("file FID is "DFID"\n", PFID(fid));
+ */
 #define FID_NOBRACE_LEN 40
 #define FID_LEN (FID_NOBRACE_LEN + 2)
 #define DFID_NOBRACE "%#llx:0x%x:0x%x"
@@ -480,7 +489,8 @@
 	(fid)->f_ver
 
 /* scanf input parse format -- strip '[' first.
-   e.g. sscanf(fidstr, SFID, RFID(&fid)); */
+ * e.g. sscanf(fidstr, SFID, RFID(&fid));
+ */
 #define SFID "0x%llx:0x%x:0x%x"
 #define RFID(fid)     \
 	&((fid)->f_seq), \
@@ -566,9 +576,9 @@
 /* hdr + MDT index */
 #define LUSTRE_VOLATILE_IDX	LUSTRE_VOLATILE_HDR":%.4X:"
 
-typedef enum lustre_quota_version {
+enum lustre_quota_version {
 	LUSTRE_QUOTA_V2 = 1
-} lustre_quota_version_t;
+};
 
 /* XXX: same as if_dqinfo struct in kernel */
 struct obd_dqinfo {
@@ -698,7 +708,8 @@
 #define CLF_HSM_LAST	15
 
 /* Remove bits higher than _h, then extract the value
- * between _h and _l by shifting lower weigth to bit 0. */
+ * between _h and _l by shifting lower weight to bit 0.
+ */
 #define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \
 				   >> (CLF_HSM_LAST - _h + _l))
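CLF_GET_BITS() masks off bits above _h and shifts bits _h.._l down to bit 0; the sketch below uses made-up bit positions (5..3) purely for illustration:

	static inline unsigned int clf_get_bits_example(__u16 clf_flags)
	{
		/* keeps bits 5..3 of clf_flags and returns them right-aligned */
		return CLF_GET_BITS(clf_flags, 5, 3);
	}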
 
@@ -761,10 +772,10 @@
 	__u64		 cr_prev;  /**< last index for this target fid */
 	__u64		 cr_time;
 	union {
-		lustre_fid    cr_tfid;	/**< target fid */
+		struct lu_fid    cr_tfid;	/**< target fid */
 		__u32	 cr_markerflags; /**< CL_MARK flags */
 	};
-	lustre_fid	    cr_pfid;	/**< parent fid */
+	struct lu_fid	    cr_pfid;	/**< parent fid */
 	char		  cr_name[0];     /**< last element */
 } __packed;
 
@@ -775,18 +786,19 @@
 struct changelog_ext_rec {
 	__u16			cr_namelen;
 	__u16			cr_flags; /**< (flags & CLF_FLAGMASK) |
-						CLF_EXT_VERSION */
+					   *	CLF_EXT_VERSION
+					   */
 	__u32			cr_type;  /**< \a changelog_rec_type */
 	__u64			cr_index; /**< changelog record number */
 	__u64			cr_prev;  /**< last index for this target fid */
 	__u64			cr_time;
 	union {
-		lustre_fid	cr_tfid;	/**< target fid */
+		struct lu_fid	cr_tfid;	/**< target fid */
 		__u32		cr_markerflags; /**< CL_MARK flags */
 	};
-	lustre_fid		cr_pfid;	/**< target parent fid */
-	lustre_fid		cr_sfid;	/**< source fid, or zero */
-	lustre_fid		cr_spfid;       /**< source parent fid, or zero */
+	struct lu_fid		cr_pfid;	/**< target parent fid */
+	struct lu_fid		cr_sfid;	/**< source fid, or zero */
+	struct lu_fid		cr_spfid;     /**< source parent fid, or zero */
 	char			cr_name[0];     /**< last element */
 } __packed;
 
@@ -835,7 +847,8 @@
 };
 
 #define LL_DV_NOFLUSH 0x01   /* Do not take READ EXTENT LOCK before sampling
-				version. Dirty caches are left unchanged. */
+			      * version. Dirty caches are left unchanged.
+			      */
 
 #ifndef offsetof
 # define offsetof(typ, memb)     ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -976,8 +989,8 @@
 };
 
 struct hsm_user_item {
-       lustre_fid	hui_fid;
-       struct hsm_extent hui_extent;
+	struct lu_fid	hui_fid;
+	struct hsm_extent hui_extent;
 } __packed;
 
 struct hsm_user_request {
@@ -1046,8 +1059,8 @@
 struct hsm_action_item {
 	__u32      hai_len;     /* valid size of this struct */
 	__u32      hai_action;  /* hsm_copytool_action, but use known size */
-	lustre_fid hai_fid;     /* Lustre FID to operated on */
-	lustre_fid hai_dfid;    /* fid used for data access */
+	struct lu_fid hai_fid;     /* Lustre FID to operate on */
+	struct lu_fid hai_dfid;    /* fid used for data access */
 	struct hsm_extent hai_extent;  /* byte range to operate on */
 	__u64      hai_cookie;  /* action cookie from coordinator */
 	__u64      hai_gid;     /* grouplock id */
@@ -1095,7 +1108,8 @@
 	__u32 padding1;
 	char  hal_fsname[0];   /* null-terminated */
 	/* struct hsm_action_item[hal_count] follows, aligned on 8-byte
-	   boundaries. See hai_zero */
+	 * boundaries. See hai_zero
+	 */
 } __packed;
 
 #ifndef HAVE_CFS_SIZE_ROUND
@@ -1157,7 +1171,7 @@
 #define HP_FLAG_RETRY     0x02
 
 struct hsm_progress {
-	lustre_fid		hp_fid;
+	struct lu_fid		hp_fid;
 	__u64			hp_cookie;
 	struct hsm_extent	hp_extent;
 	__u16			hp_flags;
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index eb6b292..23ae832 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -55,7 +55,8 @@
 /** If the LCFG_REQUIRED bit is set in a configuration command,
  * then the client is required to understand this parameter
  * in order to mount the filesystem. If it does not understand
- * a REQUIRED command the client mount will fail. */
+ * a REQUIRED command the client mount will fail.
+ */
 #define LCFG_REQUIRED	 0x0001000
 
 enum lcfg_command_type {
@@ -87,9 +88,11 @@
 	LCFG_POOL_DEL	   = 0x00ce023, /**< destroy an ost pool name */
 	LCFG_SET_LDLM_TIMEOUT   = 0x00ce030, /**< set ldlm_timeout */
 	LCFG_PRE_CLEANUP	= 0x00cf031, /**< call type-specific pre
-					      * cleanup cleanup */
+					      * cleanup
+					      */
 	LCFG_SET_PARAM		= 0x00ce032, /**< use set_param syntax to set
-					      *a proc parameters */
+					      * a proc parameter
+					      */
 };
 
 struct lustre_cfg_bufs {
@@ -128,7 +131,7 @@
 {
 	if (index >= LUSTRE_CFG_MAX_BUFCOUNT)
 		return;
-	if (bufs == NULL)
+	if (!bufs)
 		return;
 
 	if (bufs->lcfg_bufcount <= index)
@@ -158,7 +161,6 @@
 	int offset;
 	int bufcount;
 
-	LASSERT (lcfg != NULL);
 	LASSERT (index >= 0);
 
 	bufcount = lcfg->lcfg_bufcount;
@@ -191,7 +193,7 @@
 		return NULL;
 
 	s = lustre_cfg_buf(lcfg, index);
-	if (s == NULL)
+	if (!s)
 		return NULL;
 
 	/*
@@ -252,10 +254,6 @@
 
 static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
 {
-	int len;
-
-	len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens);
-
 	kfree(lcfg);
 	return;
 }
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 7c6933f..95fd360 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -65,7 +65,8 @@
 /****************** mount command *********************/
 
 /* The lmd is only used internally by Lustre; mount simply passes
-   everything as string options */
+ * everything as string options
+ */
 
 #define LMD_MAGIC    0xbdacbd03
 #define LMD_PARAMS_MAXLEN	4096
@@ -79,23 +80,26 @@
 	int	lmd_recovery_time_soft;
 	int	lmd_recovery_time_hard;
 	char      *lmd_dev;	   /* device name */
-	char      *lmd_profile;       /* client only */
+	char      *lmd_profile;    /* client only */
 	char      *lmd_mgssec;	/* sptlrpc flavor to mgs */
-	char      *lmd_opts;	  /* lustre mount options (as opposed to
-					 _device_ mount options) */
+	char      *lmd_opts;	/* lustre mount options (as opposed to
+				 * _device_ mount options)
+				 */
 	char      *lmd_params;	/* lustre params */
-	__u32     *lmd_exclude;       /* array of OSTs to ignore */
-	char	*lmd_mgs;	   /* MGS nid */
-	char	*lmd_osd_type;      /* OSD type */
+	__u32     *lmd_exclude; /* array of OSTs to ignore */
+	char	*lmd_mgs;	/* MGS nid */
+	char	*lmd_osd_type;  /* OSD type */
 };
 
 #define LMD_FLG_SERVER		0x0001	/* Mounting a server */
 #define LMD_FLG_CLIENT		0x0002	/* Mounting a client */
 #define LMD_FLG_ABORT_RECOV	0x0008	/* Abort recovery */
 #define LMD_FLG_NOSVC		0x0010	/* Only start MGS/MGC for servers,
-					   no other services */
-#define LMD_FLG_NOMGS		0x0020	/* Only start target for servers, reusing
-					   existing MGS services */
+					 * no other services
+					 */
+#define LMD_FLG_NOMGS		0x0020	/* Only start target for servers,
+					 * reusing existing MGS services
+					 */
 #define LMD_FLG_WRITECONF	0x0040	/* Rewrite config log */
 #define LMD_FLG_NOIR		0x0080	/* NO imperative recovery */
 #define LMD_FLG_NOSCRUB		0x0100	/* Do not trigger scrub automatically */
@@ -116,231 +120,6 @@
 #define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */
 #define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */
 
-#define LR_SERVER_SIZE   512
-#define LR_CLIENT_START 8192
-#define LR_CLIENT_SIZE   128
-#if LR_CLIENT_START < LR_SERVER_SIZE
-#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE"
-#endif
-
-/*
- * This limit is arbitrary (131072 clients on x86), but it is convenient to use
- * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation.
- * If we need more than 131072 clients (order-2 allocation on x86) then this
- * should become an array of single-page pointers that are allocated on demand.
- */
-#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
-#define LR_MAX_CLIENTS (128 * 1024UL)
-#else
-#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
-#endif
-
-/** COMPAT_146: this is an OST (temporary) */
-#define OBD_COMPAT_OST	  0x00000002
-/** COMPAT_146: this is an MDT (temporary) */
-#define OBD_COMPAT_MDT	  0x00000004
-/** 2.0 server, interop flag to show server version is changed */
-#define OBD_COMPAT_20	   0x00000008
-
-/** MDS handles LOV_OBJID file */
-#define OBD_ROCOMPAT_LOVOBJID   0x00000001
-
-/** OST handles group subdirs */
-#define OBD_INCOMPAT_GROUPS     0x00000001
-/** this is an OST */
-#define OBD_INCOMPAT_OST	0x00000002
-/** this is an MDT */
-#define OBD_INCOMPAT_MDT	0x00000004
-/** common last_rvcd format */
-#define OBD_INCOMPAT_COMMON_LR  0x00000008
-/** FID is enabled */
-#define OBD_INCOMPAT_FID	0x00000010
-/** Size-on-MDS is enabled */
-#define OBD_INCOMPAT_SOM	0x00000020
-/** filesystem using iam format to store directory entries */
-#define OBD_INCOMPAT_IAM_DIR    0x00000040
-/** LMA attribute contains per-inode incompatible flags */
-#define OBD_INCOMPAT_LMA	0x00000080
-/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16
- * bits are now used to store a generation. Once we start changing the layout
- * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count
- * will be confused by interpreting stripe_count | gen << 16 as the actual
- * stripe count */
-#define OBD_INCOMPAT_LMM_VER    0x00000100
-/** multiple OI files for MDT */
-#define OBD_INCOMPAT_MULTI_OI   0x00000200
-
-/* Data stored per server at the head of the last_rcvd file.  In le32 order.
-   This should be common to filter_internal.h, lustre_mds.h */
-struct lr_server_data {
-	__u8  lsd_uuid[40];	/* server UUID */
-	__u64 lsd_last_transno;    /* last completed transaction ID */
-	__u64 lsd_compat14;	/* reserved - compat with old last_rcvd */
-	__u64 lsd_mount_count;     /* incarnation number */
-	__u32 lsd_feature_compat;  /* compatible feature flags */
-	__u32 lsd_feature_rocompat;/* read-only compatible feature flags */
-	__u32 lsd_feature_incompat;/* incompatible feature flags */
-	__u32 lsd_server_size;     /* size of server data area */
-	__u32 lsd_client_start;    /* start of per-client data area */
-	__u16 lsd_client_size;     /* size of per-client data area */
-	__u16 lsd_subdir_count;    /* number of subdirectories for objects */
-	__u64 lsd_catalog_oid;     /* recovery catalog object id */
-	__u32 lsd_catalog_ogen;    /* recovery catalog inode generation */
-	__u8  lsd_peeruuid[40];    /* UUID of MDS associated with this OST */
-	__u32 lsd_osd_index;       /* index number of OST in LOV */
-	__u32 lsd_padding1;	/* was lsd_mdt_index, unused in 2.4.0 */
-	__u32 lsd_start_epoch;     /* VBR: start epoch from last boot */
-	/** transaction values since lsd_trans_table_time */
-	__u64 lsd_trans_table[LR_EXPIRE_INTERVALS];
-	/** start point of transno table below */
-	__u32 lsd_trans_table_time; /* time of first slot in table above */
-	__u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */
-	__u8  lsd_padding[LR_SERVER_SIZE - 288];
-};
-
-/* Data stored per client in the last_rcvd file.  In le32 order. */
-struct lsd_client_data {
-	__u8  lcd_uuid[40];      /* client UUID */
-	__u64 lcd_last_transno; /* last completed transaction ID */
-	__u64 lcd_last_xid;     /* xid for the last transaction */
-	__u32 lcd_last_result;  /* result from last RPC */
-	__u32 lcd_last_data;    /* per-op data (disposition for open &c.) */
-	/* for MDS_CLOSE requests */
-	__u64 lcd_last_close_transno; /* last completed transaction ID */
-	__u64 lcd_last_close_xid;     /* xid for the last transaction */
-	__u32 lcd_last_close_result;  /* result from last RPC */
-	__u32 lcd_last_close_data;    /* per-op data */
-	/* VBR: last versions */
-	__u64 lcd_pre_versions[4];
-	__u32 lcd_last_epoch;
-	/** orphans handling for delayed export rely on that */
-	__u32 lcd_first_epoch;
-	__u8  lcd_padding[LR_CLIENT_SIZE - 128];
-};
-
-/* bug20354: the lcd_uuid for export of clients may be wrong */
-static inline void check_lcd(char *obd_name, int index,
-			     struct lsd_client_data *lcd)
-{
-	int length = sizeof(lcd->lcd_uuid);
-
-	if (strnlen((char *)lcd->lcd_uuid, length) == length) {
-		lcd->lcd_uuid[length - 1] = '\0';
-
-		LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n",
-			       lcd->lcd_uuid, obd_name, index);
-	}
-}
-
-/* last_rcvd handling */
-static inline void lsd_le_to_cpu(struct lr_server_data *buf,
-				 struct lr_server_data *lsd)
-{
-	int i;
-
-	memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid));
-	lsd->lsd_last_transno     = le64_to_cpu(buf->lsd_last_transno);
-	lsd->lsd_compat14	 = le64_to_cpu(buf->lsd_compat14);
-	lsd->lsd_mount_count      = le64_to_cpu(buf->lsd_mount_count);
-	lsd->lsd_feature_compat   = le32_to_cpu(buf->lsd_feature_compat);
-	lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat);
-	lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat);
-	lsd->lsd_server_size      = le32_to_cpu(buf->lsd_server_size);
-	lsd->lsd_client_start     = le32_to_cpu(buf->lsd_client_start);
-	lsd->lsd_client_size      = le16_to_cpu(buf->lsd_client_size);
-	lsd->lsd_subdir_count     = le16_to_cpu(buf->lsd_subdir_count);
-	lsd->lsd_catalog_oid      = le64_to_cpu(buf->lsd_catalog_oid);
-	lsd->lsd_catalog_ogen     = le32_to_cpu(buf->lsd_catalog_ogen);
-	memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid));
-	lsd->lsd_osd_index	= le32_to_cpu(buf->lsd_osd_index);
-	lsd->lsd_padding1	= le32_to_cpu(buf->lsd_padding1);
-	lsd->lsd_start_epoch      = le32_to_cpu(buf->lsd_start_epoch);
-	for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
-		lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]);
-	lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time);
-	lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals);
-}
-
-static inline void lsd_cpu_to_le(struct lr_server_data *lsd,
-				 struct lr_server_data *buf)
-{
-	int i;
-
-	memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid));
-	buf->lsd_last_transno     = cpu_to_le64(lsd->lsd_last_transno);
-	buf->lsd_compat14	 = cpu_to_le64(lsd->lsd_compat14);
-	buf->lsd_mount_count      = cpu_to_le64(lsd->lsd_mount_count);
-	buf->lsd_feature_compat   = cpu_to_le32(lsd->lsd_feature_compat);
-	buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat);
-	buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat);
-	buf->lsd_server_size      = cpu_to_le32(lsd->lsd_server_size);
-	buf->lsd_client_start     = cpu_to_le32(lsd->lsd_client_start);
-	buf->lsd_client_size      = cpu_to_le16(lsd->lsd_client_size);
-	buf->lsd_subdir_count     = cpu_to_le16(lsd->lsd_subdir_count);
-	buf->lsd_catalog_oid      = cpu_to_le64(lsd->lsd_catalog_oid);
-	buf->lsd_catalog_ogen     = cpu_to_le32(lsd->lsd_catalog_ogen);
-	memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid));
-	buf->lsd_osd_index	  = cpu_to_le32(lsd->lsd_osd_index);
-	buf->lsd_padding1	  = cpu_to_le32(lsd->lsd_padding1);
-	buf->lsd_start_epoch      = cpu_to_le32(lsd->lsd_start_epoch);
-	for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
-		buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]);
-	buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time);
-	buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals);
-}
-
-static inline void lcd_le_to_cpu(struct lsd_client_data *buf,
-				 struct lsd_client_data *lcd)
-{
-	memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid));
-	lcd->lcd_last_transno       = le64_to_cpu(buf->lcd_last_transno);
-	lcd->lcd_last_xid	   = le64_to_cpu(buf->lcd_last_xid);
-	lcd->lcd_last_result	= le32_to_cpu(buf->lcd_last_result);
-	lcd->lcd_last_data	  = le32_to_cpu(buf->lcd_last_data);
-	lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno);
-	lcd->lcd_last_close_xid     = le64_to_cpu(buf->lcd_last_close_xid);
-	lcd->lcd_last_close_result  = le32_to_cpu(buf->lcd_last_close_result);
-	lcd->lcd_last_close_data    = le32_to_cpu(buf->lcd_last_close_data);
-	lcd->lcd_pre_versions[0]    = le64_to_cpu(buf->lcd_pre_versions[0]);
-	lcd->lcd_pre_versions[1]    = le64_to_cpu(buf->lcd_pre_versions[1]);
-	lcd->lcd_pre_versions[2]    = le64_to_cpu(buf->lcd_pre_versions[2]);
-	lcd->lcd_pre_versions[3]    = le64_to_cpu(buf->lcd_pre_versions[3]);
-	lcd->lcd_last_epoch	 = le32_to_cpu(buf->lcd_last_epoch);
-	lcd->lcd_first_epoch	= le32_to_cpu(buf->lcd_first_epoch);
-}
-
-static inline void lcd_cpu_to_le(struct lsd_client_data *lcd,
-				 struct lsd_client_data *buf)
-{
-	memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid));
-	buf->lcd_last_transno       = cpu_to_le64(lcd->lcd_last_transno);
-	buf->lcd_last_xid	   = cpu_to_le64(lcd->lcd_last_xid);
-	buf->lcd_last_result	= cpu_to_le32(lcd->lcd_last_result);
-	buf->lcd_last_data	  = cpu_to_le32(lcd->lcd_last_data);
-	buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno);
-	buf->lcd_last_close_xid     = cpu_to_le64(lcd->lcd_last_close_xid);
-	buf->lcd_last_close_result  = cpu_to_le32(lcd->lcd_last_close_result);
-	buf->lcd_last_close_data    = cpu_to_le32(lcd->lcd_last_close_data);
-	buf->lcd_pre_versions[0]    = cpu_to_le64(lcd->lcd_pre_versions[0]);
-	buf->lcd_pre_versions[1]    = cpu_to_le64(lcd->lcd_pre_versions[1]);
-	buf->lcd_pre_versions[2]    = cpu_to_le64(lcd->lcd_pre_versions[2]);
-	buf->lcd_pre_versions[3]    = cpu_to_le64(lcd->lcd_pre_versions[3]);
-	buf->lcd_last_epoch	 = cpu_to_le32(lcd->lcd_last_epoch);
-	buf->lcd_first_epoch	= cpu_to_le32(lcd->lcd_first_epoch);
-}
-
-static inline __u64 lcd_last_transno(struct lsd_client_data *lcd)
-{
-	return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ?
-		lcd->lcd_last_transno : lcd->lcd_last_close_transno);
-}
-
-static inline __u64 lcd_last_xid(struct lsd_client_data *lcd)
-{
-	return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ?
-		lcd->lcd_last_xid : lcd->lcd_last_close_xid);
-}
-
 /****************** superblock additional info *********************/
 
 struct ll_sb_info;
@@ -360,7 +139,8 @@
 	char			  lsi_osd_type[16];
 	char			  lsi_fstype[16];
 	struct backing_dev_info   lsi_bdi;     /* each client mountpoint needs
-						  own backing_dev_info */
+						* own backing_dev_info
+						*/
 };
 
 #define LSI_UMOUNT_FAILOVER	      0x00200000
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 9b319f1..144b5af 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -69,7 +69,7 @@
 /**
  * LDLM non-error return states
  */
-typedef enum {
+enum ldlm_error {
 	ELDLM_OK = 0,
 
 	ELDLM_LOCK_CHANGED = 300,
@@ -80,7 +80,7 @@
 
 	ELDLM_NAMESPACE_EXISTS = 400,
 	ELDLM_BAD_NAMESPACE    = 401
-} ldlm_error_t;
+};
 
 /**
  * LDLM namespace type.
@@ -145,14 +145,15 @@
 #define LCK_COMPAT_COS (LCK_COS)
 /** @} Lock Compatibility Matrix */
 
-extern ldlm_mode_t lck_compat_array[];
+extern enum ldlm_mode lck_compat_array[];
 
-static inline void lockmode_verify(ldlm_mode_t mode)
+static inline void lockmode_verify(enum ldlm_mode mode)
 {
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
 }
 
-static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
+static inline int lockmode_compat(enum ldlm_mode exist_mode,
+				  enum ldlm_mode new_mode)
 {
        return (lck_compat_array[exist_mode] & new_mode);
 }
@@ -249,7 +250,8 @@
 	/** Current biggest client lock volume. Protected by pl_lock. */
 	__u64			pl_client_lock_volume;
 	/** Lock volume factor. SLV on client is calculated as follows:
-	 *  server_slv * lock_volume_factor. */
+	 *  server_slv * lock_volume_factor.
+	 */
 	atomic_t		pl_lock_volume_factor;
 	/** Time when last SLV from server was obtained. */
 	time64_t		pl_recalc_time;
@@ -295,10 +297,10 @@
  * LDLM pools related, type of lock pool in the namespace.
  * Greedy means release cached locks aggressively
  */
-typedef enum {
+enum ldlm_appetite {
 	LDLM_NAMESPACE_GREEDY = 1 << 0,
 	LDLM_NAMESPACE_MODEST = 1 << 1
-} ldlm_appetite_t;
+};
 
 struct ldlm_ns_bucket {
 	/** back pointer to namespace */
@@ -317,7 +319,7 @@
 	LDLM_NSS_LAST
 };
 
-typedef enum {
+enum ldlm_ns_type {
 	/** invalid type */
 	LDLM_NS_TYPE_UNKNOWN    = 0,
 	/** mdc namespace */
@@ -332,7 +334,7 @@
 	LDLM_NS_TYPE_MGC,
 	/** mgs namespace */
 	LDLM_NS_TYPE_MGT,
-} ldlm_ns_type_t;
+};
 
 /**
  * LDLM Namespace.
@@ -373,7 +375,7 @@
 
 	/**
 	 * Namespace connect flags supported by server (may be changed via
-	 * /proc, LRU resize may be disabled/enabled).
+	 * sysfs, LRU resize may be disabled/enabled).
 	 */
 	__u64			ns_connect_flags;
 
@@ -439,7 +441,7 @@
 	/** LDLM pool structure for this namespace */
 	struct ldlm_pool	ns_pool;
 	/** Definition of how eagerly unused locks will be released from LRU */
-	ldlm_appetite_t		ns_appetite;
+	enum ldlm_appetite	ns_appetite;
 
 	/** Limit of parallel AST RPC count. */
 	unsigned		ns_max_parallel_ast;
@@ -465,7 +467,6 @@
  */
 static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
 {
-	LASSERT(ns != NULL);
 	return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
 }
 
@@ -474,14 +475,12 @@
  */
 static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
 {
-	LASSERT(ns != NULL);
 	return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
 }
 
 static inline void ns_register_cancel(struct ldlm_namespace *ns,
 				      ldlm_cancel_for_recovery arg)
 {
-	LASSERT(ns != NULL);
 	ns->ns_cancel_for_recovery = arg;
 }
 
@@ -503,7 +502,8 @@
 	struct list_head		 gl_list; /* linkage to other gl work structs */
 	__u32			 gl_flags;/* see LDLM_GL_WORK_* below */
 	union ldlm_gl_desc	*gl_desc; /* glimpse descriptor to be packed in
-					   * glimpse callback request */
+					   * glimpse callback request
+					   */
 };
 
 /** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
@@ -512,8 +512,9 @@
 /** Interval node data for each LDLM_EXTENT lock. */
 struct ldlm_interval {
 	struct interval_node	li_node;  /* node for tree management */
-	struct list_head		li_group; /* the locks which have the same
-					   * policy - group of the policy */
+	struct list_head	li_group; /* the locks which have the same
+					   * policy - group of the policy
+					   */
 };
 
 #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
@@ -527,7 +528,7 @@
 struct ldlm_interval_tree {
 	/** Tree size. */
 	int			lit_size;
-	ldlm_mode_t		lit_mode;  /* lock mode */
+	enum ldlm_mode		lit_mode;  /* lock mode */
 	struct interval_node	*lit_root; /* actual ldlm_interval */
 };
 
@@ -535,12 +536,13 @@
 #define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
 
 /** Cancel flags. */
-typedef enum {
+enum ldlm_cancel_flags {
 	LCF_ASYNC      = 0x1, /* Cancel locks asynchronously. */
 	LCF_LOCAL      = 0x2, /* Cancel locks locally, not notifying server */
 	LCF_BL_AST     = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
-			       * in the same RPC */
-} ldlm_cancel_flags_t;
+			       * in the same RPC
+			       */
+};
 
 struct ldlm_flock {
 	__u64 start;
@@ -559,7 +561,7 @@
 	struct ldlm_inodebits l_inodebits;
 } ldlm_policy_data_t;
 
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
 				  const ldlm_wire_policy_data_t *wpolicy,
 				  ldlm_policy_data_t *lpolicy);
 
@@ -637,11 +639,11 @@
 	 * Requested mode.
 	 * Protected by lr_lock.
 	 */
-	ldlm_mode_t		l_req_mode;
+	enum ldlm_mode		l_req_mode;
 	/**
 	 * Granted mode, also protected by lr_lock.
 	 */
-	ldlm_mode_t		l_granted_mode;
+	enum ldlm_mode		l_granted_mode;
 	/** Lock completion handler pointer. Called when lock is granted. */
 	ldlm_completion_callback l_completion_ast;
 	/**
@@ -841,20 +843,19 @@
 
 	/**
 	 * protected by lr_lock
-	 * @{ */
+	 * @{
+	 */
 	/** List of locks in granted state */
 	struct list_head		lr_granted;
 	/**
 	 * List of locks that could not be granted due to conflicts and
-	 * that are waiting for conflicts to go away */
+	 * that are waiting for conflicts to go away
+	 */
 	struct list_head		lr_waiting;
 	/** @} */
 
-	/* XXX No longer needed? Remove ASAP */
-	ldlm_mode_t		lr_most_restr;
-
 	/** Type of locks this resource can hold. Only one type per resource. */
-	ldlm_type_t		lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
+	enum ldlm_type		lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
 
 	/** Resource name */
 	struct ldlm_res_id	lr_name;
@@ -921,7 +922,7 @@
 {
 	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
 
-	if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
+	if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init)
 		return ns->ns_lvbo->lvbo_init(res);
 
 	return 0;
@@ -931,7 +932,7 @@
 {
 	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
-	if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
+	if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size)
 		return ns->ns_lvbo->lvbo_size(lock);
 
 	return 0;
@@ -941,10 +942,9 @@
 {
 	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
-	if (ns->ns_lvbo != NULL) {
-		LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
+	if (ns->ns_lvbo)
 		return ns->ns_lvbo->lvbo_fill(lock, buf, len);
-	}
+
 	return 0;
 }
 
@@ -1015,7 +1015,7 @@
 
 /** Non-rate-limited lock printing function for debugging purposes. */
 #define LDLM_DEBUG(lock, fmt, a...)   do {				  \
-	if (likely(lock != NULL)) {					    \
+	if (likely(lock)) {						    \
 		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL);      \
 		ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock,	    \
 				"### " fmt, ##a);			    \
@@ -1025,7 +1025,7 @@
 } while (0)
 
 typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
-				      int first_enq, ldlm_error_t *err,
+				      int first_enq, enum ldlm_error *err,
 				      struct list_head *work_list);
 
 /**
@@ -1042,7 +1042,8 @@
  *
  * LDLM provides for a way to iterate through every lock on a resource or
  * namespace or every resource in a namespace.
- * @{ */
+ * @{
+ */
 int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
 			  ldlm_iterator_t iter, void *data);
 /** @} ldlm_iterator */
@@ -1091,7 +1092,7 @@
 	struct ldlm_lock *lock;
 
 	lock = __ldlm_handle2lock(h, flags);
-	if (lock != NULL)
+	if (lock)
 		LDLM_LOCK_REF_DEL(lock);
 	return lock;
 }
@@ -1111,7 +1112,7 @@
 	return 0;
 }
 
-int ldlm_error2errno(ldlm_error_t error);
+int ldlm_error2errno(enum ldlm_error error);
 
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
 void ldlm_dump_export_locks(struct obd_export *exp);
@@ -1168,12 +1169,13 @@
 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
-			    const struct ldlm_res_id *, ldlm_type_t type,
-			    ldlm_policy_data_t *, ldlm_mode_t mode,
-			    struct lustre_handle *, int unref);
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
-					__u64 *bits);
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+			       const struct ldlm_res_id *,
+			       enum ldlm_type type, ldlm_policy_data_t *,
+			       enum ldlm_mode mode, struct lustre_handle *,
+			       int unref);
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+					   __u64 *bits);
 void ldlm_lock_cancel(struct ldlm_lock *lock);
 void ldlm_lock_dump_handle(int level, struct lustre_handle *);
 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
@@ -1181,8 +1183,8 @@
 /* resource.c */
 struct ldlm_namespace *
 ldlm_namespace_new(struct obd_device *obd, char *name,
-		   ldlm_side_t client, ldlm_appetite_t apt,
-		   ldlm_ns_type_t ns_type);
+		   ldlm_side_t client, enum ldlm_appetite apt,
+		   enum ldlm_ns_type ns_type);
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
 void ldlm_namespace_get(struct ldlm_namespace *ns);
 void ldlm_namespace_put(struct ldlm_namespace *ns);
@@ -1193,7 +1195,7 @@
 struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
 					struct ldlm_resource *parent,
 					const struct ldlm_res_id *,
-					ldlm_type_t type, int create);
+					enum ldlm_type type, int create);
 int ldlm_resource_putref(struct ldlm_resource *res);
 void ldlm_resource_add_lock(struct ldlm_resource *res,
 			    struct list_head *head,
@@ -1219,7 +1221,8 @@
  * These AST handlers are typically used for server-side local locks and are
  * also used by client-side lock handlers to perform minimum level base
  * processing.
- * @{ */
+ * @{
+ */
 int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
 int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
 /** @} ldlm_local_ast */
@@ -1227,7 +1230,8 @@
 /** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
  * These are typically used by client and server (*_local versions)
  * to obtain and release locks.
- * @{ */
+ * @{
+ */
 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 		     struct ldlm_enqueue_info *einfo,
 		     const struct ldlm_res_id *res_id,
@@ -1244,29 +1248,32 @@
 		      struct list_head *cancels, int count);
 
 int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
-			  ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
+			  enum ldlm_type type, __u8 with_policy,
+			  enum ldlm_mode mode,
 			  __u64 *flags, void *lvb, __u32 lvb_len,
 			  struct lustre_handle *lockh, int rc);
 int ldlm_cli_update_pool(struct ptlrpc_request *req);
 int ldlm_cli_cancel(struct lustre_handle *lockh,
-		    ldlm_cancel_flags_t cancel_flags);
+		    enum ldlm_cancel_flags cancel_flags);
 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
-			   ldlm_cancel_flags_t flags, void *opaque);
+			   enum ldlm_cancel_flags flags, void *opaque);
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 				    const struct ldlm_res_id *res_id,
 				    ldlm_policy_data_t *policy,
-				    ldlm_mode_t mode,
-				    ldlm_cancel_flags_t flags,
+				    enum ldlm_mode mode,
+				    enum ldlm_cancel_flags flags,
 				    void *opaque);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
 			       struct list_head *cancels,
 			       ldlm_policy_data_t *policy,
-			       ldlm_mode_t mode, __u64 lock_flags,
-			       ldlm_cancel_flags_t cancel_flags, void *opaque);
+			       enum ldlm_mode mode, __u64 lock_flags,
+			       enum ldlm_cancel_flags cancel_flags,
+			       void *opaque);
 int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
-			       ldlm_cancel_flags_t flags);
+			       enum ldlm_cancel_flags flags);
 int ldlm_cli_cancel_list(struct list_head *head, int count,
-			 struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
+			 struct ptlrpc_request *req,
+			 enum ldlm_cancel_flags flags);
 /** @} ldlm_cli_api */
 
 /* mds/handler.c */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 0d3ed87..7f2ba2f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -57,7 +57,8 @@
 
 /**
  * Server placed lock on granted list, or a recovering client wants the
- * lock added to the granted list, no questions asked. */
+ * lock added to the granted list, no questions asked.
+ */
 #define LDLM_FL_BLOCK_GRANTED           0x0000000000000002ULL /* bit 1 */
 #define ldlm_is_block_granted(_l)       LDLM_TEST_FLAG((_l), 1ULL <<  1)
 #define ldlm_set_block_granted(_l)      LDLM_SET_FLAG((_l), 1ULL <<  1)
@@ -65,7 +66,8 @@
 
 /**
  * Server placed lock on conv list, or a recovering client wants the lock
- * added to the conv list, no questions asked. */
+ * added to the conv list, no questions asked.
+ */
 #define LDLM_FL_BLOCK_CONV              0x0000000000000004ULL /* bit 2 */
 #define ldlm_is_block_conv(_l)          LDLM_TEST_FLAG((_l), 1ULL <<  2)
 #define ldlm_set_block_conv(_l)         LDLM_SET_FLAG((_l), 1ULL <<  2)
@@ -73,7 +75,8 @@
 
 /**
  * Server placed lock on wait list, or a recovering client wants the lock
- * added to the wait list, no questions asked. */
+ * added to the wait list, no questions asked.
+ */
 #define LDLM_FL_BLOCK_WAIT              0x0000000000000008ULL /* bit 3 */
 #define ldlm_is_block_wait(_l)          LDLM_TEST_FLAG((_l), 1ULL <<  3)
 #define ldlm_set_block_wait(_l)         LDLM_SET_FLAG((_l), 1ULL <<  3)
@@ -87,7 +90,8 @@
 
 /**
  * Lock is being replayed.  This could probably be implied by the fact that
- * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
+ * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
+ */
 #define LDLM_FL_REPLAY                  0x0000000000000100ULL /* bit 8 */
 #define ldlm_is_replay(_l)              LDLM_TEST_FLAG((_l), 1ULL <<  8)
 #define ldlm_set_replay(_l)             LDLM_SET_FLAG((_l), 1ULL <<  8)
@@ -125,7 +129,8 @@
 
 /**
  * Server told not to wait if blocked. For AGL, OST will not send glimpse
- * callback. */
+ * callback.
+ */
 #define LDLM_FL_BLOCK_NOWAIT            0x0000000000040000ULL /* bit 18 */
 #define ldlm_is_block_nowait(_l)        LDLM_TEST_FLAG((_l), 1ULL << 18)
 #define ldlm_set_block_nowait(_l)       LDLM_SET_FLAG((_l), 1ULL << 18)
@@ -141,7 +146,8 @@
  * Immediately cancel such locks when they block some other locks. Send
  * cancel notification to original lock holder, but expect no reply. This
  * is for clients (like liblustre) that cannot be expected to reliably
- * response to blocking AST. */
+ * respond to blocking AST.
+ */
 #define LDLM_FL_CANCEL_ON_BLOCK         0x0000000000800000ULL /* bit 23 */
 #define ldlm_is_cancel_on_block(_l)     LDLM_TEST_FLAG((_l), 1ULL << 23)
 #define ldlm_set_cancel_on_block(_l)    LDLM_SET_FLAG((_l), 1ULL << 23)
@@ -164,7 +170,8 @@
 
 /**
  * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
- * + race with upcoming bl_ast. */
+ * + race with upcoming bl_ast.
+ */
 #define LDLM_FL_FAIL_LOC                0x0000000100000000ULL /* bit 32 */
 #define ldlm_is_fail_loc(_l)            LDLM_TEST_FLAG((_l), 1ULL << 32)
 #define ldlm_set_fail_loc(_l)           LDLM_SET_FLAG((_l), 1ULL << 32)
@@ -172,7 +179,8 @@
 
 /**
  * Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it. */
+ * handled this lock and decided to skip it.
+ */
 #define LDLM_FL_SKIPPED                 0x0000000200000000ULL /* bit 33 */
 #define ldlm_is_skipped(_l)             LDLM_TEST_FLAG((_l), 1ULL << 33)
 #define ldlm_set_skipped(_l)            LDLM_SET_FLAG((_l), 1ULL << 33)
@@ -231,7 +239,8 @@
  * The proper fix is to do the granting inside of the completion AST,
  * which can be replaced with a LVB-aware wrapping function for OSC locks.
  * That change is pretty high-risk, though, and would need a lot more
- * testing. */
+ * testing.
+ */
 #define LDLM_FL_LVB_READY               0x0000020000000000ULL /* bit 41 */
 #define ldlm_is_lvb_ready(_l)           LDLM_TEST_FLAG((_l), 1ULL << 41)
 #define ldlm_set_lvb_ready(_l)          LDLM_SET_FLAG((_l), 1ULL << 41)
@@ -243,7 +252,8 @@
  * dirty pages.  It can remain on the granted list during this whole time.
  * Threads racing to update the KMS after performing their writeback need
  * to know to exclude each other's locks from the calculation as they walk
- * the granted list. */
+ * the granted list.
+ */
 #define LDLM_FL_KMS_IGNORE              0x0000040000000000ULL /* bit 42 */
 #define ldlm_is_kms_ignore(_l)          LDLM_TEST_FLAG((_l), 1ULL << 42)
 #define ldlm_set_kms_ignore(_l)         LDLM_SET_FLAG((_l), 1ULL << 42)
@@ -263,7 +273,8 @@
 
 /**
  * optimization hint: LDLM can run blocking callback from current context
- * w/o involving separate thread. in order to decrease cs rate */
+ * w/o involving separate thread, in order to decrease cs rate
+ */
 #define LDLM_FL_ATOMIC_CB               0x0000200000000000ULL /* bit 45 */
 #define ldlm_is_atomic_cb(_l)           LDLM_TEST_FLAG((_l), 1ULL << 45)
 #define ldlm_set_atomic_cb(_l)          LDLM_SET_FLAG((_l), 1ULL << 45)
@@ -280,7 +291,8 @@
  * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
  * dropped to let ldlm_callback_handler() return EINVAL to the server. It
  * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC. */
+ * too late to send a separate CANCEL RPC.
+ */
 #define LDLM_FL_BL_AST                  0x0000400000000000ULL /* bit 46 */
 #define ldlm_is_bl_ast(_l)              LDLM_TEST_FLAG((_l), 1ULL << 46)
 #define ldlm_set_bl_ast(_l)             LDLM_SET_FLAG((_l), 1ULL << 46)
@@ -295,7 +307,8 @@
 /**
  * Don't put lock into the LRU list, so that it is not canceled due
  * to aging.  Used by MGC locks, they are cancelled only at unmount or
- * by callback. */
+ * by callback.
+ */
 #define LDLM_FL_NO_LRU                  0x0001000000000000ULL /* bit 48 */
 #define ldlm_is_no_lru(_l)              LDLM_TEST_FLAG((_l), 1ULL << 48)
 #define ldlm_set_no_lru(_l)             LDLM_SET_FLAG((_l), 1ULL << 48)
@@ -304,7 +317,8 @@
 /**
  * Set for locks that failed and where the server has been notified.
  *
- * Protected by lock and resource locks. */
+ * Protected by lock and resource locks.
+ */
 #define LDLM_FL_FAIL_NOTIFIED           0x0002000000000000ULL /* bit 49 */
 #define ldlm_is_fail_notified(_l)       LDLM_TEST_FLAG((_l), 1ULL << 49)
 #define ldlm_set_fail_notified(_l)      LDLM_SET_FLAG((_l), 1ULL << 49)
@@ -315,7 +329,8 @@
  * be destroyed when last reference to them is released. Set by
  * ldlm_lock_destroy_internal().
  *
- * Protected by lock and resource locks. */
+ * Protected by lock and resource locks.
+ */
 #define LDLM_FL_DESTROYED               0x0004000000000000ULL /* bit 50 */
 #define ldlm_is_destroyed(_l)           LDLM_TEST_FLAG((_l), 1ULL << 50)
 #define ldlm_set_destroyed(_l)          LDLM_SET_FLAG((_l), 1ULL << 50)
@@ -333,7 +348,8 @@
  * NB: compared with check_res_locked(), checking this bit is cheaper.
  * Also, spin_is_locked() is deprecated for kernel code; one reason is
  * because it works only for SMP so user needs to add extra macros like
- * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels.
+ */
 #define LDLM_FL_RES_LOCKED              0x0010000000000000ULL /* bit 52 */
 #define ldlm_is_res_locked(_l)          LDLM_TEST_FLAG((_l), 1ULL << 52)
 #define ldlm_set_res_locked(_l)         LDLM_SET_FLAG((_l), 1ULL << 52)
@@ -343,7 +359,8 @@
  * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
  * lock-timeout timer and it will never be reset.
  *
- * Protected by lock and resource locks. */
+ * Protected by lock and resource locks.
+ */
 #define LDLM_FL_WAITED                  0x0020000000000000ULL /* bit 53 */
 #define ldlm_is_waited(_l)              LDLM_TEST_FLAG((_l), 1ULL << 53)
 #define ldlm_set_waited(_l)             LDLM_SET_FLAG((_l), 1ULL << 53)
@@ -365,10 +382,10 @@
 #define LDLM_TEST_FLAG(_l, _b)        (((_l)->l_flags & (_b)) != 0)
 
 /** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b)         (((_l)->l_flags |= (_b))
+#define LDLM_SET_FLAG(_l, _b)         ((_l)->l_flags |= (_b))
 
 /** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b)       (((_l)->l_flags &= ~(_b))
+#define LDLM_CLEAR_FLAG(_l, _b)       ((_l)->l_flags &= ~(_b))
 
 /** Mask of flags inherited from parent lock when doing intents. */
 #define LDLM_INHERIT_FLAGS            LDLM_FL_INHERIT_MASK
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 311e5aa..3014d27 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -50,62 +50,6 @@
 #include "lustre/lustre_idl.h"
 #include "lustre_dlm.h"
 
-struct mds_client_data;
-struct mdt_client_data;
-struct mds_idmap_table;
-struct mdt_idmap_table;
-
-/**
- * Target-specific export data
- */
-struct tg_export_data {
-	/** Protects led_lcd below */
-	struct mutex		ted_lcd_lock;
-	/** Per-client data for each export */
-	struct lsd_client_data	*ted_lcd;
-	/** Offset of record in last_rcvd file */
-	loff_t			ted_lr_off;
-	/** Client index in last_rcvd file */
-	int			ted_lr_idx;
-};
-
-/**
- * MDT-specific export data
- */
-struct mdt_export_data {
-	struct tg_export_data	med_ted;
-	/** List of all files opened by client on this MDT */
-	struct list_head		med_open_head;
-	spinlock_t		med_open_lock; /* med_open_head, mfd_list */
-	/** Bitmask of all ibit locks this MDT understands */
-	__u64			med_ibits_known;
-	struct mutex		med_idmap_mutex;
-	struct lustre_idmap_table *med_idmap;
-};
-
-struct ec_export_data { /* echo client */
-	struct list_head eced_locks;
-};
-
-/* In-memory access to client data from OST struct */
-/** Filter (oss-side) specific import data */
-struct filter_export_data {
-	struct tg_export_data	fed_ted;
-	spinlock_t		fed_lock;	/**< protects fed_mod_list */
-	long		       fed_dirty;    /* in bytes */
-	long		       fed_grant;    /* in bytes */
-	struct list_head		 fed_mod_list; /* files being modified */
-	int			fed_mod_count;/* items in fed_writing list */
-	long		       fed_pending;  /* bytes just being written */
-	__u32		      fed_group;
-	__u8		       fed_pagesize; /* log2 of client page size */
-};
-
-struct mgs_export_data {
-	struct list_head		med_clients;	/* mgc fs client via this exp */
-	spinlock_t		med_lock;	/* protect med_clients */
-};
-
 enum obd_option {
 	OBD_OPT_FORCE =	 0x0001,
 	OBD_OPT_FAILOVER =      0x0002,
@@ -179,7 +123,8 @@
 	 */
 	spinlock_t		  exp_lock;
 	/** Compatibility flags for this export are embedded into
-	 *  exp_connect_data */
+	 *  exp_connect_data
+	 */
 	struct obd_connect_data   exp_connect_data;
 	enum obd_option	   exp_flags;
 	unsigned long	     exp_failed:1,
@@ -200,22 +145,8 @@
 	/** blocking dlm lock list, protected by exp_bl_list_lock */
 	struct list_head		exp_bl_list;
 	spinlock_t		  exp_bl_list_lock;
-
-	/** Target specific data */
-	union {
-		struct tg_export_data     eu_target_data;
-		struct mdt_export_data    eu_mdt_data;
-		struct filter_export_data eu_filter_data;
-		struct ec_export_data     eu_ec_data;
-		struct mgs_export_data    eu_mgs_data;
-	} u;
 };
 
-#define exp_target_data u.eu_target_data
-#define exp_mdt_data    u.eu_mdt_data
-#define exp_filter_data u.eu_filter_data
-#define exp_ec_data     u.eu_ec_data
-
 static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp)
 {
 	return &exp->exp_connect_data.ocd_connect_flags;
@@ -228,7 +159,6 @@
 
 static inline int exp_max_brw_size(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE)
 		return exp->exp_connect_data.ocd_brw_size;
 
@@ -242,19 +172,16 @@
 
 static inline int exp_connect_cancelset(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET);
 }
 
 static inline int exp_connect_lru_resize(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
 }
 
 static inline int exp_connect_rmtclient(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
 }
 
@@ -268,14 +195,11 @@
 
 static inline int exp_connect_vbr(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
-	LASSERT(exp->exp_connection);
 	return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
 }
 
 static inline int exp_connect_som(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM);
 }
 
@@ -288,7 +212,6 @@
 {
 	struct obd_connect_data *ocd;
 
-	LASSERT(imp != NULL);
 	ocd = &imp->imp_connect_data;
 	return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE);
 }
@@ -300,7 +223,6 @@
 
 static inline bool exp_connect_lvb_type(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE)
 		return true;
 	else
@@ -311,7 +233,6 @@
 {
 	struct obd_connect_data *ocd;
 
-	LASSERT(imp != NULL);
 	ocd = &imp->imp_connect_data;
 	if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE)
 		return true;
@@ -331,13 +252,19 @@
 {
 	struct obd_connect_data *ocd;
 
-	LASSERT(imp != NULL);
 	ocd = &imp->imp_connect_data;
 	return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
 }
 
 struct obd_export *class_conn2export(struct lustre_handle *conn);
 
+#define KKUC_CT_DATA_MAGIC	0x092013cea
+struct kkuc_ct_data {
+	__u32		kcd_magic;
+	struct obd_uuid	kcd_uuid;
+	__u32		kcd_archive;
+};
+
 /** @} export */
 
 #endif /* __EXPORT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 9b1a9c6..ab91879 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -251,7 +251,8 @@
 
 /* For new FS (>= 2.4), the root FID will be changed to
  * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4),
- * the root FID will still be IGIF */
+ * the root FID will still be IGIF
+ */
 static inline int fid_is_root(const struct lu_fid *fid)
 {
 	return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
@@ -294,7 +295,8 @@
 	const __u64 seq = fid_seq(fid);
 
 	/* Here, we cannot distinguish whether the normal FID is for OST
-	 * object or not. It is caller's duty to check more if needed. */
+	 * object or not. It is caller's duty to check more if needed.
+	 */
 	return (!fid_is_last_id(fid) &&
 		(fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
 	       fid_is_root(fid) || fid_is_dot_lustre(fid);
@@ -516,7 +518,8 @@
 				    struct ldlm_res_id *name)
 {
 	/* Note: it is just a trick here to save some effort, probably the
-	 * correct way would be turn them into the FID and compare */
+	 * correct way would be to turn them into the FID and compare
+	 */
 	if (fid_seq_is_mdt0(ostid_seq(oi))) {
 		return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
 		       name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
@@ -589,12 +592,14 @@
 static inline __u32 fid_hash(const struct lu_fid *f, int bits)
 {
 	/* all objects with same id and different versions will belong to same
-	 * collisions list. */
+	 * collisions list.
+	 */
 	return hash_long(fid_flatten(f), bits);
 }
 
 /**
- * map fid to 32 bit value for ino on 32bit systems. */
+ * map fid to 32 bit value for ino on 32bit systems.
+ */
 static inline __u32 fid_flatten32(const struct lu_fid *fid)
 {
 	__u32 ino;
@@ -611,7 +616,8 @@
 	 * that inodes generated at about the same time have a reduced chance
 	 * of collisions. This will give a period of 2^10 = 1024 unique clients
 	 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
-	 * (from OID), or up to 128M inodes without collisions for new files. */
+	 * (from OID), or up to 128M inodes without collisions for new files.
+	 */
 	ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
 	       (seq >> (64 - (40-8)) & 0xffffff00) +
 	       (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 5511626..4cf2b0e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -71,50 +71,41 @@
 struct lu_server_fld {
 	/**
 	 * super sequence controller export, needed to forward fld
-	 * lookup  request. */
+	 * lookup request.
+	 */
 	struct obd_export       *lsf_control_exp;
 
-	/**
-	 * Client FLD cache. */
+	/** Client FLD cache. */
 	struct fld_cache	*lsf_cache;
 
-	/**
-	 * Protect index modifications */
+	/** Protect index modifications */
 	struct mutex		lsf_lock;
 
-	/**
-	 * Fld service name in form "fld-srv-lustre-MDTXXX" */
+	/** Fld service name in form "fld-srv-lustre-MDTXXX" */
 	char		     lsf_name[LUSTRE_MDT_MAXNAMELEN];
 
 };
 
 struct lu_client_fld {
-	/**
-	 * Client side debugfs entry. */
+	/** Client side debugfs entry. */
 	struct dentry		*lcf_debugfs_entry;
 
-	/**
-	 * List of exports client FLD knows about. */
+	/** List of exports client FLD knows about. */
 	struct list_head	       lcf_targets;
 
-	/**
-	 * Current hash to be used to chose an export. */
+	/** Current hash to be used to choose an export. */
 	struct lu_fld_hash      *lcf_hash;
 
-	/**
-	 * Exports count. */
+	/** Exports count. */
 	int		      lcf_count;
 
-	/**
-	 * Lock protecting exports list and fld_hash. */
+	/** Lock protecting exports list and fld_hash. */
 	spinlock_t		 lcf_lock;
 
-	/**
-	 * Client FLD cache. */
+	/** Client FLD cache. */
 	struct fld_cache	*lcf_cache;
 
-	/**
-	 * Client fld debugfs entry name. */
+	/** Client fld debugfs entry name. */
 	char			 lcf_name[LUSTRE_MDT_MAXNAMELEN];
 
 	int			 lcf_flags;
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index f39780a..27f169d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -65,7 +65,8 @@
  *
  * Now you're able to assign the results of cookie2handle directly to an
  * ldlm_lock.  If it's not at the top, you'll want to use container_of()
- * to compute the start of the structure based on the handle field. */
+ * to compute the start of the structure based on the handle field.
+ */
 struct portals_handle {
 	struct list_head			h_link;
 	__u64				h_cookie;
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 4e4230e..dac2d84 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -292,7 +292,8 @@
 				  /* need IR MNE swab */
 				  imp_need_mne_swab:1,
 				  /* import must be reconnected instead of
-				   * chose new connection */
+				   * choosing new connection
+				   */
 				  imp_force_reconnect:1,
 				  /* import has tried to connect with server */
 				  imp_connect_tried:1;
diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
new file mode 100644
index 0000000..970610b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
@@ -0,0 +1,55 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2013 Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ *
+ * Kernel <-> userspace communication routines.
+ * The definitions below are used in the kernel and userspace.
+ */
+
+#ifndef __LUSTRE_KERNELCOMM_H__
+#define __LUSTRE_KERNELCOMM_H__
+
+/* For declarations shared with userspace */
+#include "uapi_kernelcomm.h"
+
+/* prototype for callback function on kuc groups */
+typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg);
+
+/* Kernel methods */
+int libcfs_kkuc_msg_put(struct file *fp, void *payload);
+int libcfs_kkuc_group_put(unsigned int group, void *payload);
+int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group,
+			  void *data, size_t data_len);
+int libcfs_kkuc_group_rem(int uid, unsigned int group);
+int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func,
+			      void *cb_arg);
+
+#endif /* __LUSTRE_KERNELCOMM_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 428469f..f2223d5 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -153,9 +153,9 @@
 
 	/* buffers the kernel will treat as user pointers */
 	__u32  ioc_plen1;
-	char  *ioc_pbuf1;
+	void __user *ioc_pbuf1;
 	__u32  ioc_plen2;
-	char  *ioc_pbuf2;
+	void __user *ioc_pbuf2;
 
 	/* inline buffers for various arguments */
 	__u32  ioc_inllen1;
@@ -252,8 +252,8 @@
 #include "obd_support.h"
 
 /* function defined in lustre/obdclass/<platform>/<platform>-module.c */
-int obd_ioctl_getdata(char **buf, int *len, void *arg);
-int obd_ioctl_popdata(void *arg, void *data, int len);
+int obd_ioctl_getdata(char **buf, int *len, void __user *arg);
+int obd_ioctl_popdata(void __user *arg, void *data, int len);
 
 static inline void obd_ioctl_freedata(char *buf, int len)
 {
@@ -365,10 +365,10 @@
 /* OBD_IOC_LLOG_CATINFO is deprecated */
 #define OBD_IOC_LLOG_CATINFO	   _IOWR('f', 196, OBD_IOC_DATA_TYPE)
 
-#define ECHO_IOC_GET_STRIPE	    _IOWR('f', 200, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_SET_STRIPE	    _IOWR('f', 201, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_ENQUEUE	       _IOWR('f', 202, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_CANCEL		_IOWR('f', 203, OBD_IOC_DATA_TYPE)
+/*	#define ECHO_IOC_GET_STRIPE    _IOWR('f', 200, OBD_IOC_DATA_TYPE) */
+/*	#define ECHO_IOC_SET_STRIPE    _IOWR('f', 201, OBD_IOC_DATA_TYPE) */
+/*	#define ECHO_IOC_ENQUEUE       _IOWR('f', 202, OBD_IOC_DATA_TYPE) */
+/*	#define ECHO_IOC_CANCEL        _IOWR('f', 203, OBD_IOC_DATA_TYPE) */
 
 #define OBD_IOC_GET_OBJ_VERSION	_IOR('f', 210, OBD_IOC_DATA_TYPE)
 
@@ -387,7 +387,8 @@
  */
 
 /* Until such time as we get_info the per-stripe maximum from the OST,
- * we define this to be 2T - 4k, which is the ext3 maxbytes. */
+ * we define this to be 2T - 4k, which is the ext3 maxbytes.
+ */
 #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
 
 /* Special values for remove LOV EA from disk */
@@ -540,7 +541,7 @@
 	l_add_wait(&wq, &__wait);					      \
 									       \
 	/* Block all signals (just the non-fatal ones if no timeout). */       \
-	if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
+	if (info->lwi_on_signal && (__timeout == 0 || __allow_intr))   \
 		__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);	      \
 	else								   \
 		__blocked = cfs_block_sigsinv(0);			      \
@@ -562,13 +563,13 @@
 			__timeout = cfs_time_sub(__timeout,		    \
 					    cfs_time_sub(interval, remaining));\
 			if (__timeout == 0) {				  \
-				if (info->lwi_on_timeout == NULL ||	    \
+				if (!info->lwi_on_timeout ||		      \
 				    info->lwi_on_timeout(info->lwi_cb_data)) { \
 					ret = -ETIMEDOUT;		      \
 					break;				 \
 				}					      \
 				/* Take signals after the timeout expires. */  \
-				if (info->lwi_on_signal != NULL)	       \
+				if (info->lwi_on_signal)		       \
 				    (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
 			}						      \
 		}							      \
@@ -578,7 +579,7 @@
 		if (condition)						 \
 			break;						 \
 		if (cfs_signal_pending()) {				    \
-			if (info->lwi_on_signal != NULL &&		     \
+			if (info->lwi_on_signal &&		     \
 			    (__timeout == 0 || __allow_intr)) {		\
 				if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
 					info->lwi_on_signal(info->lwi_cb_data);\
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index e4fc8b5..49618e1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -241,7 +241,8 @@
 	struct obd_llog_group   *loc_olg; /* group containing that ctxt */
 	struct obd_export       *loc_exp; /* parent "disk" export (e.g. MDS) */
 	struct obd_import       *loc_imp; /* to use in RPC's: can be backward
-					     pointing import */
+					   * pointing import
+					   */
 	struct llog_operations  *loc_logops;
 	struct llog_handle      *loc_handle;
 	struct mutex		 loc_mutex; /* protect loc_imp */
@@ -255,7 +256,7 @@
 static inline int llog_handle2ops(struct llog_handle *loghandle,
 				  struct llog_operations **lop)
 {
-	if (loghandle == NULL || loghandle->lgh_logops == NULL)
+	if (!loghandle || !loghandle->lgh_logops)
 		return -EINVAL;
 
 	*lop = loghandle->lgh_logops;
@@ -272,7 +273,7 @@
 
 static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
 {
-	if (ctxt == NULL)
+	if (!ctxt)
 		return;
 	LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
 	CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
@@ -294,7 +295,7 @@
 	LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
 
 	spin_lock(&olg->olg_lock);
-	if (olg->olg_ctxts[index] != NULL) {
+	if (olg->olg_ctxts[index]) {
 		spin_unlock(&olg->olg_lock);
 		return -EEXIST;
 	}
@@ -311,7 +312,7 @@
 	LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
 
 	spin_lock(&olg->olg_lock);
-	if (olg->olg_ctxts[index] == NULL)
+	if (!olg->olg_ctxts[index])
 		ctxt = NULL;
 	else
 		ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
@@ -335,7 +336,7 @@
 
 static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index)
 {
-	return (olg->olg_ctxts[index] == NULL);
+	return (!olg->olg_ctxts[index]);
 }
 
 static inline int llog_ctxt_null(struct obd_device *obd, int index)
@@ -354,7 +355,7 @@
 	rc = llog_handle2ops(loghandle, &lop);
 	if (rc)
 		return rc;
-	if (lop->lop_next_block == NULL)
+	if (!lop->lop_next_block)
 		return -EOPNOTSUPP;
 
 	rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 3da3733..df94f9f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -81,8 +81,8 @@
 static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
 				    struct lookup_intent *it)
 {
-	if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
-			   it->it_op == IT_LAYOUT))
+	if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+		   it->it_op == IT_LAYOUT))
 		return;
 
 	/* This would normally block until the existing request finishes.
@@ -90,7 +90,8 @@
 	 * done, then set rpcl_it to MDC_FAKE_RPCL_IT.  Once that is set
 	 * it will only be cleared when all fake requests are finished.
 	 * Only when all fake requests are finished can normal requests
-	 * be sent, to ensure they are recoverable again. */
+	 * be sent, to ensure they are recoverable again.
+	 */
  again:
 	mutex_lock(&lck->rpcl_mutex);
 
@@ -105,22 +106,23 @@
 	 * just turned off but there are still requests in progress.
 	 * Wait until they finish.  It doesn't need to be efficient
 	 * in this extremely rare case, just have low overhead in
-	 * the common case when it isn't true. */
+	 * the common case when it isn't true.
+	 */
 	while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
 		mutex_unlock(&lck->rpcl_mutex);
 		schedule_timeout(cfs_time_seconds(1) / 4);
 		goto again;
 	}
 
-	LASSERT(lck->rpcl_it == NULL);
+	LASSERT(!lck->rpcl_it);
 	lck->rpcl_it = it;
 }
 
 static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
 				    struct lookup_intent *it)
 {
-	if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
-			   it->it_op == IT_LAYOUT))
+	if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+		   it->it_op == IT_LAYOUT))
 		return;
 
 	if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index d834ddd..ac08b25 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -76,7 +76,8 @@
  * In order for the client and server to properly negotiate the maximum
  * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
  * value.  The client is free to limit the actual RPC size for any bulk
- * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
+ * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
+ */
 #define PTLRPC_BULK_OPS_BITS	2
 #define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
 /**
@@ -85,7 +86,8 @@
  * protocol limitation on the maximum RPC size that can be used by any
  * RPC sent to that server in the future.  Instead, the server should
  * use the negotiated per-client ocd_brw_size to determine the bulk
- * RPC count. */
+ * RPC count.
+ */
 #define PTLRPC_BULK_OPS_MASK	(~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
 
 /**
@@ -419,16 +421,18 @@
 	/** A spinlock to protect the reply state flags */
 	spinlock_t		rs_lock;
 	/** Reply state flags */
-	unsigned long	  rs_difficult:1;     /* ACK/commit stuff */
+	unsigned long	  rs_difficult:1; /* ACK/commit stuff */
 	unsigned long	  rs_no_ack:1;    /* no ACK, even for
-						  difficult requests */
+					   * difficult requests
+					   */
 	unsigned long	  rs_scheduled:1;     /* being handled? */
 	unsigned long	  rs_scheduled_ever:1;/* any schedule attempts? */
 	unsigned long	  rs_handled:1;  /* been handled yet? */
 	unsigned long	  rs_on_net:1;   /* reply_out_callback pending? */
 	unsigned long	  rs_prealloc:1; /* rs from prealloc list */
 	unsigned long	  rs_committed:1;/* the transaction was committed
-					  * and the rs was dispatched */
+					  * and the rs was dispatched
+					  */
 	/** Size of the state */
 	int		    rs_size;
 	/** opcode */
@@ -463,7 +467,7 @@
 	/** Handles of locks awaiting client reply ACK */
 	struct lustre_handle   rs_locks[RS_MAX_LOCKS];
 	/** Lock modes of locks in \a rs_locks */
-	ldlm_mode_t	    rs_modes[RS_MAX_LOCKS];
+	enum ldlm_mode	    rs_modes[RS_MAX_LOCKS];
 };
 
 struct ptlrpc_thread;
@@ -1181,7 +1185,7 @@
  * purpose of this object is to hold references to the request's resources
  * for the lifetime of the request, and to hold properties that policies use
  * for determining the request's scheduling priority.
- * */
+ */
 struct ptlrpc_nrs_request {
 	/**
 	 * The request's resource hierarchy.
@@ -1321,15 +1325,17 @@
 		/* do not resend request on -EINPROGRESS */
 		rq_no_retry_einprogress:1,
 		/* allow the req to be sent if the import is in recovery
-		 * status */
+		 * status
+		 */
 		rq_allow_replay:1;
 
 	unsigned int rq_nr_resend;
 
 	enum rq_phase rq_phase; /* one of RQ_PHASE_* */
 	enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
-	atomic_t rq_refcount;/* client-side refcount for SENT race,
-				    server-side refcount for multiple replies */
+	atomic_t rq_refcount; /* client-side refcount for SENT race,
+			       * server-side refcount for multiple replies
+			       */
 
 	/** Portal to which this request would be sent */
 	short rq_request_portal;  /* XXX FIXME bug 249 */
@@ -1363,7 +1369,8 @@
 
 	/**
 	 * security and encryption data
-	 * @{ */
+	 * @{
+	 */
 	struct ptlrpc_cli_ctx   *rq_cli_ctx;     /**< client's half ctx */
 	struct ptlrpc_svc_ctx   *rq_svc_ctx;     /**< server's half ctx */
 	struct list_head	       rq_ctx_chain;   /**< link to waited ctx */
@@ -1477,7 +1484,8 @@
 
 	/** when request must finish. volatile
 	 * so that servers' early reply updates to the deadline aren't
-	 * kept in per-cpu cache */
+	 * kept in per-cpu cache
+	 */
 	volatile time64_t rq_deadline;
 	/** when req reply unlink must finish. */
 	time64_t rq_reply_deadline;
@@ -1518,7 +1526,7 @@
 static inline int ptlrpc_req_interpret(const struct lu_env *env,
 				       struct ptlrpc_request *req, int rc)
 {
-	if (req->rq_interpret_reply != NULL) {
+	if (req->rq_interpret_reply) {
 		req->rq_status = req->rq_interpret_reply(env, req,
 							 &req->rq_async_args,
 							 rc);
@@ -1678,7 +1686,8 @@
 /**
  * This is the debug print function you need to use to print request structure
  * content into lustre debug log.
- * for most callers (level is a constant) this is resolved at compile time */
+ * for most callers (level is a constant) this is resolved at compile time
+ */
 #define DEBUG_REQ(level, req, fmt, args...)				   \
 do {									  \
 	if ((level) & (D_ERROR | D_WARNING)) {				\
@@ -1947,7 +1956,7 @@
  * or general metadata service for MDS.
  */
 struct ptlrpc_service {
-	/** serialize /proc operations */
+	/** serialize sysfs operations */
 	spinlock_t			srv_lock;
 	/** most often accessed fields */
 	/** chain thru all services */
@@ -2101,7 +2110,8 @@
 	/** NRS head for regular requests */
 	struct ptlrpc_nrs		scp_nrs_reg;
 	/** NRS head for HP requests; this is only valid for services that can
-	 *  handle HP requests */
+	 *  handle HP requests
+	 */
 	struct ptlrpc_nrs	       *scp_nrs_hp;
 
 	/** AT stuff */
@@ -2141,8 +2151,8 @@
 #define ptlrpc_service_for_each_part(part, i, svc)			\
 	for (i = 0;							\
 	     i < (svc)->srv_ncpts &&					\
-	     (svc)->srv_parts != NULL &&				\
-	     ((part) = (svc)->srv_parts[i]) != NULL; i++)
+	     (svc)->srv_parts &&					\
+	     ((part) = (svc)->srv_parts[i]); i++)
 
 /**
  * Declaration of ptlrpcd control structure
@@ -2259,7 +2269,6 @@
 static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
 					 const struct ptlrpc_nrs_pol_desc *desc)
 {
-	LASSERT(desc->pd_compat_svc_name != NULL);
 	return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
 }
 
@@ -2303,7 +2312,6 @@
 	struct ptlrpc_bulk_desc *desc;
 	int		      rc;
 
-	LASSERT(req != NULL);
 	desc = req->rq_bulk;
 
 	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
@@ -2462,7 +2470,8 @@
 	/* "soft" limit for total threads number */
 	unsigned int			tc_nthrs_max;
 	/* user specified threads number, it will be validated due to
-	 * other members of this structure. */
+	 * other members of this structure.
+	 */
 	unsigned int			tc_nthrs_user;
 	/* set NUMA node affinity for service threads */
 	unsigned int			tc_cpu_affinity;
@@ -2726,7 +2735,7 @@
 static inline void
 ptlrpc_client_wake_req(struct ptlrpc_request *req)
 {
-	if (req->rq_set == NULL)
+	if (!req->rq_set)
 		wake_up(&req->rq_reply_waitq);
 	else
 		wake_up(&req->rq_set->set_waitq);
@@ -2750,7 +2759,7 @@
 /* Should only be called once per req */
 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
 {
-	if (req->rq_reply_state == NULL)
+	if (!req->rq_reply_state)
 		return; /* shouldn't occur */
 	ptlrpc_rs_decref(req->rq_reply_state);
 	req->rq_reply_state = NULL;
@@ -2807,7 +2816,6 @@
 static inline struct ptlrpc_service *
 ptlrpc_req2svc(struct ptlrpc_request *req)
 {
-	LASSERT(req->rq_rqbd != NULL);
 	return req->rq_rqbd->rqbd_svcpt->scp_service;
 }
 
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index 46a662f..b5128e4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -130,7 +130,6 @@
 extern struct req_format RQF_OBD_PING;
 extern struct req_format RQF_OBD_SET_INFO;
 extern struct req_format RQF_SEC_CTX;
-extern struct req_format RQF_OBD_IDX_READ;
 /* MGS req_format */
 extern struct req_format RQF_MGS_TARGET_REG;
 extern struct req_format RQF_MGS_SET_INFO;
@@ -146,7 +145,6 @@
 extern struct req_format RQF_MDS_SYNC;
 extern struct req_format RQF_MDS_GETXATTR;
 extern struct req_format RQF_MDS_GETATTR;
-extern struct req_format RQF_UPDATE_OBJ;
 
 /*
  * This is format of direct (non-intent) MDS_GETATTR_NAME request.
@@ -177,7 +175,6 @@
 extern struct req_format RQF_MDS_QUOTACHECK;
 extern struct req_format RQF_MDS_QUOTACTL;
 extern struct req_format RQF_QC_CALLBACK;
-extern struct req_format RQF_QUOTA_DQACQ;
 extern struct req_format RQF_MDS_SWAP_LAYOUTS;
 /* MDS hsm formats */
 extern struct req_format RQF_MDS_HSM_STATE_GET;
@@ -220,7 +217,6 @@
 extern struct req_format RQF_LDLM_INTENT_CREATE;
 extern struct req_format RQF_LDLM_INTENT_UNLINK;
 extern struct req_format RQF_LDLM_INTENT_GETXATTR;
-extern struct req_format RQF_LDLM_INTENT_QUOTA;
 extern struct req_format RQF_LDLM_CANCEL;
 extern struct req_format RQF_LDLM_CALLBACK;
 extern struct req_format RQF_LDLM_CP_CALLBACK;
@@ -252,7 +248,6 @@
 extern struct req_msg_field RMF_GETINFO_VAL;
 extern struct req_msg_field RMF_GETINFO_VALLEN;
 extern struct req_msg_field RMF_GETINFO_KEY;
-extern struct req_msg_field RMF_IDX_INFO;
 extern struct req_msg_field RMF_CLOSE_DATA;
 
 /*
@@ -277,7 +272,6 @@
 extern struct req_msg_field RMF_CAPA2;
 extern struct req_msg_field RMF_OBD_QUOTACHECK;
 extern struct req_msg_field RMF_OBD_QUOTACTL;
-extern struct req_msg_field RMF_QUOTA_BODY;
 extern struct req_msg_field RMF_STRING;
 extern struct req_msg_field RMF_SWAP_LAYOUTS;
 extern struct req_msg_field RMF_MDS_HSM_PROGRESS;
@@ -322,9 +316,6 @@
 /* generic uint32 */
 extern struct req_msg_field RMF_U32;
 
-/* OBJ update format */
-extern struct req_msg_field RMF_UPDATE;
-extern struct req_msg_field RMF_UPDATE_REPLY;
 /** @} req_layout */
 
 #endif /* _LUSTRE_REQ_LAYOUT_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h
index caa4da1..64559a1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ver.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ver.h
@@ -1,26 +1,20 @@
 #ifndef _LUSTRE_VER_H_
 #define _LUSTRE_VER_H_
-/* This file automatically generated from lustre/include/lustre_ver.h.in,
- * based on parameters in lustre/autoconf/lustre-version.ac.
- * Changes made directly to this file will be lost. */
 
 #define LUSTRE_MAJOR 2
-#define LUSTRE_MINOR 3
-#define LUSTRE_PATCH 64
+#define LUSTRE_MINOR 4
+#define LUSTRE_PATCH 60
 #define LUSTRE_FIX 0
-#define LUSTRE_VERSION_STRING "2.3.64"
+#define LUSTRE_VERSION_STRING "2.4.60"
 
 #define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \
 					    LUSTRE_MINOR, LUSTRE_PATCH, \
 					    LUSTRE_FIX)
 
-/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches
- * by this amount (set in lustre/autoconf/lustre-version.ac). */
-#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32)
-
-/* If lustre version of client and servers it connects to differs by more
+/*
+ * If lustre version of client and servers it connects to differs by more
  * than this amount, client would issue a warning.
- * (set in lustre/autoconf/lustre-version.ac) */
+ */
 #define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0)
 
 #endif
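
For context on the version bump above: LUSTRE_VERSION_CODE is produced by OBD_OCD_VERSION(), whose definition is outside this hunk. A minimal standalone sketch of the usual 8-bits-per-field packing follows; the macro name and layout here are assumptions for illustration, not taken from the patch (check lustre_idl.h for the real macro).

/* Minimal sketch: assumes OBD_OCD_VERSION() packs 8 bits per field.
 * The macro below is illustrative only and is not defined by this patch. */
#include <stdio.h>

#define SKETCH_OCD_VERSION(major, minor, patch, fix) \
	(((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))

int main(void)
{
	unsigned int code = SKETCH_OCD_VERSION(2, 4, 60, 0);

	/* prints 0x02043c00 for 2.4.60.0 under the assumed layout */
	printf("packed version code: 0x%08x\n", code);
	return 0;
}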
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index bcbe613..e63366b 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -90,7 +90,8 @@
 	pid_t	    lsm_lock_owner; /* debugging */
 
 	/* maximum possible file size, might change as OSTs status changes,
-	 * e.g. disconnected, deactivated */
+	 * e.g. disconnected, deactivated
+	 */
 	__u64	    lsm_maxbytes;
 	struct {
 		/* Public members. */
@@ -123,7 +124,7 @@
 
 static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
 {
-	if (lsm == NULL)
+	if (!lsm)
 		return false;
 	if (lsm_is_released(lsm))
 		return false;
@@ -159,7 +160,8 @@
 	/* An update callback which is called to update some data on upper
 	 * level. E.g. it is used for update lsm->lsm_oinfo at every received
 	 * request in osc level for enqueue requests. It is also possible to
-	 * update some caller data from LOV layer if needed. */
+	 * update some caller data from LOV layer if needed.
+	 */
 	obd_enqueue_update_f    oi_cb_up;
 };
 
@@ -216,7 +218,6 @@
 };
 
 #define OSC_MAX_RIF_DEFAULT       8
-#define MDS_OSC_MAX_RIF_DEFAULT   50
 #define OSC_MAX_RIF_MAX	 256
 #define OSC_MAX_DIRTY_DEFAULT  (OSC_MAX_RIF_DEFAULT * 4)
 #define OSC_MAX_DIRTY_MB_MAX   2048     /* arbitrary, but < MAX_LONG bytes */
@@ -241,7 +242,8 @@
 	struct obd_import       *cl_import; /* ptlrpc connection state */
 	int		      cl_conn_count;
 	/* max_mds_easize is purely a performance thing so we don't have to
-	 * call obd_size_diskmd() all the time. */
+	 * call obd_size_diskmd() all the time.
+	 */
 	int			 cl_default_mds_easize;
 	int			 cl_max_mds_easize;
 	int			 cl_default_mds_cookiesize;
@@ -261,7 +263,8 @@
 	/* since we allocate grant by blocks, we don't know how many grant will
 	 * be used to add a page into cache. As a solution, we reserve maximum
 	 * grant before trying to dirty a page and unreserve the rest.
-	 * See osc_{reserve|unreserve}_grant for details. */
+	 * See osc_{reserve|unreserve}_grant for details.
+	 */
 	long		 cl_reserved_grant;
 	struct list_head cl_cache_waiters; /* waiting for cache/grant */
 	unsigned long	 cl_next_shrink_grant;   /* jiffies */
@@ -269,14 +272,16 @@
 	int		 cl_grant_shrink_interval; /* seconds */
 
 	/* A chunk is an optimal size used by osc_extent to determine
-	 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
+	 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+	 */
 	int		  cl_chunkbits;
 	int		  cl_chunk;
 	int		  cl_extent_tax; /* extent overhead, by bytes */
 
 	/* keep track of objects that have lois that contain pages which
 	 * have been queued for async brw.  this lock also protects the
-	 * lists of osc_client_pages that hang off of the loi */
+	 * lists of osc_client_pages that hang off of the loi
+	 */
 	/*
 	 * ->cl_loi_list_lock protects consistency of
 	 * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and
@@ -295,14 +300,14 @@
 	 * NB by Jinshan: though field names are still _loi_, but actually
 	 * osc_object{}s are in the list.
 	 */
-	client_obd_lock_t	cl_loi_list_lock;
+	struct client_obd_lock	       cl_loi_list_lock;
 	struct list_head	       cl_loi_ready_list;
 	struct list_head	       cl_loi_hp_ready_list;
 	struct list_head	       cl_loi_write_list;
 	struct list_head	       cl_loi_read_list;
 	int		      cl_r_in_flight;
 	int		      cl_w_in_flight;
-	/* just a sum of the loi/lop pending numbers to be exported by /proc */
+	/* just a sum of the loi/lop pending numbers to be exported by sysfs */
 	atomic_t	     cl_pending_w_pages;
 	atomic_t	     cl_pending_r_pages;
 	__u32			 cl_max_pages_per_rpc;
@@ -322,7 +327,7 @@
 	atomic_t		 cl_lru_shrinkers;
 	atomic_t		 cl_lru_in_list;
 	struct list_head	 cl_lru_list; /* lru page list */
-	client_obd_lock_t	 cl_lru_list_lock; /* page list protector */
+	struct client_obd_lock   cl_lru_list_lock; /* page list protector */
 
 	/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
 	atomic_t	     cl_destroy_in_flight;
@@ -340,7 +345,7 @@
 	/* supported checksum types that are worked out at connect time */
 	__u32		    cl_supp_cksum_types;
 	/* checksum algorithm to be used */
-	cksum_type_t	     cl_cksum_type;
+	enum cksum_type	     cl_cksum_type;
 
 	/* also protected by the poorly named _loi_list_lock lock above */
 	struct osc_async_rc      cl_ar;
@@ -375,14 +380,12 @@
 	spinlock_t		ec_lock;
 	struct list_head	   ec_objects;
 	struct list_head	   ec_locks;
-	int		  ec_nstripes;
 	__u64		ec_unique;
 };
 
 /* Generic subset of OSTs */
 struct ost_pool {
-	__u32	      *op_array;      /* array of index of
-						   lov_obd->lov_tgts */
+	__u32	      *op_array;      /* array of index of lov_obd->lov_tgts */
 	unsigned int	op_count;      /* number of OSTs in the array */
 	unsigned int	op_size;       /* allocated size of lp_array */
 	struct rw_semaphore op_rw_sem;     /* to protect ost_pool use */
@@ -415,14 +418,16 @@
 	struct lov_qos_rr   lq_rr;	  /* round robin qos data */
 	unsigned long       lq_dirty:1,     /* recalc qos data */
 			    lq_same_space:1,/* the ost's all have approx.
-					       the same space avail */
+					     * the same space avail
+					     */
 			    lq_reset:1,     /* zero current penalties */
 			    lq_statfs_in_progress:1; /* statfs op in
 							progress */
 	/* qos statfs data */
 	struct lov_statfs_data *lq_statfs_data;
-	wait_queue_head_t	 lq_statfs_waitq; /* waitqueue to notify statfs
-					      * requests completion */
+	wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
+					    * requests completion
+					    */
 };
 
 struct lov_tgt_desc {
@@ -450,16 +455,16 @@
 	struct lov_qos_rr     pool_rr;		/* round robin qos */
 	struct hlist_node      pool_hash;	      /* access by poolname */
 	struct list_head	    pool_list;	      /* serial access */
-	struct dentry		*pool_debugfs_entry;	/* file in /proc */
+	struct dentry		*pool_debugfs_entry;	/* file in debugfs */
 	struct obd_device    *pool_lobd;	/* obd of the lov/lod to which
-						*  this pool belongs */
+						 * this pool belongs
+						 */
 };
 
 struct lov_obd {
 	struct lov_desc	 desc;
 	struct lov_tgt_desc   **lov_tgts;	      /* sparse array */
-	struct ost_pool	 lov_packed;	    /* all OSTs in a packed
-							  array */
+	struct ost_pool	 lov_packed;	    /* all OSTs in a packed array */
 	struct mutex		lov_lock;
 	struct obd_connect_data lov_ocd;
 	atomic_t	    lov_refcount;
@@ -596,34 +601,6 @@
 	struct obd_uuid	 *oti_ost_uuid;
 };
 
-static inline void oti_init(struct obd_trans_info *oti,
-			    struct ptlrpc_request *req)
-{
-	if (oti == NULL)
-		return;
-	memset(oti, 0, sizeof(*oti));
-
-	if (req == NULL)
-		return;
-
-	oti->oti_xid = req->rq_xid;
-	/** VBR: take versions from request */
-	if (req->rq_reqmsg != NULL &&
-	    lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
-		__u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg);
-
-		oti->oti_pre_version = pre_version ? pre_version[0] : 0;
-		oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg);
-	}
-
-	/** called from mds_create_objects */
-	if (req->rq_repmsg != NULL)
-		oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
-	oti->oti_thread = req->rq_svc_thread;
-	if (req->rq_reqmsg != NULL)
-		oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
-}
-
 static inline void oti_alloc_cookies(struct obd_trans_info *oti,
 				     int num_cookies)
 {
@@ -728,21 +705,23 @@
 	unsigned long obd_attached:1,      /* finished attach */
 		      obd_set_up:1,	/* finished setup */
 		      obd_version_recov:1, /* obd uses version checking */
-		      obd_replayable:1,    /* recovery is enabled; inform clients */
-		      obd_no_transno:1,    /* no committed-transno notification */
+		      obd_replayable:1,/* recovery is enabled; inform clients */
+		      obd_no_transno:1,  /* no committed-transno notification */
 		      obd_no_recov:1,      /* fail instead of retry messages */
 		      obd_stopping:1,      /* started cleanup */
 		      obd_starting:1,      /* started setup */
 		      obd_force:1,	 /* cleanup with > 0 obd refcount */
-		      obd_fail:1,	  /* cleanup with failover */
-		      obd_async_recov:1,   /* allow asynchronous orphan cleanup */
+		      obd_fail:1,	 /* cleanup with failover */
+		      obd_async_recov:1, /* allow asynchronous orphan cleanup */
 		      obd_no_conn:1,       /* deny new connections */
 		      obd_inactive:1,      /* device active/inactive
-					   * (for /proc/status only!!) */
+					    * (for sysfs status only!!)
+					    */
 		      obd_no_ir:1,	 /* no imperative recovery. */
 		      obd_process_conf:1;  /* device is processing mgs config */
 	/* use separate field as it is set in interrupt to don't mess with
-	 * protection of other bits using _bh lock */
+	 * protection of other bits using _bh lock
+	 */
 	unsigned long obd_recovery_expired:1;
 	/* uuid-export hash body */
 	struct cfs_hash	     *obd_uuid_hash;
@@ -935,7 +914,8 @@
 	__u32		   op_npages;
 
 	/* used to transfer info between the stacks of MD client
-	 * see enum op_cli_flags */
+	 * see enum op_cli_flags
+	 */
 	__u32			op_cli_flags;
 
 	/* File object data version for HSM release, on client */
@@ -965,7 +945,7 @@
 struct obd_ops {
 	struct module *owner;
 	int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
-			 void *karg, void *uarg);
+			 void *karg, void __user *uarg);
 	int (*get_info)(const struct lu_env *env, struct obd_export *,
 			__u32 keylen, void *key, __u32 *vallen, void *val,
 			struct lov_stripe_md *lsm);
@@ -987,7 +967,8 @@
 	/* connect to the target device with given connection
 	 * data. @ocd->ocd_connect_flags is modified to reflect flags actually
 	 * granted by the target, which are guaranteed to be a subset of flags
-	 * asked for. If @ocd == NULL, use default parameters. */
+	 * asked for. If @ocd == NULL, use default parameters.
+	 */
 	int (*connect)(const struct lu_env *env,
 		       struct obd_export **exp, struct obd_device *src,
 		       struct obd_uuid *cluuid, struct obd_connect_data *ocd,
@@ -1083,7 +1064,8 @@
 	/*
 	 * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line
 	 * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c.
-	 * Also, add a wrapper function in include/linux/obd_class.h. */
+	 * Also, add a wrapper function in include/linux/obd_class.h.
+	 */
 };
 
 enum {
@@ -1189,14 +1171,14 @@
 				      struct obd_client_handle *);
 	int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
 
-	ldlm_mode_t (*lock_match)(struct obd_export *, __u64,
-				  const struct lu_fid *, ldlm_type_t,
-				  ldlm_policy_data_t *, ldlm_mode_t,
-				  struct lustre_handle *);
+	enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
+				     const struct lu_fid *, enum ldlm_type,
+				     ldlm_policy_data_t *, enum ldlm_mode,
+				     struct lustre_handle *);
 
 	int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
-			     ldlm_policy_data_t *, ldlm_mode_t,
-			     ldlm_cancel_flags_t flags, void *opaque);
+			     ldlm_policy_data_t *, enum ldlm_mode,
+			     enum ldlm_cancel_flags flags, void *opaque);
 
 	int (*get_remote_perm)(struct obd_export *, const struct lu_fid *,
 			       __u32, struct ptlrpc_request **);
@@ -1253,7 +1235,7 @@
 	struct md_open_data *mod;
 
 	mod = kzalloc(sizeof(*mod), GFP_NOFS);
-	if (mod == NULL)
+	if (!mod)
 		return NULL;
 	atomic_set(&mod->mod_refcount, 1);
 	return mod;
@@ -1300,7 +1282,7 @@
 		return false;
 
 	/* caller does not care of idx */
-	if (idx == NULL)
+	if (!idx)
 		return true;
 
 	/* volatile file, the MDT can be set from name */
@@ -1327,7 +1309,8 @@
 	return true;
 bad_format:
 	/* bad format of mdt idx, we cannot return an error
-	 * to caller so we use hash algo */
+	 * to caller so we use hash algo
+	 */
 	CERROR("Bad volatile file name format: %s\n",
 	       name + LUSTRE_VOLATILE_HDR_LEN);
 	return false;
@@ -1335,7 +1318,6 @@
 
 static inline int cli_brw_size(struct obd_device *obd)
 {
-	LASSERT(obd != NULL);
 	return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
 }
 
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index 01db604..637fa22 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -37,7 +37,7 @@
 #include "../../include/linux/libcfs/libcfs.h"
 #include "lustre/lustre_idl.h"
 
-static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type)
+static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
 {
 	switch (cksum_type) {
 	case OBD_CKSUM_CRC32:
@@ -63,8 +63,9 @@
  * In case of an unsupported types/flags we fall back to ADLER
  * because that is supported by all clients since 1.8
  *
- * In case multiple algorithms are supported the best one is used. */
-static inline u32 cksum_type_pack(cksum_type_t cksum_type)
+ * In case multiple algorithms are supported the best one is used.
+ */
+static inline u32 cksum_type_pack(enum cksum_type cksum_type)
 {
 	unsigned int    performance = 0, tmp;
 	u32		flag = OBD_FL_CKSUM_ADLER;
@@ -98,7 +99,7 @@
 	return flag;
 }
 
-static inline cksum_type_t cksum_type_unpack(u32 o_flags)
+static inline enum cksum_type cksum_type_unpack(u32 o_flags)
 {
 	switch (o_flags & OBD_FL_CKSUM_ALL) {
 	case OBD_FL_CKSUM_CRC32C:
@@ -116,9 +117,9 @@
  * 1.8 supported ADLER it is base and not depend on hw
  * Client uses all available local algos
  */
-static inline cksum_type_t cksum_types_supported_client(void)
+static inline enum cksum_type cksum_types_supported_client(void)
 {
-	cksum_type_t ret = OBD_CKSUM_ADLER;
+	enum cksum_type ret = OBD_CKSUM_ADLER;
 
 	CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
 	       cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
@@ -139,14 +140,16 @@
  * Currently, calling cksum_type_pack() with a mask will return the fastest
  * checksum type due to its benchmarking at libcfs module load.
  * Caution is advised, however, since what is fastest on a single client may
- * not be the fastest or most efficient algorithm on the server.  */
-static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types)
+ * not be the fastest or most efficient algorithm on the server.
+ */
+static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types)
 {
 	return cksum_type_unpack(cksum_type_pack(cksum_types));
 }
 
 /* Checksum algorithm names. Must be defined in the same order as the
- * OBD_CKSUM_* flags. */
+ * OBD_CKSUM_* flags.
+ */
 #define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"}
 
 #endif /* __OBD_H */
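
The comments above describe the selection policy only in prose: pick the fastest algorithm both sides support, and fall back to ADLER because every client since 1.8 understands it. Below is a hedged sketch of that policy using toy flag bits and caller-supplied speeds in place of the real checksum flags and the libcfs benchmark results; none of these names exist in the patch.

/* Sketch only: toy flags and caller-supplied speeds stand in for the
 * real checksum flags and cfs_crypto_hash_speed() measurements. */
enum toy_cksum {
	TOY_CKSUM_CRC32  = 1 << 0,
	TOY_CKSUM_ADLER  = 1 << 1,
	TOY_CKSUM_CRC32C = 1 << 2,
};

enum toy_cksum toy_cksum_select(unsigned int supported,
				const unsigned int speed[3])
{
	const enum toy_cksum algos[3] = {
		TOY_CKSUM_CRC32, TOY_CKSUM_ADLER, TOY_CKSUM_CRC32C
	};
	enum toy_cksum best = TOY_CKSUM_ADLER;	/* universal fallback */
	unsigned int best_speed = 0;
	int i;

	for (i = 0; i < 3; i++) {
		if (!(supported & algos[i]))
			continue;
		if (speed[i] > best_speed) {
			best = algos[i];
			best_speed = speed[i];
		}
	}
	return best;
}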
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 97d8039..8515218 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -45,18 +45,22 @@
 #include "lprocfs_status.h"
 
 #define OBD_STATFS_NODELAY      0x0001  /* requests should be send without delay
-					 * and resends for avoid deadlocks */
+					 * and resends to avoid deadlocks
+					 */
 #define OBD_STATFS_FROM_CACHE   0x0002  /* the statfs callback should not update
-					 * obd_osfs_age */
+					 * obd_osfs_age
+					 */
 #define OBD_STATFS_PTLRPCD      0x0004  /* requests will be sent via ptlrpcd
 					 * instead of a specific set. This
 					 * means that we cannot rely on the set
 					 * interpret routine to be called.
 					 * lov_statfs_fini() must thus be called
-					 * by the request interpret routine */
+					 * by the request interpret routine
+					 */
 #define OBD_STATFS_FOR_MDT0	0x0008	/* The statfs is only for retrieving
-					 * information from MDT0. */
-#define OBD_FL_PUNCH    0x00000001      /* To indicate it is punch operation */
+					 * information from MDT0.
+					 */
+#define OBD_FL_PUNCH	    0x00000001  /* To indicate it is punch operation */
 
 /* OBD Device Declarations */
 extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
@@ -160,8 +164,9 @@
 	struct mutex		    cld_lock;
 	int			 cld_type;
 	unsigned int		cld_stopping:1, /* we were told to stop
-						     * watching */
-				    cld_lostlock:1; /* lock not requeued */
+						 * watching
+						 */
+				cld_lostlock:1; /* lock not requeued */
 	char			cld_logname[0];
 };
 
@@ -275,7 +280,8 @@
 #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op
 
 /* Ensure obd_setup: used for cleanup which must be called
-   while obd is stopping */
+ * while obd is stopping
+ */
 static inline int obd_check_dev(struct obd_device *obd)
 {
 	if (!obd) {
@@ -306,7 +312,7 @@
 	 / sizeof(((struct obd_ops *)(0))->iocontrol))
 
 #define OBD_COUNTER_INCREMENT(obdx, op)			   \
-	if ((obdx)->obd_stats != NULL) {			  \
+	if ((obdx)->obd_stats) {				  \
 		unsigned int coffset;			     \
 		coffset = (unsigned int)((obdx)->obd_cntr_base) + \
 			OBD_COUNTER_OFFSET(op);		   \
@@ -315,7 +321,7 @@
 	}
 
 #define EXP_COUNTER_INCREMENT(export, op)				    \
-	if ((export)->exp_obd->obd_stats != NULL) {			  \
+	if ((export)->exp_obd->obd_stats) {			  \
 		unsigned int coffset;					\
 		coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \
 			OBD_COUNTER_OFFSET(op);			      \
@@ -329,7 +335,7 @@
 	 / sizeof(((struct md_ops *)(0))->getstatus))
 
 #define MD_COUNTER_INCREMENT(obdx, op)			   \
-	if ((obd)->md_stats != NULL) {			   \
+	if ((obd)->md_stats) {			   \
 		unsigned int coffset;			    \
 		coffset = (unsigned int)((obdx)->md_cntr_base) + \
 			MD_COUNTER_OFFSET(op);		   \
@@ -338,24 +344,24 @@
 	}
 
 #define EXP_MD_COUNTER_INCREMENT(export, op)				 \
-	if ((export)->exp_obd->obd_stats != NULL) {			  \
+	if ((export)->exp_obd->obd_stats) {				  \
 		unsigned int coffset;					\
 		coffset = (unsigned int)((export)->exp_obd->md_cntr_base) +  \
 			MD_COUNTER_OFFSET(op);			       \
 		LASSERT(coffset < (export)->exp_obd->md_stats->ls_num);      \
 		lprocfs_counter_incr((export)->exp_obd->md_stats, coffset);  \
-		if ((export)->exp_md_stats != NULL)			  \
+		if ((export)->exp_md_stats)				  \
 			lprocfs_counter_incr(				\
 				(export)->exp_md_stats, coffset);	    \
 	}
 
 #define EXP_CHECK_MD_OP(exp, op)				\
 do {							    \
-	if ((exp) == NULL) {				    \
+	if (!(exp)) {				    \
 		CERROR("obd_" #op ": NULL export\n");	   \
 		return -ENODEV;				\
 	}						       \
-	if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) {   \
+	if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) {   \
 		CERROR("obd_" #op ": cleaned up obd\n");	\
 		return -EOPNOTSUPP;			    \
 	}						       \
@@ -379,11 +385,11 @@
 
 #define EXP_CHECK_DT_OP(exp, op)				\
 do {							    \
-	if ((exp) == NULL) {				    \
+	if (!(exp)) {				    \
 		CERROR("obd_" #op ": NULL export\n");	   \
 		return -ENODEV;				\
 	}						       \
-	if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) {   \
+	if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) {   \
 		CERROR("obd_" #op ": cleaned up obd\n");	\
 		return -EOPNOTSUPP;			    \
 	}						       \
@@ -467,7 +473,7 @@
 	DECLARE_LU_VARS(ldt, d);
 
 	ldt = obd->obd_type->typ_lu;
-	if (ldt != NULL) {
+	if (ldt) {
 		struct lu_context  session_ctx;
 		struct lu_env env;
 
@@ -509,7 +515,7 @@
 		return rc;
 	ldt = obd->obd_type->typ_lu;
 	d = obd->obd_lu_dev;
-	if (ldt != NULL && d != NULL) {
+	if (ldt && d) {
 		if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
 			struct lu_env env;
 
@@ -538,7 +544,7 @@
 
 	ldt = obd->obd_type->typ_lu;
 	d = obd->obd_lu_dev;
-	if (ldt != NULL && d != NULL) {
+	if (ldt && d) {
 		struct lu_env env;
 
 		rc = lu_env_init(&env, ldt->ldt_ctx_tags);
@@ -558,7 +564,8 @@
 static inline void obd_cleanup_client_import(struct obd_device *obd)
 {
 	/* If we set up but never connected, the
-	   client import will not have been cleaned. */
+	 * client import will not have been cleaned.
+	 */
 	down_write(&obd->u.cli.cl_sem);
 	if (obd->u.cli.cl_import) {
 		struct obd_import *imp;
@@ -586,7 +593,7 @@
 	obd->obd_process_conf = 1;
 	ldt = obd->obd_type->typ_lu;
 	d = obd->obd_lu_dev;
-	if (ldt != NULL && d != NULL) {
+	if (ldt && d) {
 		struct lu_env env;
 
 		rc = lu_env_init(&env, ldt->ldt_ctx_tags);
@@ -674,7 +681,7 @@
 				  struct lov_stripe_md **mem_tgt)
 {
 	LASSERT(mem_tgt);
-	LASSERT(*mem_tgt == NULL);
+	LASSERT(!*mem_tgt);
 	return obd_unpackmd(exp, mem_tgt, NULL, 0);
 }
 
@@ -767,7 +774,7 @@
 	EXP_COUNTER_INCREMENT(exp, setattr_async);
 
 	set =  ptlrpc_prep_set();
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 
 	rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
@@ -778,7 +785,8 @@
 }
 
 /* This adds all the requests into @set if @set != NULL, otherwise
-   all requests are sent asynchronously without waiting for response. */
+ * all requests are sent asynchronously without waiting for response.
+ */
 static inline int obd_setattr_async(struct obd_export *exp,
 				    struct obd_info *oinfo,
 				    struct obd_trans_info *oti,
@@ -848,7 +856,8 @@
 {
 	int rc;
 	__u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
-						   * check */
+							 * check
+							 */
 
 	rc = obd_check_dev_active(obd);
 	if (rc)
@@ -858,7 +867,7 @@
 
 	rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata);
 	/* check that only subset is granted */
-	LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) ==
+	LASSERT(ergo(data, (data->ocd_connect_flags & ocf) ==
 				    data->ocd_connect_flags));
 	return rc;
 }
@@ -871,8 +880,7 @@
 				void *localdata)
 {
 	int rc;
-	__u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition
-						   * check */
+	__u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */
 
 	rc = obd_check_dev_active(obd);
 	if (rc)
@@ -882,8 +890,7 @@
 
 	rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata);
 	/* check that only subset is granted */
-	LASSERT(ergo(d != NULL,
-		     (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
+	LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
 	return rc;
 }
 
@@ -998,7 +1005,7 @@
 {
 	int rc = 0;
 
-	if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
+	if ((exp)->exp_obd && OBT((exp)->exp_obd) &&
 	    OBP((exp)->exp_obd, init_export))
 		rc = OBP(exp->exp_obd, init_export)(exp);
 	return rc;
@@ -1006,7 +1013,7 @@
 
 static inline int obd_destroy_export(struct obd_export *exp)
 {
-	if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
+	if ((exp)->exp_obd && OBT((exp)->exp_obd) &&
 	    OBP((exp)->exp_obd, destroy_export))
 		OBP(exp->exp_obd, destroy_export)(exp);
 	return 0;
@@ -1014,7 +1021,8 @@
 
 /* @max_age is the oldest time in jiffies that we accept using a cached data.
  * If the cache is older than @max_age we will get a new value from the
- * target.  Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
+ * target.  Use a value of "cfs_time_current() + HZ" to guarantee freshness.
+ */
 static inline int obd_statfs_async(struct obd_export *exp,
 				   struct obd_info *oinfo,
 				   __u64 max_age,
@@ -1023,7 +1031,7 @@
 	int rc = 0;
 	struct obd_device *obd;
 
-	if (exp == NULL || exp->exp_obd == NULL)
+	if (!exp || !exp->exp_obd)
 		return -EINVAL;
 
 	obd = exp->exp_obd;
@@ -1059,7 +1067,7 @@
 	int rc = 0;
 
 	set =  ptlrpc_prep_set();
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 
 	oinfo.oi_osfs = osfs;
@@ -1073,7 +1081,8 @@
 
 /* @max_age is the oldest time in jiffies that we accept using a cached data.
  * If the cache is older than @max_age we will get a new value from the
- * target.  Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
+ * target.  Use a value of "cfs_time_current() + HZ" to guarantee freshness.
+ */
 static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
 			     struct obd_statfs *osfs, __u64 max_age,
 			     __u32 flags)
@@ -1081,7 +1090,7 @@
 	int rc = 0;
 	struct obd_device *obd = exp->exp_obd;
 
-	if (obd == NULL)
+	if (!obd)
 		return -EINVAL;
 
 	OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
@@ -1155,7 +1164,7 @@
 }
 
 static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
-				int len, void *karg, void *uarg)
+				int len, void *karg, void __user *uarg)
 {
 	int rc;
 
@@ -1205,9 +1214,10 @@
 		return rc;
 
 	/* the check for async_recov is a complete hack - I'm hereby
-	   overloading the meaning to also mean "this was called from
-	   mds_postsetup".  I know that my mds is able to handle notifies
-	   by this point, and it needs to get them to execute mds_postrecov. */
+	 * overloading the meaning to also mean "this was called from
+	 * mds_postsetup".  I know that my mds is able to handle notifies
+	 * by this point, and it needs to get them to execute mds_postrecov.
+	 */
 	if (!obd->obd_set_up && !obd->obd_async_recov) {
 		CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
 		return -EINVAL;
@@ -1241,7 +1251,7 @@
 	 * Also, call non-obd listener, if any
 	 */
 	onu = &observer->obd_upcall;
-	if (onu->onu_upcall != NULL)
+	if (onu->onu_upcall)
 		rc2 = onu->onu_upcall(observer, observed, ev,
 				      onu->onu_owner, NULL);
 	else
@@ -1287,7 +1297,7 @@
 	int rc;
 
 	/* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
-	if (obd == NULL || !OBT(obd)) {
+	if (!obd || !OBT(obd)) {
 		CERROR("cleaned up obd\n");
 		return -EOPNOTSUPP;
 	}
@@ -1318,57 +1328,6 @@
 	return 0;
 }
 
-#if 0
-static inline int obd_register_page_removal_cb(struct obd_export *exp,
-					       obd_page_removal_cb_t cb,
-					       obd_pin_extent_cb pin_cb)
-{
-	int rc;
-
-	OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0);
-	OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb);
-
-	rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb);
-	return rc;
-}
-
-static inline int obd_unregister_page_removal_cb(struct obd_export *exp,
-						 obd_page_removal_cb_t cb)
-{
-	int rc;
-
-	OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0);
-	OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb);
-
-	rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb);
-	return rc;
-}
-
-static inline int obd_register_lock_cancel_cb(struct obd_export *exp,
-					      obd_lock_cancel_cb cb)
-{
-	int rc;
-
-	OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0);
-	OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb);
-
-	rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb);
-	return rc;
-}
-
-static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp,
-						 obd_lock_cancel_cb cb)
-{
-	int rc;
-
-	OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0);
-	OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb);
-
-	rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb);
-	return rc;
-}
-#endif
-
 /* metadata helpers */
 static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid)
 {
@@ -1657,8 +1616,8 @@
 static inline int md_cancel_unused(struct obd_export *exp,
 				   const struct lu_fid *fid,
 				   ldlm_policy_data_t *policy,
-				   ldlm_mode_t mode,
-				   ldlm_cancel_flags_t flags,
+				   enum ldlm_mode mode,
+				   enum ldlm_cancel_flags flags,
 				   void *opaque)
 {
 	int rc;
@@ -1671,12 +1630,12 @@
 	return rc;
 }
 
-static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
-					const struct lu_fid *fid,
-					ldlm_type_t type,
-					ldlm_policy_data_t *policy,
-					ldlm_mode_t mode,
-					struct lustre_handle *lockh)
+static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
+					   const struct lu_fid *fid,
+					   enum ldlm_type type,
+					   ldlm_policy_data_t *policy,
+					   enum ldlm_mode mode,
+					   struct lustre_handle *lockh)
 {
 	EXP_CHECK_MD_OP(exp, lock_match);
 	EXP_MD_COUNTER_INCREMENT(exp, lock_match);
@@ -1759,7 +1718,8 @@
 /* I'm as embarrassed about this as you are.
  *
  * <shaver> // XXX do not look into _superhack with remaining eye
- * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */
+ * <shaver> // XXX if this were any uglier, I'd get my own show on MTV
+ */
 extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
 
 /* obd_mount.c */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index d031437..225262fa 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -47,7 +47,8 @@
 extern unsigned int obd_dump_on_timeout;
 extern unsigned int obd_dump_on_eviction;
 /* obd_timeout should only be used for recovery, not for
-   networking / disk / timings affected by load (use Adaptive Timeouts) */
+ * networking / disk / timings affected by load (use Adaptive Timeouts)
+ */
 extern unsigned int obd_timeout;	  /* seconds */
 extern unsigned int obd_timeout_set;
 extern unsigned int at_min;
@@ -104,18 +105,21 @@
  * failover targets the client only pings one server at a time, and pings
  * can be lost on a loaded network. Since eviction has serious consequences,
  * and there's no urgent need to evict a client just because it's idle, we
- * should be very conservative here. */
+ * should be very conservative here.
+ */
 #define PING_EVICT_TIMEOUT (PING_INTERVAL * 6)
 #define DISK_TIMEOUT 50	  /* Beyond this we warn about disk speed */
 #define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */
- /* Max connect interval for nonresponsive servers; ~50s to avoid building up
-    connect requests in the LND queues, but within obd_timeout so we don't
-    miss the recovery window */
+/* Max connect interval for nonresponsive servers; ~50s to avoid building up
+ * connect requests in the LND queues, but within obd_timeout so we don't
+ * miss the recovery window
+ */
 #define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout))
 #define CONNECTION_SWITCH_INC 5  /* Connection timeout backoff */
 /* In general this should be low to have quick detection of a system
-   running on a backup server. (If it's too low, import_select_connection
-   will increase the timeout anyhow.)  */
+ * running on a backup server. (If it's too low, import_select_connection
+ * will increase the timeout anyhow.)
+ */
 #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20)
 /* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
 #define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
@@ -507,7 +511,6 @@
 do {									      \
 	struct portals_handle *__h = (handle);				      \
 									      \
-	LASSERT(handle != NULL);					      \
 	__h->h_cookie = (unsigned long)(ptr);				      \
 	__h->h_size = (size);						      \
 	call_rcu(&__h->h_rcu, class_handle_free_cb);			      \
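
As a worked example of how the connect-timeout macros above combine (the RECONNECT_DELAY_MAX continuation line is elided by the hunk), the sketch below recomputes them in userspace for an assumed obd_timeout of 100 seconds; the default value is set elsewhere and is only an assumption here.

/* Sketch: standalone recomputation of the connect/reconnect bounds,
 * assuming obd_timeout = 100 seconds (not taken from this patch). */
#include <stdio.h>

#define TOY_MIN(a, b) ((a) < (b) ? (a) : (b))
#define TOY_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int obd_timeout = 100;		/* assumed default */
	unsigned int switch_min = 5U;		/* CONNECTION_SWITCH_MIN */
	unsigned int switch_inc = 5;		/* CONNECTION_SWITCH_INC */
	unsigned int switch_max = TOY_MIN(50U, TOY_MAX(switch_min, obd_timeout));
	unsigned int initial = TOY_MAX(switch_min, obd_timeout / 20);
	/* per the comment above: SWITCH_MAX + SWITCH_INC + INITIAL */
	unsigned int reconnect_max = switch_max + switch_inc + initial;

	printf("CONNECTION_SWITCH_MAX   = %us\n", switch_max);	/* 50 */
	printf("INITIAL_CONNECT_TIMEOUT = %us\n", initial);	/* 5 */
	printf("RECONNECT_DELAY_MAX     = %us\n", reconnect_max);	/* 60 */
	return 0;
}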
diff --git a/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h
new file mode 100644
index 0000000..5e99836
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h
@@ -0,0 +1,94 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ *
+ * Kernel <-> userspace communication routines.
+ * The definitions below are used in the kernel and userspace.
+ */
+
+#ifndef __UAPI_KERNELCOMM_H__
+#define __UAPI_KERNELCOMM_H__
+
+#include <linux/types.h>
+
+/* KUC message header.
+ * All current and future KUC messages should use this header.
+ * To avoid having to include Lustre headers from libcfs, define this here.
+ */
+struct kuc_hdr {
+	__u16 kuc_magic;
+	/* Each new Lustre feature should use a different transport */
+	__u8  kuc_transport;
+	__u8  kuc_flags;
+	/* Message type or opcode, transport-specific */
+	__u16 kuc_msgtype;
+	/* Including header */
+	__u16 kuc_msglen;
+} __aligned(sizeof(__u64));
+
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE)
+
+#define KUC_MAGIC		0x191C /*Lustre9etLinC */
+
+/* kuc_msgtype values are defined in each transport */
+enum kuc_transport_type {
+	KUC_TRANSPORT_GENERIC	= 1,
+	KUC_TRANSPORT_HSM	= 2,
+	KUC_TRANSPORT_CHANGELOG	= 3,
+};
+
+enum kuc_generic_message_type {
+	KUC_MSG_SHUTDOWN	= 1,
+};
+
+/* KUC Broadcast Groups. This determines which userspace process hears which
+ * messages.  Multiple transports may be used within a group, or multiple
+ * groups may use the same transport.  Broadcast
+ * groups need not be used if e.g. a UID is specified instead;
+ * use group 0 to signify unicast.
+ */
+#define KUC_GRP_HSM	0x02
+#define KUC_GRP_MAX	KUC_GRP_HSM
+
+#define LK_FLG_STOP 0x01
+#define LK_NOFD -1U
+
+/* kernelcomm control structure, passed from userspace to kernel */
+struct lustre_kernelcomm {
+	__u32 lk_wfd;
+	__u32 lk_rfd;
+	__u32 lk_uid;
+	__u32 lk_group;
+	__u32 lk_data;
+	__u32 lk_flags;
+} __packed;
+
+#endif	/* __UAPI_KERNELCOMM_H__ */
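
Because this new header is explicitly shared with userspace, a hedged usage sketch may help: it shows how a tool could fill in the message header for a generic shutdown. The mirrored struct, the send_shutdown() helper, and the pipe file descriptor are illustrative assumptions, not part of the patch.

/* Sketch: userspace side only.  The struct mirrors kuc_hdr above using
 * <stdint.h> types; real code would include the header instead. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct kuc_hdr_sketch {
	uint16_t kuc_magic;
	uint8_t  kuc_transport;
	uint8_t  kuc_flags;
	uint16_t kuc_msgtype;
	uint16_t kuc_msglen;
} __attribute__((aligned(sizeof(uint64_t))));

int send_shutdown(int pipe_fd)
{
	struct kuc_hdr_sketch hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.kuc_magic     = 0x191C;	/* KUC_MAGIC */
	hdr.kuc_transport = 1;		/* KUC_TRANSPORT_GENERIC */
	hdr.kuc_msgtype   = 1;		/* KUC_MSG_SHUTDOWN */
	hdr.kuc_msglen    = sizeof(hdr);	/* header only, no payload */

	return write(pipe_fd, &hdr, sizeof(hdr)) == (ssize_t)sizeof(hdr) ? 0 : -1;
}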
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c
index 8533a1e5..c4e8a08 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/lclient/glimpse.c
@@ -109,7 +109,8 @@
 			 *       if there were no conflicting locks. If there
 			 *       were conflicting locks, enqueuing or waiting
 			 *       fails with -ENAVAIL, but valid inode
-			 *       attributes are returned anyway. */
+			 *       attributes are returned anyway.
+			 */
 			*descr = whole_file;
 			descr->cld_obj   = clob;
 			descr->cld_mode  = CLM_PHANTOM;
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 34dde7d..30651f2 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -117,7 +117,7 @@
 	struct ccc_thread_info *info;
 
 	info = kmem_cache_alloc(ccc_thread_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info == NULL)
+	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;
 }
@@ -136,7 +136,7 @@
 	struct ccc_session *session;
 
 	session = kmem_cache_alloc(ccc_session_kmem, GFP_NOFS | __GFP_ZERO);
-	if (session == NULL)
+	if (!session)
 		session = ERR_PTR(-ENOMEM);
 	return session;
 }
@@ -173,7 +173,7 @@
 	vdv = lu2ccc_dev(d);
 	vdv->cdv_next = lu2cl_dev(next);
 
-	LASSERT(d->ld_site != NULL && next->ld_type != NULL);
+	LASSERT(d->ld_site && next->ld_type);
 	next->ld_site = d->ld_site;
 	rc = next->ld_type->ldt_ops->ldto_device_init(
 			env, next, next->ld_type->ldt_name, NULL);
@@ -211,12 +211,12 @@
 	vdv->cdv_cl.cd_ops = clops;
 
 	site = kzalloc(sizeof(*site), GFP_NOFS);
-	if (site != NULL) {
+	if (site) {
 		rc = cl_site_init(site, &vdv->cdv_cl);
 		if (rc == 0)
 			rc = lu_site_init_finish(&site->cs_lu);
 		else {
-			LASSERT(lud->ld_site == NULL);
+			LASSERT(!lud->ld_site);
 			CERROR("Cannot init lu_site, rc %d.\n", rc);
 			kfree(site);
 		}
@@ -236,7 +236,7 @@
 	struct cl_site    *site = lu2cl_site(d->ld_site);
 	struct lu_device  *next = cl2lu_dev(vdv->cdv_next);
 
-	if (d->ld_site != NULL) {
+	if (d->ld_site) {
 		cl_site_fini(site);
 		kfree(site);
 	}
@@ -252,7 +252,7 @@
 	int result;
 
 	vrq = kmem_cache_alloc(ccc_req_kmem, GFP_NOFS | __GFP_ZERO);
-	if (vrq != NULL) {
+	if (vrq) {
 		cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
 		result = 0;
 	} else
@@ -304,7 +304,7 @@
 
 void ccc_global_fini(struct lu_device_type *device_type)
 {
-	if (ccc_inode_fini_env != NULL) {
+	if (ccc_inode_fini_env) {
 		cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
 		ccc_inode_fini_env = NULL;
 	}
@@ -328,7 +328,7 @@
 	struct lu_object  *obj;
 
 	vob = kmem_cache_alloc(ccc_object_kmem, GFP_NOFS | __GFP_ZERO);
-	if (vob != NULL) {
+	if (vob) {
 		struct cl_object_header *hdr;
 
 		obj = ccc2lu(vob);
@@ -365,7 +365,7 @@
 
 	under = &dev->cdv_next->cd_lu_dev;
 	below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
-	if (below != NULL) {
+	if (below) {
 		const struct cl_object_conf *cconf;
 
 		cconf = lu2cl_conf(conf);
@@ -397,7 +397,7 @@
 	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
 	clk = kmem_cache_alloc(ccc_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (clk != NULL) {
+	if (clk) {
 		cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
 		result = 0;
 	} else
@@ -613,7 +613,8 @@
 		 * stale i_size when doing appending writes and effectively
 		 * cancel the result of the truncate.  Getting the
 		 * ll_inode_size_lock() after the enqueue maintains the DLM
-		 * -> ll_inode_size_lock() acquiring order. */
+		 * -> ll_inode_size_lock() acquiring order.
+		 */
 		if (lock->cll_descr.cld_start == 0 &&
 		    lock->cll_descr.cld_end == CL_PAGE_EOF)
 			cl_merge_lvb(env, inode);
@@ -660,7 +661,7 @@
 {
 	size_t size = io->u.ci_rw.crw_count;
 
-	if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
+	if (!cl_is_normalio(env, io) || !cio->cui_iter)
 		return;
 
 	iov_iter_truncate(cio->cui_iter, size);
@@ -749,12 +750,13 @@
 			 */
 			ccc_object_size_unlock(obj);
 			result = cl_glimpse_lock(env, io, inode, obj, 0);
-			if (result == 0 && exceed != NULL) {
+			if (result == 0 && exceed) {
 				/* If objective page index exceed end-of-file
 				 * page index, return directly. Do not expect
 				 * kernel will check such case correctly.
 				 * linux-2.6.18-128.1.1 miss to do that.
-				 * --bug 17336 */
+				 * --bug 17336
+				 */
 				loff_t size = cl_isize_read(inode);
 				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
 				loff_t size_index = (size - 1) >>
@@ -884,7 +886,8 @@
 
 		if (attr->ia_valid & ATTR_FILE)
 			/* populate the file descriptor for ftruncate to honor
-			 * group lock - see LU-787 */
+			 * group lock - see LU-787
+			 */
 			cio->cui_fd = cl_iattr2fd(inode, attr);
 
 		result = cl_io_loop(env, io);
@@ -896,7 +899,8 @@
 		goto again;
 	/* HSM import case: file is released, cannot be restored
 	 * no need to fail except if restore registration failed
-	 * with -ENODATA */
+	 * with -ENODATA
+	 */
 	if (result == -ENODATA && io->ci_restore_needed &&
 	    io->ci_result != -ENODATA)
 		result = 0;
@@ -986,17 +990,6 @@
 }
 
 /**
- * Returns a pointer to cl_page associated with \a vmpage, without acquiring
- * additional reference to the resulting page. This is an unsafe version of
- * cl_vmpage_page() that can only be used under vmpage lock.
- */
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
-{
-	KLASSERT(PageLocked(vmpage));
-	return (struct cl_page *)vmpage->private;
-}
-
-/**
  * Initialize or update CLIO structures for regular files when new
  * meta-data arrives from the server.
  *
@@ -1033,11 +1026,12 @@
 	fid  = &lli->lli_fid;
 	LASSERT(fid_is_sane(fid));
 
-	if (lli->lli_clob == NULL) {
+	if (!lli->lli_clob) {
 		/* clob is slave of inode, empty lli_clob means for new inode,
 		 * there is no clob in cache with the given fid, so it is
 		 * unnecessary to perform lookup-alloc-lookup-insert, just
-		 * alloc and insert directly. */
+		 * alloc and insert directly.
+		 */
 		LASSERT(inode->i_state & I_NEW);
 		conf.coc_lu.loc_flags = LOC_F_NEW;
 		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
@@ -1109,7 +1103,7 @@
 	int refcheck;
 	int emergency;
 
-	if (clob != NULL) {
+	if (clob) {
 		void		    *cookie;
 
 		cookie = cl_env_reenter();
@@ -1117,7 +1111,7 @@
 		emergency = IS_ERR(env);
 		if (emergency) {
 			mutex_lock(&ccc_inode_fini_guard);
-			LASSERT(ccc_inode_fini_env != NULL);
+			LASSERT(ccc_inode_fini_env);
 			cl_env_implant(ccc_inode_fini_env, &refcheck);
 			env = ccc_inode_fini_env;
 		}
@@ -1162,7 +1156,8 @@
 }
 
 /**
- * build inode number from passed @fid */
+ * build inode number from passed @fid
+ */
 __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
 {
 	if (BITS_PER_LONG == 32 || api32)
@@ -1173,7 +1168,8 @@
 
 /**
  * build inode generation from passed @fid.  If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them. */
+ * inode number then return a non-zero generation to distinguish them.
+ */
 __u32 cl_fid_build_gen(const struct lu_fid *fid)
 {
 	__u32 gen;
@@ -1194,7 +1190,8 @@
  * have to wait for the refcount to become zero to destroy the older layout.
  *
  * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT. */
+ * inside layout lock - MDS_INODELOCK_LAYOUT.
+ */
 struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
 {
 	return lov_lsm_get(cl_i2info(inode)->lli_clob);
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
index 8389a0e..d80bcedd 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
@@ -48,7 +48,8 @@
 /* Initialize the default and maximum LOV EA and cookie sizes.  This allows
  * us to make MDS RPCs with large enough reply buffers to hold the
  * maximum-sized (= maximum striped) EA and cookie without having to
- * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
+ * calculate this (via a call into the LOV + OSCs) each time we make an RPC.
+ */
 int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
 {
 	struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
@@ -74,7 +75,8 @@
 	cookiesize = stripes * sizeof(struct llog_cookie);
 
 	/* default cookiesize is 0 because from 2.4 server doesn't send
-	 * llog cookies to client. */
+	 * llog cookies to client.
+	 */
 	CDEBUG(D_HA,
 	       "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
 	       def_easize, easize, cookiesize);
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index a2ea8e5..3230606 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -49,13 +49,11 @@
 
 static inline int node_is_left_child(struct interval_node *node)
 {
-	LASSERT(node->in_parent != NULL);
 	return node == node->in_parent->in_left;
 }
 
 static inline int node_is_right_child(struct interval_node *node)
 {
-	LASSERT(node->in_parent != NULL);
 	return node == node->in_parent->in_right;
 }
 
@@ -135,7 +133,8 @@
 
 /* The left rotation "pivots" around the link from node to node->right, and
  * - node will be linked to node->right's left child, and
- * - node->right's left child will be linked to node's right child.  */
+ * - node->right's left child will be linked to node's right child.
+ */
 static void __rotate_left(struct interval_node *node,
 			  struct interval_node **root)
 {
@@ -164,7 +163,8 @@
 
 /* The right rotation "pivots" around the link from node to node->left, and
  * - node will be linked to node->left's right child, and
- * - node->left's right child will be linked to node's left child.  */
+ * - node->left's right child will be linked to node's left child.
+ */
 static void __rotate_right(struct interval_node *node,
 			   struct interval_node **root)
 {
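
The rotation comments above (here and for __rotate_right) read more easily next to a toy version. The sketch below uses a deliberately simplified node with no parent pointer or interval bookkeeping, so it is not the real struct interval_node API; the right rotation is simply the mirror image with left and right swapped.

/* Sketch: minimal left rotation matching the comment above.
 * struct toy_node is illustrative and unrelated to struct interval_node. */
struct toy_node {
	struct toy_node *left;
	struct toy_node *right;
};

void toy_rotate_left(struct toy_node **slot)
{
	struct toy_node *node = *slot;
	struct toy_node *pivot = node->right;

	/* node->right's left child becomes node's right child ... */
	node->right = pivot->left;
	/* ... and node becomes node->right's (pivot's) left child */
	pivot->left = node;
	*slot = pivot;
}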
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 9c70f31..222cb62 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -62,7 +62,8 @@
  * is the "highest lock".  This function returns the new KMS value.
  * Caller must hold lr_lock already.
  *
- * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
+ * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes!
+ */
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 {
 	struct ldlm_resource *res = lock->l_resource;
@@ -72,7 +73,8 @@
 
 	/* don't let another thread in ldlm_extent_shift_kms race in
 	 * just after we finish and take our lock into account in its
-	 * calculation of the kms */
+	 * calculation of the kms
+	 */
 	lock->l_flags |= LDLM_FL_KMS_IGNORE;
 
 	list_for_each(tmp, &res->lr_granted) {
@@ -85,7 +87,8 @@
 			return old_kms;
 
 		/* This extent _has_ to be smaller than old_kms (checked above)
-		 * so kms can only ever be smaller or the same as old_kms. */
+		 * so kms can only ever be smaller or the same as old_kms.
+		 */
 		if (lck->l_policy_data.l_extent.end + 1 > kms)
 			kms = lck->l_policy_data.l_extent.end + 1;
 	}
@@ -113,7 +116,7 @@
 
 	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
 	node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO);
-	if (node == NULL)
+	if (!node)
 		return NULL;
 
 	INIT_LIST_HEAD(&node->li_group);
@@ -134,7 +137,7 @@
 {
 	struct ldlm_interval *n = l->l_tree_node;
 
-	if (n == NULL)
+	if (!n)
 		return NULL;
 
 	LASSERT(!list_empty(&n->li_group));
@@ -144,7 +147,7 @@
 	return list_empty(&n->li_group) ? n : NULL;
 }
 
-static inline int lock_mode_to_index(ldlm_mode_t mode)
+static inline int lock_mode_to_index(enum ldlm_mode mode)
 {
 	int index;
 
@@ -168,7 +171,7 @@
 	LASSERT(lock->l_granted_mode == lock->l_req_mode);
 
 	node = lock->l_tree_node;
-	LASSERT(node != NULL);
+	LASSERT(node);
 	LASSERT(!interval_is_intree(&node->li_node));
 
 	idx = lock_mode_to_index(lock->l_granted_mode);
@@ -185,14 +188,14 @@
 		struct ldlm_interval *tmp;
 
 		tmp = ldlm_interval_detach(lock);
-		LASSERT(tmp != NULL);
 		ldlm_interval_free(tmp);
 		ldlm_interval_attach(to_ldlm_interval(found), lock);
 	}
 	res->lr_itree[idx].lit_size++;
 
 	/* even though we use interval tree to manage the extent lock, we also
-	 * add the locks into grant list, for debug purpose, .. */
+	 * add the locks into grant list, for debug purpose, ..
+	 */
 	ldlm_resource_add_lock(res, &res->lr_granted, lock);
 }
 
@@ -211,7 +214,7 @@
 	LASSERT(lock->l_granted_mode == 1 << idx);
 	tree = &res->lr_itree[idx];
 
-	LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
+	LASSERT(tree->lit_root); /* assure the tree is not null */
 
 	tree->lit_size--;
 	node = ldlm_interval_detach(lock);
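
The "a lock on [x, y] protects a KMS of up to y + 1 bytes" rule above is the heart of ldlm_extent_shift_kms(). A hedged sketch of that recomputation over a plain array follows; the early-return test is an assumption, since the corresponding line of the real loop falls outside this hunk, and the ignore flag merely stands in for LDLM_FL_KMS_IGNORE.

/* Sketch: recompute the known-minimum-size over remaining granted
 * extents.  Toy types only; not the ldlm data structures. */
#include <stdint.h>
#include <stddef.h>

struct toy_extent {
	uint64_t start;
	uint64_t end;		/* inclusive: protects sizes up to end + 1 */
	int	 ignore;	/* stands in for LDLM_FL_KMS_IGNORE */
};

uint64_t toy_shift_kms(const struct toy_extent *granted, size_t n,
		       uint64_t old_kms)
{
	uint64_t kms = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (granted[i].ignore)
			continue;
		/* assumed early return: a surviving lock reaching old_kms
		 * means the KMS cannot shrink */
		if (granted[i].end >= old_kms)
			return old_kms;
		if (granted[i].end + 1 > kms)
			kms = granted[i].end + 1;
	}
	return kms;
}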
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 4310154..b88b786 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -92,7 +92,7 @@
 }
 
 static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
 {
 	LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
 		   mode, flags);
@@ -107,7 +107,8 @@
 		lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
 
 		/* when reaching here, it is under lock_res_and_lock(). Thus,
-		   need call the nolock version of ldlm_lock_decref_internal*/
+		 * we need to call the nolock version of ldlm_lock_decref_internal
+		 */
 		ldlm_lock_decref_internal_nolock(lock, mode);
 	}
 
@@ -133,7 +134,7 @@
  *     would be collected and ASTs sent.
  */
 static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
-				   int first_enq, ldlm_error_t *err,
+				   int first_enq, enum ldlm_error *err,
 				   struct list_head *work_list)
 {
 	struct ldlm_resource *res = req->l_resource;
@@ -143,7 +144,7 @@
 	struct ldlm_lock *lock = NULL;
 	struct ldlm_lock *new = req;
 	struct ldlm_lock *new2 = NULL;
-	ldlm_mode_t mode = req->l_req_mode;
+	enum ldlm_mode mode = req->l_req_mode;
 	int added = (mode == LCK_NL);
 	int overlaps = 0;
 	int splitted = 0;
@@ -159,13 +160,15 @@
 	*err = ELDLM_OK;
 
 	/* No blocking ASTs are sent to the clients for
-	 * Posix file & record locks */
+	 * Posix file & record locks
+	 */
 	req->l_blocking_ast = NULL;
 
 reprocess:
 	if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
 		/* This loop determines where this processes locks start
-		 * in the resource lr_granted list. */
+		 * in the resource lr_granted list.
+		 */
 		list_for_each(tmp, &res->lr_granted) {
 			lock = list_entry(tmp, struct ldlm_lock,
 					      l_res_link);
@@ -180,7 +183,8 @@
 		lockmode_verify(mode);
 
 		/* This loop determines if there are existing locks
-		 * that conflict with the new lock request. */
+		 * that conflict with the new lock request.
+		 */
 		list_for_each(tmp, &res->lr_granted) {
 			lock = list_entry(tmp, struct ldlm_lock,
 					      l_res_link);
@@ -238,8 +242,8 @@
 	}
 
 	/* Scan the locks owned by this process that overlap this request.
-	 * We may have to merge or split existing locks. */
-
+	 * We may have to merge or split existing locks.
+	 */
 	if (!ownlocks)
 		ownlocks = &res->lr_granted;
 
@@ -253,7 +257,8 @@
 			/* If the modes are the same then we need to process
 			 * locks that overlap OR adjoin the new lock. The extra
 			 * logic condition is necessary to deal with arithmetic
-			 * overflow and underflow. */
+			 * overflow and underflow.
+			 */
 			if ((new->l_policy_data.l_flock.start >
 			     (lock->l_policy_data.l_flock.end + 1))
 			    && (lock->l_policy_data.l_flock.end !=
@@ -327,11 +332,13 @@
 		 * with the request but this would complicate the reply
 		 * processing since updates to req get reflected in the
 		 * reply. The client side replays the lock request so
-		 * it must see the original lock data in the reply. */
+		 * it must see the original lock data in the reply.
+		 */
 
 		/* XXX - if ldlm_lock_new() can sleep we should
 		 * release the lr_lock, allocate the new lock,
-		 * and restart processing this lock. */
+		 * and restart processing this lock.
+		 */
 		if (!new2) {
 			unlock_res_and_lock(req);
 			new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
@@ -361,7 +368,7 @@
 		lock->l_policy_data.l_flock.start =
 			new->l_policy_data.l_flock.end + 1;
 		new2->l_conn_export = lock->l_conn_export;
-		if (lock->l_export != NULL) {
+		if (lock->l_export) {
 			new2->l_export = class_export_lock_get(lock->l_export,
 							       new2);
 			if (new2->l_export->exp_lock_hash &&
@@ -381,7 +388,7 @@
 	}
 
 	/* if new2 is created but never used, destroy it*/
-	if (splitted == 0 && new2 != NULL)
+	if (splitted == 0 && new2)
 		ldlm_lock_destroy_nolock(new2);
 
 	/* At this point we're granting the lock request. */
@@ -396,7 +403,8 @@
 	if (*flags != LDLM_FL_WAIT_NOREPROC) {
 		/* The only one possible case for client-side calls flock
 		 * policy function is ldlm_flock_completion_ast inside which
-		 * carries LDLM_FL_WAIT_NOREPROC flag. */
+		 * carries LDLM_FL_WAIT_NOREPROC flag.
+		 */
 		CERROR("Illegal parameter for client-side-only module.\n");
 		LBUG();
 	}
@@ -404,7 +412,8 @@
 	/* In case we're reprocessing the requested lock we can't destroy
 	 * it until after calling ldlm_add_ast_work_item() above so that laawi()
 	 * can bump the reference count on \a req. Otherwise \a req
-	 * could be freed before the completion AST can be sent.  */
+	 * could be freed before the completion AST can be sent.
+	 */
 	if (added)
 		ldlm_flock_destroy(req, mode, *flags);
 
@@ -449,7 +458,7 @@
 	struct obd_import	      *imp = NULL;
 	struct ldlm_flock_wait_data     fwd;
 	struct l_wait_info	      lwi;
-	ldlm_error_t		    err;
+	enum ldlm_error		    err;
 	int			     rc = 0;
 
 	CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
@@ -458,12 +467,12 @@
 	/* Import invalidation. We need to actually release the lock
 	 * references being held, so that it can go away. No point in
 	 * holding the lock even if app still believes it has it, since
-	 * server already dropped it anyway. Only for granted locks too. */
+	 * server already dropped it anyway. Only for granted locks too.
+	 */
 	if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
 	    (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
 		if (lock->l_req_mode == lock->l_granted_mode &&
-		    lock->l_granted_mode != LCK_NL &&
-		    data == NULL)
+		    lock->l_granted_mode != LCK_NL && !data)
 			ldlm_lock_decref_internal(lock, lock->l_req_mode);
 
 		/* Need to wake up the waiter if we were evicted */
@@ -475,7 +484,7 @@
 
 	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
 		       LDLM_FL_BLOCK_CONV))) {
-		if (data == NULL)
+		if (!data)
 			/* mds granted the lock in the reply */
 			goto granted;
 		/* CP AST RPC: lock get granted, wake it up */
@@ -488,10 +497,10 @@
 	obd = class_exp2obd(lock->l_conn_export);
 
 	/* if this is a local lock, there is no import */
-	if (obd != NULL)
+	if (obd)
 		imp = obd->u.cli.cl_import;
 
-	if (imp != NULL) {
+	if (imp) {
 		spin_lock(&imp->imp_lock);
 		fwd.fwd_generation = imp->imp_generation;
 		spin_unlock(&imp->imp_lock);
@@ -540,7 +549,8 @@
 	} else if (flags & LDLM_FL_TEST_LOCK) {
 		/* fcntl(F_GETLK) request */
 		/* The old mode was saved in getlk->fl_type so that if the mode
-		 * in the lock changes we can decref the appropriate refcount.*/
+		 * in the lock changes we can decref the appropriate refcount.
+		 */
 		ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
 		switch (lock->l_granted_mode) {
 		case LCK_PR:
@@ -559,7 +569,8 @@
 		__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
 
 		/* We need to reprocess the lock to do merges or splits
-		 * with existing locks owned by this process. */
+		 * with existing locks owned by this process.
+		 */
 		ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
 	}
 	unlock_res_and_lock(lock);
@@ -576,7 +587,8 @@
 	lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
 	/* Compat code, old clients had no idea about owner field and
 	 * relied solely on pid for ownership. Introduced in LU-104, 2.1,
-	 * April 2011 */
+	 * April 2011
+	 */
 	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
 }
 
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 849cc98..e21373e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -96,14 +96,15 @@
 	LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
 	LDLM_CANCEL_LRUR   = 1 << 3, /* Cancel locks from lru resize. */
 	LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
-				      * sending nor waiting for any rpcs) */
+				      * sending nor waiting for any rpcs)
+				      */
 };
 
 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
-		    ldlm_cancel_flags_t sync, int flags);
+		    enum ldlm_cancel_flags sync, int flags);
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
 			 struct list_head *cancels, int count, int max,
-			 ldlm_cancel_flags_t cancel_flags, int flags);
+			 enum ldlm_cancel_flags cancel_flags, int flags);
 extern int ldlm_enqueue_min;
 
 /* ldlm_resource.c */
@@ -133,11 +134,11 @@
 		  enum req_location loc, void *data, int size);
 struct ldlm_lock *
 ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
-		 ldlm_type_t type, ldlm_mode_t,
+		 enum ldlm_type type, enum ldlm_mode mode,
 		 const struct ldlm_callback_suite *cbs,
 		 void *data, __u32 lvb_len, enum lvb_type lvb_type);
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
-			       void *cookie, __u64 *flags);
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
+				  void *cookie, __u64 *flags);
 void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
@@ -154,7 +155,7 @@
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
 			   struct ldlm_lock_desc *ld,
 			   struct list_head *cancels, int count,
-			   ldlm_cancel_flags_t cancel_flags);
+			   enum ldlm_cancel_flags cancel_flags);
 
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
 			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 3c8d441..b586d5a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -219,7 +219,8 @@
 void client_destroy_import(struct obd_import *imp)
 {
 	/* Drop security policy instance after all RPCs have finished/aborted
-	 * to let all busy contexts be released. */
+	 * to let all busy contexts be released.
+	 */
 	class_import_get(imp);
 	class_destroy_import(imp);
 	sptlrpc_import_sec_put(imp);
@@ -227,29 +228,6 @@
 }
 EXPORT_SYMBOL(client_destroy_import);
 
-/**
- * Check whether or not the OSC is on MDT.
- * In the config log,
- * osc on MDT
- *	setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID
- * osc on client
- *	setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID
- *
- **/
-static int osc_on_mdt(char *obdname)
-{
-	char *ptr;
-
-	ptr = strrchr(obdname, '-');
-	if (ptr == NULL)
-		return 0;
-
-	if (strncmp(ptr + 1, "MDT", 3) == 0)
-		return 1;
-
-	return 0;
-}
-
 /* Configure an RPC client OBD device.
  *
  * lcfg parameters:
@@ -264,11 +242,12 @@
 	struct obd_uuid server_uuid;
 	int rq_portal, rp_portal, connect_op;
 	char *name = obddev->obd_type->typ_name;
-	ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
+	enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN;
 	int rc;
 
 	/* In a more perfect world, we would hang a ptlrpc_client off of
-	 * obd_type and just use the values from there. */
+	 * obd_type and just use the values from there.
+	 */
 	if (!strcmp(name, LUSTRE_OSC_NAME)) {
 		rq_portal = OST_REQUEST_PORTAL;
 		rp_portal = OSC_REPLY_PORTAL;
@@ -284,22 +263,6 @@
 		cli->cl_sp_me = LUSTRE_SP_CLI;
 		cli->cl_sp_to = LUSTRE_SP_MDT;
 		ns_type = LDLM_NS_TYPE_MDC;
-	} else if (!strcmp(name, LUSTRE_OSP_NAME)) {
-		if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) {
-			/* OSP_on_MDT for other MDTs */
-			connect_op = MDS_CONNECT;
-			cli->cl_sp_to = LUSTRE_SP_MDT;
-			ns_type = LDLM_NS_TYPE_MDC;
-			rq_portal = OUT_PORTAL;
-		} else {
-			/* OSP on MDT for OST */
-			connect_op = OST_CONNECT;
-			cli->cl_sp_to = LUSTRE_SP_OST;
-			ns_type = LDLM_NS_TYPE_OSC;
-			rq_portal = OST_REQUEST_PORTAL;
-		}
-		rp_portal = OSC_REPLY_PORTAL;
-		cli->cl_sp_me = LUSTRE_SP_CLI;
 	} else if (!strcmp(name, LUSTRE_MGC_NAME)) {
 		rq_portal = MGS_REQUEST_PORTAL;
 		rp_portal = MGC_REPLY_PORTAL;
@@ -387,7 +350,8 @@
 	/* This value may be reduced at connect time in
 	 * ptlrpc_connect_interpret() . We initialize it to only
 	 * 1MB until we know what the performance looks like.
-	 * In the future this should likely be increased. LU-1431 */
+	 * In the future this should likely be increased. LU-1431
+	 */
 	cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
 					  LNET_MTU >> PAGE_CACHE_SHIFT);
 
@@ -400,10 +364,7 @@
 	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 4;
 	} else {
-		if (osc_on_mdt(obddev->obd_name))
-			cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT;
-		else
-			cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
+		cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
 	}
 	rc = ldlm_get_ref();
 	if (rc) {
@@ -415,7 +376,7 @@
 			   &obddev->obd_ldlm_client);
 
 	imp = class_new_import(obddev);
-	if (imp == NULL) {
+	if (!imp) {
 		rc = -ENOENT;
 		goto err_ldlm;
 	}
@@ -451,7 +412,7 @@
 						   LDLM_NAMESPACE_CLIENT,
 						   LDLM_NAMESPACE_GREEDY,
 						   ns_type);
-	if (obddev->obd_namespace == NULL) {
+	if (!obddev->obd_namespace) {
 		CERROR("Unable to create client namespace - %s\n",
 		       obddev->obd_name);
 		rc = -ENOMEM;
@@ -477,7 +438,7 @@
 	ldlm_namespace_free_post(obddev->obd_namespace);
 	obddev->obd_namespace = NULL;
 
-	LASSERT(obddev->u.cli.cl_import == NULL);
+	LASSERT(!obddev->u.cli.cl_import);
 
 	ldlm_put_ref();
 	return 0;
@@ -528,7 +489,7 @@
 		LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
 		goto out_ldlm;
 	}
-	LASSERT(*exp != NULL && (*exp)->exp_connection);
+	LASSERT(*exp && (*exp)->exp_connection);
 
 	if (data) {
 		LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
@@ -587,17 +548,19 @@
 
 	/* Mark import deactivated now, so we don't try to reconnect if any
 	 * of the cleanup RPCs fails (e.g. LDLM cancel, etc).  We don't
-	 * fully deactivate the import, or that would drop all requests. */
+	 * fully deactivate the import, or that would drop all requests.
+	 */
 	spin_lock(&imp->imp_lock);
 	imp->imp_deactive = 1;
 	spin_unlock(&imp->imp_lock);
 
 	/* Some non-replayable imports (MDS's OSCs) are pinged, so just
 	 * delete it regardless.  (It's safe to delete an import that was
-	 * never added.) */
+	 * never added.)
+	 */
 	(void)ptlrpc_pinger_del_import(imp);
 
-	if (obd->obd_namespace != NULL) {
+	if (obd->obd_namespace) {
 		/* obd_force == local only */
 		ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
 				       obd->obd_force ? LCF_LOCAL : 0, NULL);
@@ -606,7 +569,8 @@
 	}
 
 	/* There's no need to hold sem while disconnecting an import,
-	 * and it may actually cause deadlock in GSS. */
+	 * and it may actually cause deadlock in GSS.
+	 */
 	up_write(&cli->cl_sem);
 	rc = ptlrpc_disconnect_import(imp, 0);
 	down_write(&cli->cl_sem);
@@ -615,7 +579,8 @@
 
 out_disconnect:
 	/* Use server style - class_disconnect should be always called for
-	 * o_disconnect. */
+	 * o_disconnect.
+	 */
 	err = class_disconnect(exp);
 	if (!rc && err)
 		rc = err;
@@ -634,7 +599,8 @@
 	struct obd_device *obd;
 
 	/* Check that we still have all structures alive as this may
-	 * be some late RPC at shutdown time. */
+	 * be some late RPC at shutdown time.
+	 */
 	if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
 		     !exp_connect_lru_resize(req->rq_export))) {
 		lustre_msg_set_slv(req->rq_repmsg, 0);
@@ -684,14 +650,14 @@
 
 	svcpt = req->rq_rqbd->rqbd_svcpt;
 	rs = req->rq_reply_state;
-	if (rs == NULL || !rs->rs_difficult) {
+	if (!rs || !rs->rs_difficult) {
 		/* no notifiers */
 		target_send_reply_msg(req, rc, fail_id);
 		return;
 	}
 
 	/* must be an export if locks saved */
-	LASSERT(req->rq_export != NULL);
+	LASSERT(req->rq_export);
 	/* req/reply consistent */
 	LASSERT(rs->rs_svcpt == svcpt);
 
@@ -700,7 +666,7 @@
 	LASSERT(!rs->rs_scheduled_ever);
 	LASSERT(!rs->rs_handled);
 	LASSERT(!rs->rs_on_net);
-	LASSERT(rs->rs_export == NULL);
+	LASSERT(!rs->rs_export);
 	LASSERT(list_empty(&rs->rs_obd_list));
 	LASSERT(list_empty(&rs->rs_exp_list));
 
@@ -739,7 +705,8 @@
 		 * reply ref until ptlrpc_handle_rs() is done
 		 * with the reply state (if the send was successful, there
 		 * would have been +1 ref for the net, which
-		 * reply_out_callback leaves alone) */
+		 * reply_out_callback leaves alone)
+		 */
 		rs->rs_on_net = 0;
 		ptlrpc_rs_addref(rs);
 	}
@@ -760,7 +727,7 @@
 }
 EXPORT_SYMBOL(target_send_reply);
 
-ldlm_mode_t lck_compat_array[] = {
+enum ldlm_mode lck_compat_array[] = {
 	[LCK_EX]	= LCK_COMPAT_EX,
 	[LCK_PW]	= LCK_COMPAT_PW,
 	[LCK_PR]	= LCK_COMPAT_PR,
@@ -775,7 +742,7 @@
  * Rather arbitrary mapping from LDLM error codes to errno values. This should
  * not escape to the user level.
  */
-int ldlm_error2errno(ldlm_error_t error)
+int ldlm_error2errno(enum ldlm_error error)
 {
 	int result;
 
@@ -803,7 +770,7 @@
 		break;
 	default:
 		if (((int)error) < 0)  /* cast to signed type */
-			result = error; /* as ldlm_error_t can be unsigned */
+			result = error; /* as enum ldlm_error can be unsigned */
 		else {
 			CERROR("Invalid DLM result code: %d\n", error);
 			result = -EPROTO;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index cf9ec0c..98975ef 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -91,7 +91,7 @@
 /**
  * Converts lock policy from local format to on the wire lock_desc format
  */
-static void ldlm_convert_policy_to_wire(ldlm_type_t type,
+static void ldlm_convert_policy_to_wire(enum ldlm_type type,
 					const ldlm_policy_data_t *lpolicy,
 					ldlm_wire_policy_data_t *wpolicy)
 {
@@ -105,7 +105,7 @@
 /**
  * Converts lock policy from on the wire lock_desc format to local format
  */
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
 				  const ldlm_wire_policy_data_t *wpolicy,
 				  ldlm_policy_data_t *lpolicy)
 {
@@ -326,9 +326,11 @@
 
 	if (lock->l_export && lock->l_export->exp_lock_hash) {
 		/* NB: it's safe to call cfs_hash_del() even lock isn't
-		 * in exp_lock_hash. */
+		 * in exp_lock_hash.
+		 */
 		/* In the function below, .hs_keycmp resolves to
-		 * ldlm_export_lock_keycmp() */
+		 * ldlm_export_lock_keycmp()
+		 */
 		/* coverity[overrun-buffer-val] */
 		cfs_hash_del(lock->l_export->exp_lock_hash,
 			     &lock->l_remote_handle, &lock->l_exp_hash);
@@ -337,16 +339,6 @@
 	ldlm_lock_remove_from_lru(lock);
 	class_handle_unhash(&lock->l_handle);
 
-#if 0
-	/* Wake anyone waiting for this lock */
-	/* FIXME: I should probably add yet another flag, instead of using
-	 * l_export to only call this on clients */
-	if (lock->l_export)
-		class_export_put(lock->l_export);
-	lock->l_export = NULL;
-	if (lock->l_export && lock->l_completion_ast)
-		lock->l_completion_ast(lock, 0);
-#endif
 	return 1;
 }
 
@@ -412,11 +404,10 @@
 {
 	struct ldlm_lock *lock;
 
-	if (resource == NULL)
-		LBUG();
+	LASSERT(resource);
 
 	lock = kmem_cache_alloc(ldlm_lock_slab, GFP_NOFS | __GFP_ZERO);
-	if (lock == NULL)
+	if (!lock)
 		return NULL;
 
 	spin_lock_init(&lock->l_lock);
@@ -485,7 +476,7 @@
 	unlock_res_and_lock(lock);
 
 	newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
-	if (newres == NULL)
+	if (!newres)
 		return -ENOMEM;
 
 	lu_ref_add(&newres->lr_reference, "lock", lock);
@@ -547,11 +538,12 @@
 	LASSERT(handle);
 
 	lock = class_handle2object(handle->cookie);
-	if (lock == NULL)
+	if (!lock)
 		return NULL;
 
 	/* It's unlikely but possible that someone marked the lock as
-	 * destroyed after we did handle2object on it */
+	 * destroyed after we did handle2object on it
+	 */
 	if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
 		lu_ref_add(&lock->l_reference, "handle", current);
 		return lock;
@@ -559,7 +551,7 @@
 
 	lock_res_and_lock(lock);
 
-	LASSERT(lock->l_resource != NULL);
+	LASSERT(lock->l_resource);
 
 	lu_ref_add_atomic(&lock->l_reference, "handle", current);
 	if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
@@ -611,13 +603,14 @@
 		LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
 		lock->l_flags |= LDLM_FL_AST_SENT;
 		/* If the enqueuing client said so, tell the AST recipient to
-		 * discard dirty data, rather than writing back. */
+		 * discard dirty data, rather than writing back.
+		 */
 		if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
 			lock->l_flags |= LDLM_FL_DISCARD_DATA;
 		LASSERT(list_empty(&lock->l_bl_ast));
 		list_add(&lock->l_bl_ast, work_list);
 		LDLM_LOCK_GET(lock);
-		LASSERT(lock->l_blocking_lock == NULL);
+		LASSERT(!lock->l_blocking_lock);
 		lock->l_blocking_lock = LDLM_LOCK_GET(new);
 	}
 }
@@ -664,7 +657,7 @@
 	struct ldlm_lock *lock;
 
 	lock = ldlm_handle2lock(lockh);
-	LASSERT(lock != NULL);
+	LASSERT(lock);
 	ldlm_lock_addref_internal(lock, mode);
 	LDLM_LOCK_PUT(lock);
 }
@@ -708,7 +701,7 @@
 
 	result = -EAGAIN;
 	lock = ldlm_handle2lock(lockh);
-	if (lock != NULL) {
+	if (lock) {
 		lock_res_and_lock(lock);
 		if (lock->l_readers != 0 || lock->l_writers != 0 ||
 		    !(lock->l_flags & LDLM_FL_CBPENDING)) {
@@ -780,7 +773,8 @@
 	if (lock->l_flags & LDLM_FL_LOCAL &&
 	    !lock->l_readers && !lock->l_writers) {
 		/* If this is a local lock on a server namespace and this was
-		 * the last reference, cancel the lock. */
+		 * the last reference, cancel the lock.
+		 */
 		CDEBUG(D_INFO, "forcing cancel of local lock\n");
 		lock->l_flags |= LDLM_FL_CBPENDING;
 	}
@@ -788,7 +782,8 @@
 	if (!lock->l_readers && !lock->l_writers &&
 	    (lock->l_flags & LDLM_FL_CBPENDING)) {
 		/* If we received a blocked AST and this was the last reference,
-		 * run the callback. */
+		 * run the callback.
+		 */
 
 		LDLM_DEBUG(lock, "final decref done on cbpending lock");
 
@@ -809,7 +804,8 @@
 		LDLM_DEBUG(lock, "add lock into lru list");
 
 		/* If this is a client-side namespace and this was the last
-		 * reference, put it on the LRU. */
+		 * reference, put it on the LRU.
+		 */
 		ldlm_lock_add_to_lru(lock);
 		unlock_res_and_lock(lock);
 
@@ -818,7 +814,8 @@
 
 		/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
 		 * are not supported by the server, otherwise, it is done on
-		 * enqueue. */
+		 * enqueue.
+		 */
 		if (!exp_connect_cancelset(lock->l_conn_export) &&
 		    !ns_connect_lru_resize(ns))
 			ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
@@ -835,7 +832,7 @@
 {
 	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
-	LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
+	LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie);
 	ldlm_lock_decref_internal(lock, mode);
 	LDLM_LOCK_PUT(lock);
 }
@@ -852,7 +849,7 @@
 {
 	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
-	LASSERT(lock != NULL);
+	LASSERT(lock);
 
 	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
 	lock_res_and_lock(lock);
@@ -921,7 +918,8 @@
 				if (lock->l_policy_data.l_inodebits.bits ==
 				    req->l_policy_data.l_inodebits.bits) {
 					/* insert point is last lock of
-					 * the policy group */
+					 * the policy group
+					 */
 					prev->res_link =
 						&policy_end->l_res_link;
 					prev->mode_link =
@@ -942,7 +940,8 @@
 			}  /* loop over policy groups within the mode group */
 
 			/* insert point is last lock of the mode group,
-			 * new policy group is started */
+			 * new policy group is started
+			 */
 			prev->res_link = &mode_end->l_res_link;
 			prev->mode_link = &mode_end->l_sl_mode;
 			prev->policy_link = &req->l_sl_policy;
@@ -954,7 +953,8 @@
 	}
 
 	/* insert point is last lock on the queue,
-	 * new mode group and new policy group are started */
+	 * new mode group and new policy group are started
+	 */
 	prev->res_link = queue->prev;
 	prev->mode_link = &req->l_sl_mode;
 	prev->policy_link = &req->l_sl_policy;
@@ -1034,10 +1034,7 @@
 	else
 		ldlm_resource_add_lock(res, &res->lr_granted, lock);
 
-	if (lock->l_granted_mode < res->lr_most_restr)
-		res->lr_most_restr = lock->l_granted_mode;
-
-	if (work_list && lock->l_completion_ast != NULL)
+	if (work_list && lock->l_completion_ast)
 		ldlm_add_ast_work_item(lock, NULL, work_list);
 
 	ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
@@ -1050,7 +1047,7 @@
  * comment above ldlm_lock_match
  */
 static struct ldlm_lock *search_queue(struct list_head *queue,
-				      ldlm_mode_t *mode,
+				      enum ldlm_mode *mode,
 				      ldlm_policy_data_t *policy,
 				      struct ldlm_lock *old_lock,
 				      __u64 flags, int unref)
@@ -1059,7 +1056,7 @@
 	struct list_head       *tmp;
 
 	list_for_each(tmp, queue) {
-		ldlm_mode_t match;
+		enum ldlm_mode match;
 
 		lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
@@ -1067,7 +1064,8 @@
 			break;
 
 		/* Check if this lock can be matched.
-		 * Used by LU-2919(exclusive open) for open lease lock */
+		 * Used by LU-2919(exclusive open) for open lease lock
+		 */
 		if (ldlm_is_excl(lock))
 			continue;
 
@@ -1076,7 +1074,8 @@
 		 * if it passes in CBPENDING and the lock still has users.
 		 * this is generally only going to be used by children
 		 * whose parents already hold a lock so forward progress
-		 * can still happen. */
+		 * can still happen.
+		 */
 		if (lock->l_flags & LDLM_FL_CBPENDING &&
 		    !(flags & LDLM_FL_CBPENDING))
 			continue;
@@ -1100,7 +1099,8 @@
 			continue;
 
 		/* We match if we have existing lock with same or wider set
-		   of bits. */
+		 * of bits.
+		 */
 		if (lock->l_resource->lr_type == LDLM_IBITS &&
 		     ((lock->l_policy_data.l_inodebits.bits &
 		      policy->l_inodebits.bits) !=
@@ -1192,16 +1192,18 @@
  * keep caller code unchanged), the context failure will be discovered by
  * caller sometime later.
  */
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
-			    const struct ldlm_res_id *res_id, ldlm_type_t type,
-			    ldlm_policy_data_t *policy, ldlm_mode_t mode,
-			    struct lustre_handle *lockh, int unref)
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+			       const struct ldlm_res_id *res_id,
+			       enum ldlm_type type,
+			       ldlm_policy_data_t *policy,
+			       enum ldlm_mode mode,
+			       struct lustre_handle *lockh, int unref)
 {
 	struct ldlm_resource *res;
 	struct ldlm_lock *lock, *old_lock = NULL;
 	int rc = 0;
 
-	if (ns == NULL) {
+	if (!ns) {
 		old_lock = ldlm_handle2lock(lockh);
 		LASSERT(old_lock);
 
@@ -1212,8 +1214,8 @@
 	}
 
 	res = ldlm_resource_get(ns, NULL, res_id, type, 0);
-	if (res == NULL) {
-		LASSERT(old_lock == NULL);
+	if (!res) {
+		LASSERT(!old_lock);
 		return 0;
 	}
 
@@ -1222,7 +1224,7 @@
 
 	lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
 			    flags, unref);
-	if (lock != NULL) {
+	if (lock) {
 		rc = 1;
 		goto out;
 	}
@@ -1232,7 +1234,7 @@
 	}
 	lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
 			    flags, unref);
-	if (lock != NULL) {
+	if (lock) {
 		rc = 1;
 		goto out;
 	}
@@ -1317,14 +1319,14 @@
 }
 EXPORT_SYMBOL(ldlm_lock_match);
 
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
-					__u64 *bits)
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+					   __u64 *bits)
 {
 	struct ldlm_lock *lock;
-	ldlm_mode_t mode = 0;
+	enum ldlm_mode mode = 0;
 
 	lock = ldlm_handle2lock(lockh);
-	if (lock != NULL) {
+	if (lock) {
 		lock_res_and_lock(lock);
 		if (lock->l_flags & LDLM_FL_GONE_MASK)
 			goto out;
@@ -1340,7 +1342,7 @@
 	}
 
 out:
-	if (lock != NULL) {
+	if (lock) {
 		unlock_res_and_lock(lock);
 		LDLM_LOCK_PUT(lock);
 	}
@@ -1354,7 +1356,7 @@
 {
 	void *lvb;
 
-	LASSERT(data != NULL);
+	LASSERT(data);
 	LASSERT(size >= 0);
 
 	switch (lock->l_lvb_type) {
@@ -1368,7 +1370,7 @@
 				lvb = req_capsule_server_swab_get(pill,
 						&RMF_DLM_LVB,
 						lustre_swab_ost_lvb);
-			if (unlikely(lvb == NULL)) {
+			if (unlikely(!lvb)) {
 				LDLM_ERROR(lock, "no LVB");
 				return -EPROTO;
 			}
@@ -1385,7 +1387,7 @@
 				lvb = req_capsule_server_sized_swab_get(pill,
 						&RMF_DLM_LVB, size,
 						lustre_swab_ost_lvb_v1);
-			if (unlikely(lvb == NULL)) {
+			if (unlikely(!lvb)) {
 				LDLM_ERROR(lock, "no LVB");
 				return -EPROTO;
 			}
@@ -1410,7 +1412,7 @@
 				lvb = req_capsule_server_swab_get(pill,
 						&RMF_DLM_LVB,
 						lustre_swab_lquota_lvb);
-			if (unlikely(lvb == NULL)) {
+			if (unlikely(!lvb)) {
 				LDLM_ERROR(lock, "no LVB");
 				return -EPROTO;
 			}
@@ -1431,7 +1433,7 @@
 			lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
 		else
 			lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
-		if (unlikely(lvb == NULL)) {
+		if (unlikely(!lvb)) {
 			LDLM_ERROR(lock, "no LVB");
 			return -EPROTO;
 		}
@@ -1453,8 +1455,8 @@
  */
 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
 				   const struct ldlm_res_id *res_id,
-				   ldlm_type_t type,
-				   ldlm_mode_t mode,
+				   enum ldlm_type type,
+				   enum ldlm_mode mode,
 				   const struct ldlm_callback_suite *cbs,
 				   void *data, __u32 lvb_len,
 				   enum lvb_type lvb_type)
@@ -1463,12 +1465,12 @@
 	struct ldlm_resource *res;
 
 	res = ldlm_resource_get(ns, NULL, res_id, type, 1);
-	if (res == NULL)
+	if (!res)
 		return NULL;
 
 	lock = ldlm_lock_new(res);
 
-	if (lock == NULL)
+	if (!lock)
 		return NULL;
 
 	lock->l_req_mode = mode;
@@ -1483,7 +1485,7 @@
 	lock->l_tree_node = NULL;
 	/* if this is the extent lock, allocate the interval tree node */
 	if (type == LDLM_EXTENT) {
-		if (ldlm_interval_alloc(lock) == NULL)
+		if (!ldlm_interval_alloc(lock))
 			goto out;
 	}
 
@@ -1514,9 +1516,9 @@
  * Does not block. As a result of enqueue the lock would be put
  * into granted or waiting list.
  */
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
-			       struct ldlm_lock **lockp,
-			       void *cookie, __u64 *flags)
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
+				  struct ldlm_lock **lockp,
+				  void *cookie, __u64 *flags)
 {
 	struct ldlm_lock *lock = *lockp;
 	struct ldlm_resource *res = lock->l_resource;
@@ -1527,7 +1529,8 @@
 	if (lock->l_req_mode == lock->l_granted_mode) {
 		/* The server returned a blocked lock, but it was granted
 		 * before we got a chance to actually enqueue it.  We don't
-		 * need to do anything else. */
+		 * need to do anything else.
+		 */
 		*flags &= ~(LDLM_FL_BLOCK_GRANTED |
 			    LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
 		goto out;
@@ -1540,7 +1543,8 @@
 		LBUG();
 
 	/* Some flags from the enqueue want to make it into the AST, via the
-	 * lock's l_flags. */
+	 * lock's l_flags.
+	 */
 	lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
 
 	/*
@@ -1621,19 +1625,21 @@
 	 * This can't happen with the blocking_ast, however, because we
 	 * will never call the local blocking_ast until we drop our
 	 * reader/writer reference, which we won't do until we get the
-	 * reply and finish enqueueing. */
+	 * reply and finish enqueueing.
+	 */
 
 	/* nobody should touch l_cp_ast */
 	lock_res_and_lock(lock);
 	list_del_init(&lock->l_cp_ast);
 	LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
 	/* save l_completion_ast since it can be changed by
-	 * mds_intent_policy(), see bug 14225 */
+	 * mds_intent_policy(), see bug 14225
+	 */
 	completion_callback = lock->l_completion_ast;
 	lock->l_flags &= ~LDLM_FL_CP_REQD;
 	unlock_res_and_lock(lock);
 
-	if (completion_callback != NULL)
+	if (completion_callback)
 		rc = completion_callback(lock, 0, (void *)arg);
 	LDLM_LOCK_RELEASE(lock);
 
@@ -1749,10 +1755,11 @@
 	/* We create a ptlrpc request set with flow control extension.
 	 * This request set will use the work_ast_lock function to produce new
 	 * requests and will send a new request each time one completes in order
-	 * to keep the number of requests in flight to ns_max_parallel_ast */
+	 * to keep the number of requests in flight to ns_max_parallel_ast
+	 */
 	arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
 				     work_ast_lock, arg);
-	if (arg->set == NULL) {
+	if (!arg->set) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1815,7 +1822,8 @@
 	ns  = ldlm_res_to_ns(res);
 
 	/* Please do not, no matter how tempting, remove this LBUG without
-	 * talking to me first. -phik */
+	 * talking to me first. -phik
+	 */
 	if (lock->l_readers || lock->l_writers) {
 		LDLM_ERROR(lock, "lock still has references");
 		LBUG();
@@ -1831,7 +1839,8 @@
 		ldlm_pool_del(&ns->ns_pool, lock);
 
 	/* Make sure we will not be called again for same lock what is possible
-	 * if not to zero out lock->l_granted_mode */
+	 * if not to zero out lock->l_granted_mode
+	 */
 	lock->l_granted_mode = LCK_MINMODE;
 	unlock_res_and_lock(lock);
 }
@@ -1846,7 +1855,7 @@
 	int rc = -EINVAL;
 
 	if (lock) {
-		if (lock->l_ast_data == NULL)
+		if (!lock->l_ast_data)
 			lock->l_ast_data = data;
 		if (lock->l_ast_data == data)
 			rc = 0;
@@ -1874,7 +1883,7 @@
 		return;
 
 	lock = ldlm_handle2lock(lockh);
-	if (lock == NULL)
+	if (!lock)
 		return;
 
 	LDLM_DEBUG_LIMIT(level, lock, "###");
@@ -1900,13 +1909,13 @@
 
 	if (exp && exp->exp_connection) {
 		nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
-	} else if (exp && exp->exp_obd != NULL) {
+	} else if (exp && exp->exp_obd) {
 		struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
 
 		nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
 	}
 
-	if (resource == NULL) {
+	if (!resource) {
 		libcfs_debug_vmsg2(msgdata, fmt, args,
 				   " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
 				   lock,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 79aeb2bf..ebe9042 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -107,7 +107,7 @@
 	struct list_head	      blwi_head;
 	int		     blwi_count;
 	struct completion	blwi_comp;
-	ldlm_cancel_flags_t     blwi_flags;
+	enum ldlm_cancel_flags  blwi_flags;
 	int		     blwi_mem_pressure;
 };
 
@@ -136,7 +136,7 @@
 		CDEBUG(D_DLMTRACE,
 		       "Lock %p already unused, calling callback (%p)\n", lock,
 		       lock->l_blocking_ast);
-		if (lock->l_blocking_ast != NULL)
+		if (lock->l_blocking_ast)
 			lock->l_blocking_ast(lock, ld, lock->l_ast_data,
 					     LDLM_CB_BLOCKING);
 	} else {
@@ -185,7 +185,7 @@
 	} else if (lvb_len > 0) {
 		if (lock->l_lvb_len > 0) {
 			/* for extent lock, lvb contains ost_lvb{}. */
-			LASSERT(lock->l_lvb_data != NULL);
+			LASSERT(lock->l_lvb_data);
 
 			if (unlikely(lock->l_lvb_len < lvb_len)) {
 				LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
@@ -194,7 +194,8 @@
 				goto out;
 			}
 		} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
-						     * variable length */
+						     * variable length
+						     */
 			void *lvb_data;
 
 			lvb_data = kzalloc(lvb_len, GFP_NOFS);
@@ -205,7 +206,7 @@
 			}
 
 			lock_res_and_lock(lock);
-			LASSERT(lock->l_lvb_data == NULL);
+			LASSERT(!lock->l_lvb_data);
 			lock->l_lvb_type = LVB_T_LAYOUT;
 			lock->l_lvb_data = lvb_data;
 			lock->l_lvb_len = lvb_len;
@@ -224,7 +225,8 @@
 	}
 
 	/* If we receive the completion AST before the actual enqueue returned,
-	 * then we might need to switch lock modes, resources, or extents. */
+	 * then we might need to switch lock modes, resources, or extents.
+	 */
 	if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
 		lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
 		LDLM_DEBUG(lock, "completion AST, new lock mode");
@@ -256,7 +258,8 @@
 
 	if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
 		/* BL_AST locks are not needed in LRU.
-		 * Let ldlm_cancel_lru() be fast. */
+		 * Let ldlm_cancel_lru() be fast.
+		 */
 		ldlm_lock_remove_from_lru(lock);
 		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
 		LDLM_DEBUG(lock, "completion AST includes blocking AST");
@@ -276,8 +279,7 @@
 
 	LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
 
-	/* Let Enqueue to call osc_lock_upcall() and initialize
-	 * l_ast_data */
+	/* Let Enqueue to call osc_lock_upcall() and initialize l_ast_data */
 	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
 
 	ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
@@ -312,10 +314,10 @@
 
 	LDLM_DEBUG(lock, "client glimpse AST callback handler");
 
-	if (lock->l_glimpse_ast != NULL)
+	if (lock->l_glimpse_ast)
 		rc = lock->l_glimpse_ast(lock, req);
 
-	if (req->rq_repmsg != NULL) {
+	if (req->rq_repmsg) {
 		ptlrpc_reply(req);
 	} else {
 		req->rq_status = rc;
@@ -353,7 +355,7 @@
 }
 
 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
-			       ldlm_cancel_flags_t cancel_flags)
+			       enum ldlm_cancel_flags cancel_flags)
 {
 	struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
 
@@ -371,7 +373,8 @@
 	wake_up(&blp->blp_waitq);
 
 	/* can not check blwi->blwi_flags as blwi could be already freed in
-	   LCF_ASYNC mode */
+	 * LCF_ASYNC mode
+	 */
 	if (!(cancel_flags & LCF_ASYNC))
 		wait_for_completion(&blwi->blwi_comp);
 
@@ -383,7 +386,7 @@
 			     struct ldlm_lock_desc *ld,
 			     struct list_head *cancels, int count,
 			     struct ldlm_lock *lock,
-			     ldlm_cancel_flags_t cancel_flags)
+			     enum ldlm_cancel_flags cancel_flags)
 {
 	init_completion(&blwi->blwi_comp);
 	INIT_LIST_HEAD(&blwi->blwi_head);
@@ -393,7 +396,7 @@
 
 	blwi->blwi_ns = ns;
 	blwi->blwi_flags = cancel_flags;
-	if (ld != NULL)
+	if (ld)
 		blwi->blwi_ld = *ld;
 	if (count) {
 		list_add(&blwi->blwi_head, cancels);
@@ -417,7 +420,7 @@
 			     struct ldlm_lock_desc *ld,
 			     struct ldlm_lock *lock,
 			     struct list_head *cancels, int count,
-			     ldlm_cancel_flags_t cancel_flags)
+			     enum ldlm_cancel_flags cancel_flags)
 {
 	if (cancels && count == 0)
 		return 0;
@@ -451,7 +454,7 @@
 
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
 			   struct list_head *cancels, int count,
-			   ldlm_cancel_flags_t cancel_flags)
+			   enum ldlm_cancel_flags cancel_flags)
 {
 	return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
 }
@@ -470,14 +473,14 @@
 	req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
 
 	key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
-	if (key == NULL) {
+	if (!key) {
 		DEBUG_REQ(D_IOCTL, req, "no set_info key");
 		return -EFAULT;
 	}
 	keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
 				      RCL_CLIENT);
 	val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
-	if (val == NULL) {
+	if (!val) {
 		DEBUG_REQ(D_IOCTL, req, "no set_info val");
 		return -EFAULT;
 	}
@@ -519,7 +522,7 @@
 	struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
 
 	oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
-	if (oqctl == NULL) {
+	if (!oqctl) {
 		CERROR("Can't unpack obd_quotactl\n");
 		return -EPROTO;
 	}
@@ -541,7 +544,8 @@
 	/* Requests arrive in sender's byte order.  The ptlrpc service
 	 * handler has already checked and, if necessary, byte-swapped the
 	 * incoming request message body, but I am responsible for the
-	 * message buffers. */
+	 * message buffers.
+	 */
 
 	/* do nothing for sec context finalize */
 	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
@@ -549,15 +553,14 @@
 
 	req_capsule_init(&req->rq_pill, req, RCL_SERVER);
 
-	if (req->rq_export == NULL) {
+	if (!req->rq_export) {
 		rc = ldlm_callback_reply(req, -ENOTCONN);
 		ldlm_callback_errmsg(req, "Operate on unconnected server",
 				     rc, NULL);
 		return 0;
 	}
 
-	LASSERT(req->rq_export != NULL);
-	LASSERT(req->rq_export->exp_obd != NULL);
+	LASSERT(req->rq_export->exp_obd);
 
 	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
 	case LDLM_BL_CALLBACK:
@@ -591,12 +594,12 @@
 	}
 
 	ns = req->rq_export->exp_obd->obd_namespace;
-	LASSERT(ns != NULL);
+	LASSERT(ns);
 
 	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
 
 	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
-	if (dlm_req == NULL) {
+	if (!dlm_req) {
 		rc = ldlm_callback_reply(req, -EPROTO);
 		ldlm_callback_errmsg(req, "Operate without parameter", rc,
 				     NULL);
@@ -604,7 +607,8 @@
 	}
 
 	/* Force a known safe race, send a cancel to the server for a lock
-	 * which the server has already started a blocking callback on. */
+	 * which the server has already started a blocking callback on.
+	 */
 	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
 	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
 		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
@@ -634,7 +638,8 @@
 		/* If somebody cancels lock and cache is already dropped,
 		 * or lock is failed before cp_ast received on client,
 		 * we can tell the server we have no lock. Otherwise, we
-		 * should send cancel after dropping the cache. */
+		 * should send cancel after dropping the cache.
+		 */
 		if (((lock->l_flags & LDLM_FL_CANCELING) &&
 		    (lock->l_flags & LDLM_FL_BL_DONE)) ||
 		    (lock->l_flags & LDLM_FL_FAILED)) {
@@ -648,7 +653,8 @@
 			return 0;
 		}
 		/* BL_AST locks are not needed in LRU.
-		 * Let ldlm_cancel_lru() be fast. */
+		 * Let ldlm_cancel_lru() be fast.
+		 */
 		ldlm_lock_remove_from_lru(lock);
 		lock->l_flags |= LDLM_FL_BL_AST;
 	}
@@ -661,7 +667,8 @@
 	 * But we'd also like to be able to indicate in the reply that we're
 	 * cancelling right now, because it's unused, or have an intent result
 	 * in the reply, so we might have to push the responsibility for sending
-	 * the reply down into the AST handlers, alas. */
+	 * the reply down into the AST handlers, alas.
+	 */
 
 	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
 	case LDLM_BL_CALLBACK:
@@ -781,17 +788,17 @@
 
 		blwi = ldlm_bl_get_work(blp);
 
-		if (blwi == NULL) {
+		if (!blwi) {
 			atomic_dec(&blp->blp_busy_threads);
 			l_wait_event_exclusive(blp->blp_waitq,
-					 (blwi = ldlm_bl_get_work(blp)) != NULL,
+					 (blwi = ldlm_bl_get_work(blp)),
 					 &lwi);
 			busy = atomic_inc_return(&blp->blp_busy_threads);
 		} else {
 			busy = atomic_read(&blp->blp_busy_threads);
 		}
 
-		if (blwi->blwi_ns == NULL)
+		if (!blwi->blwi_ns)
 			/* added by ldlm_cleanup() */
 			break;
 
@@ -810,7 +817,8 @@
 			/* The special case when we cancel locks in LRU
 			 * asynchronously, we pass the list of locks here.
 			 * Thus locks are marked LDLM_FL_CANCELING, but NOT
-			 * canceled locally yet. */
+			 * canceled locally yet.
+			 */
 			count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
 							   blwi->blwi_count,
 							   LCF_BL_AST);
@@ -915,7 +923,7 @@
 	int rc = 0;
 	int i;
 
-	if (ldlm_state != NULL)
+	if (ldlm_state)
 		return -EALREADY;
 
 	ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
@@ -1040,7 +1048,7 @@
 
 	ldlm_pools_fini();
 
-	if (ldlm_state->ldlm_bl_pool != NULL) {
+	if (ldlm_state->ldlm_bl_pool) {
 		struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
 
 		while (atomic_read(&blp->blp_num_threads) > 0) {
@@ -1059,7 +1067,7 @@
 		kfree(blp);
 	}
 
-	if (ldlm_state->ldlm_cb_service != NULL)
+	if (ldlm_state->ldlm_cb_service)
 		ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
 
 	if (ldlm_ns_kset)
@@ -1085,13 +1093,13 @@
 	ldlm_resource_slab = kmem_cache_create("ldlm_resources",
 					       sizeof(struct ldlm_resource), 0,
 					       SLAB_HWCACHE_ALIGN, NULL);
-	if (ldlm_resource_slab == NULL)
+	if (!ldlm_resource_slab)
 		return -ENOMEM;
 
 	ldlm_lock_slab = kmem_cache_create("ldlm_locks",
 			      sizeof(struct ldlm_lock), 0,
 			      SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
-	if (ldlm_lock_slab == NULL) {
+	if (!ldlm_lock_slab) {
 		kmem_cache_destroy(ldlm_resource_slab);
 		return -ENOMEM;
 	}
@@ -1099,7 +1107,7 @@
 	ldlm_interval_slab = kmem_cache_create("interval_node",
 					sizeof(struct ldlm_interval),
 					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (ldlm_interval_slab == NULL) {
+	if (!ldlm_interval_slab) {
 		kmem_cache_destroy(ldlm_resource_slab);
 		kmem_cache_destroy(ldlm_lock_slab);
 		return -ENOMEM;
@@ -1117,7 +1125,8 @@
 	kmem_cache_destroy(ldlm_resource_slab);
 	/* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call
 	 * synchronize_rcu() to wait a grace period elapsed, so that
-	 * ldlm_lock_free() get a chance to be called. */
+	 * ldlm_lock_free() gets a chance to be called.
+	 */
 	synchronize_rcu();
 	kmem_cache_destroy(ldlm_lock_slab);
 	kmem_cache_destroy(ldlm_interval_slab);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3d7c137..3e937b0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -246,7 +246,6 @@
 	 */
 	obd = container_of(pl, struct ldlm_namespace,
 			   ns_pool)->ns_obd;
-	LASSERT(obd != NULL);
 	read_lock(&obd->obd_pool_lock);
 	pl->pl_server_lock_volume = obd->obd_pool_slv;
 	atomic_set(&pl->pl_limit, obd->obd_pool_limit);
@@ -381,7 +380,7 @@
 	spin_unlock(&pl->pl_lock);
 
  recalc:
-	if (pl->pl_ops->po_recalc != NULL) {
+	if (pl->pl_ops->po_recalc) {
 		count = pl->pl_ops->po_recalc(pl);
 		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
 				    count);
@@ -409,7 +408,7 @@
 {
 	int cancel = 0;
 
-	if (pl->pl_ops->po_shrink != NULL) {
+	if (pl->pl_ops->po_shrink) {
 		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
 		if (nr > 0) {
 			lprocfs_counter_add(pl->pl_stats,
@@ -643,11 +642,11 @@
 
 static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
 {
-	if (pl->pl_stats != NULL) {
+	if (pl->pl_stats) {
 		lprocfs_free_stats(&pl->pl_stats);
 		pl->pl_stats = NULL;
 	}
-	if (pl->pl_debugfs_entry != NULL) {
+	if (pl->pl_debugfs_entry) {
 		ldebugfs_remove(&pl->pl_debugfs_entry);
 		pl->pl_debugfs_entry = NULL;
 	}
@@ -834,7 +833,7 @@
 			continue;
 		}
 
-		if (ns_old == NULL)
+		if (!ns_old)
 			ns_old = ns;
 
 		ldlm_namespace_get(ns);
@@ -957,7 +956,7 @@
 			continue;
 		}
 
-		if (ns_old == NULL)
+		if (!ns_old)
 			ns_old = ns;
 
 		spin_lock(&ns->ns_lock);
@@ -1040,7 +1039,7 @@
 	struct l_wait_info lwi = { 0 };
 	struct task_struct *task;
 
-	if (ldlm_pools_thread != NULL)
+	if (ldlm_pools_thread)
 		return -EALREADY;
 
 	ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
@@ -1065,7 +1064,7 @@
 
 static void ldlm_pools_thread_stop(void)
 {
-	if (ldlm_pools_thread == NULL)
+	if (!ldlm_pools_thread)
 		return;
 
 	thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index b9eb377..2d501a6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -94,7 +94,7 @@
 	struct obd_import *imp;
 	struct obd_device *obd;
 
-	if (lock->l_conn_export == NULL) {
+	if (!lock->l_conn_export) {
 		static unsigned long next_dump, last_dump;
 
 		LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
@@ -128,7 +128,8 @@
 }
 
 /* We use the same basis for both server side and client side functions
-   from a single node. */
+ * from a single node.
+ */
 static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
 {
 	int timeout = at_get(ldlm_lock_to_ns_at(lock));
@@ -136,8 +137,9 @@
 	if (AT_OFF)
 		return obd_timeout / 2;
 	/* Since these are non-updating timeouts, we should be conservative.
-	   It would be nice to have some kind of "early reply" mechanism for
-	   lock callbacks too... */
+	 * It would be nice to have some kind of "early reply" mechanism for
+	 * lock callbacks too...
+	 */
 	timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
 	return max(timeout, ldlm_enqueue_min);
 }
@@ -239,12 +241,13 @@
 	obd = class_exp2obd(lock->l_conn_export);
 
 	/* if this is a local lock, then there is no import */
-	if (obd != NULL)
+	if (obd)
 		imp = obd->u.cli.cl_import;
 
 	/* Wait a long time for enqueue - server may have to callback a
-	   lock from another client.  Server will evict the other client if it
-	   doesn't respond reasonably, and then give us the lock. */
+	 * lock from another client.  Server will evict the other client if it
+	 * doesn't respond reasonably, and then give us the lock.
+	 */
 	timeout = ldlm_get_enq_timeout(lock) * 2;
 
 	lwd.lwd_lock = lock;
@@ -258,7 +261,7 @@
 				       interrupted_completion_wait, &lwd);
 	}
 
-	if (imp != NULL) {
+	if (imp) {
 		spin_lock(&imp->imp_lock);
 		lwd.lwd_conn_cnt = imp->imp_conn_cnt;
 		spin_unlock(&imp->imp_lock);
@@ -296,7 +299,8 @@
 	    !(lock->l_flags & LDLM_FL_FAILED)) {
 		/* Make sure that this lock will not be found by raced
 		 * bl_ast and -EINVAL reply is sent to server anyways.
-		 * bug 17645 */
+		 * bug 17645
+		 */
 		lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
 				 LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
 		need_cancel = 1;
@@ -312,11 +316,13 @@
 	ldlm_lock_decref_internal(lock, mode);
 
 	/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
-	 *       from llite/file.c/ll_file_flock(). */
+	 *       from llite/file.c/ll_file_flock().
+	 */
 	/* This code makes for the fact that we do not have blocking handler on
 	 * a client for flock locks. As such this is the place where we must
 	 * completely kill failed locks. (interrupted and those that
-	 * were waiting to be granted when server evicted us. */
+	 * were waiting to be granted when server evicted us.)
+	 */
 	if (lock->l_resource->lr_type == LDLM_FLOCK) {
 		lock_res_and_lock(lock);
 		ldlm_resource_unlink_lock(lock);
@@ -331,7 +337,8 @@
  * Called after receiving reply from server.
  */
 int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
-			  ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
+			  enum ldlm_type type, __u8 with_policy,
+			  enum ldlm_mode mode,
 			  __u64 *flags, void *lvb, __u32 lvb_len,
 			  struct lustre_handle *lockh, int rc)
 {
@@ -363,13 +370,13 @@
 
 	/* Before we return, swab the reply */
 	reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-	if (reply == NULL) {
+	if (!reply) {
 		rc = -EPROTO;
 		goto cleanup;
 	}
 
 	if (lvb_len != 0) {
-		LASSERT(lvb != NULL);
+		LASSERT(lvb);
 
 		size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
 					    RCL_SERVER);
@@ -401,7 +408,8 @@
 	/* Key change rehash lock in per-export hash with new key */
 	if (exp->exp_lock_hash) {
 		/* In the function below, .hs_keycmp resolves to
-		 * ldlm_export_lock_keycmp() */
+		 * ldlm_export_lock_keycmp()
+		 */
 		/* coverity[overrun-buffer-val] */
 		cfs_hash_rehash_key(exp->exp_lock_hash,
 				    &lock->l_remote_handle,
@@ -415,7 +423,8 @@
 	lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
 					      LDLM_INHERIT_FLAGS);
 	/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
-	 * to wait with no timeout as well */
+	 * to wait with no timeout as well
+	 */
 	lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
 					      LDLM_FL_NO_TIMEOUT);
 	unlock_res_and_lock(lock);
@@ -425,7 +434,8 @@
 
 	/* If enqueue returned a blocked lock but the completion handler has
 	 * already run, then it fixed up the resource and we don't need to do it
-	 * again. */
+	 * again.
+	 */
 	if ((*flags) & LDLM_FL_LOCK_CHANGED) {
 		int newmode = reply->lock_desc.l_req_mode;
 
@@ -445,7 +455,7 @@
 
 			rc = ldlm_lock_change_resource(ns, lock,
 					&reply->lock_desc.l_resource.lr_name);
-			if (rc || lock->l_resource == NULL) {
+			if (rc || !lock->l_resource) {
 				rc = -ENOMEM;
 				goto cleanup;
 			}
@@ -467,7 +477,8 @@
 	if ((*flags) & LDLM_FL_AST_SENT ||
 	    /* Cancel extent locks as soon as possible on a liblustre client,
 	     * because it cannot handle asynchronous ASTs robustly (see
-	     * bug 7311). */
+	     * bug 7311).
+	     */
 	    (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
 		lock_res_and_lock(lock);
 		lock->l_flags |= LDLM_FL_CBPENDING |  LDLM_FL_BL_AST;
@@ -476,12 +487,14 @@
 	}
 
 	/* If the lock has already been granted by a completion AST, don't
-	 * clobber the LVB with an older one. */
+	 * clobber the LVB with an older one.
+	 */
 	if (lvb_len != 0) {
 		/* We must lock or a racing completion might update lvb without
 		 * letting us know and we'll clobber the correct value.
-		 * Cannot unlock after the check either, a that still leaves
-		 * a tiny window for completion to get in */
+		 * Cannot unlock after the check either, as that still leaves
+		 * a tiny window for completion to get in
+		 */
 		lock_res_and_lock(lock);
 		if (lock->l_req_mode != lock->l_granted_mode)
 			rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
@@ -495,7 +508,7 @@
 
 	if (!is_replay) {
 		rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
-		if (lock->l_completion_ast != NULL) {
+		if (lock->l_completion_ast) {
 			int err = lock->l_completion_ast(lock, *flags, NULL);
 
 			if (!rc)
@@ -505,9 +518,10 @@
 		}
 	}
 
-	if (lvb_len && lvb != NULL) {
+	if (lvb_len && lvb) {
 		/* Copy the LVB here, and not earlier, because the completion
-		 * AST (if any) can override what we got in the reply */
+		 * AST (if any) can override what we got in the reply
+		 */
 		memcpy(lvb, lock->l_lvb_data, lvb_len);
 	}
 
@@ -579,7 +593,7 @@
 	LIST_HEAD(head);
 	int rc;
 
-	if (cancels == NULL)
+	if (!cancels)
 		cancels = &head;
 	if (ns_connect_cancelset(ns)) {
 		/* Estimate the amount of available space in the request. */
@@ -593,7 +607,8 @@
 
 		/* Cancel LRU locks here _only_ if the server supports
 		 * EARLY_CANCEL. Otherwise we have to send extra CANCEL
-		 * RPC, which will make us slower. */
+		 * RPC, which will make us slower.
+		 */
 		if (avail > count)
 			count += ldlm_cancel_lru_local(ns, cancels, to_free,
 						       avail - count, 0, flags);
@@ -618,7 +633,8 @@
 			/* Skip first lock handler in ldlm_request_pack(),
 			 * this method will increment @lock_count according
 			 * to the lock handle amount actually written to
-			 * the buffer. */
+			 * the buffer.
+			 */
 			dlm->lock_count = canceloff;
 		}
 		/* Pack into the request @pack lock handles. */
@@ -665,15 +681,14 @@
 	int		    rc, err;
 	struct ptlrpc_request *req;
 
-	LASSERT(exp != NULL);
-
 	ns = exp->exp_obd->obd_namespace;
 
 	/* If we're replaying this lock, just check some invariants.
-	 * If we're creating a new lock, get everything all setup nice. */
+	 * If we're creating a new lock, get everything set up nicely.
+	 */
 	if (is_replay) {
 		lock = ldlm_handle2lock_long(lockh, 0);
-		LASSERT(lock != NULL);
+		LASSERT(lock);
 		LDLM_DEBUG(lock, "client-side enqueue START");
 		LASSERT(exp == lock->l_conn_export);
 	} else {
@@ -685,13 +700,13 @@
 		lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
 					einfo->ei_mode, &cbs, einfo->ei_cbdata,
 					lvb_len, lvb_type);
-		if (lock == NULL)
+		if (!lock)
 			return -ENOMEM;
 		/* for the local lock, add the reference */
 		ldlm_lock_addref_internal(lock, einfo->ei_mode);
 		ldlm_lock2handle(lock, lockh);
-		if (policy != NULL)
-				lock->l_policy_data = *policy;
+		if (policy)
+			lock->l_policy_data = *policy;
 
 		if (einfo->ei_type == LDLM_EXTENT)
 			lock->l_req_extent = policy->l_extent;
@@ -706,12 +721,12 @@
 
 	/* lock not sent to server yet */
 
-	if (reqp == NULL || *reqp == NULL) {
+	if (!reqp || !*reqp) {
 		req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 						&RQF_LDLM_ENQUEUE,
 						LUSTRE_DLM_VERSION,
 						LDLM_ENQUEUE);
-		if (req == NULL) {
+		if (!req) {
 			failed_lock_cleanup(ns, lock, einfo->ei_mode);
 			LDLM_LOCK_RELEASE(lock);
 			return -ENOMEM;
@@ -754,7 +769,7 @@
 		     policy->l_extent.end == OBD_OBJECT_EOF));
 
 	if (async) {
-		LASSERT(reqp != NULL);
+		LASSERT(reqp);
 		return 0;
 	}
 
@@ -767,13 +782,14 @@
 				    lockh, rc);
 
 	/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
-	 * one reference that we took */
+	 * one reference that we took
+	 */
 	if (err == -ENOLCK)
 		LDLM_LOCK_RELEASE(lock);
 	else
 		rc = err;
 
-	if (!req_passed_in && req != NULL) {
+	if (!req_passed_in && req) {
 		ptlrpc_req_finished(req);
 		if (reqp)
 			*reqp = NULL;
@@ -832,7 +848,7 @@
 	int max, packed = 0;
 
 	dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
-	LASSERT(dlm != NULL);
+	LASSERT(dlm);
 
 	/* Check the room in the request buffer. */
 	max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
@@ -843,7 +859,8 @@
 
 	/* XXX: it would be better to pack lock handles grouped by resource.
 	 * so that the server cancel would call filter_lvbo_update() less
-	 * frequently. */
+	 * frequently.
+	 */
 	list_for_each_entry(lock, head, l_bl_ast) {
 		if (!count--)
 			break;
@@ -858,17 +875,18 @@
 
 /**
  * Prepare and send a batched cancel RPC. It will include \a count lock
- * handles of locks given in \a cancels list. */
+ * handles of locks given in \a cancels list.
+ */
 static int ldlm_cli_cancel_req(struct obd_export *exp,
 			       struct list_head *cancels,
-			       int count, ldlm_cancel_flags_t flags)
+			       int count, enum ldlm_cancel_flags flags)
 {
 	struct ptlrpc_request *req = NULL;
 	struct obd_import *imp;
 	int free, sent = 0;
 	int rc = 0;
 
-	LASSERT(exp != NULL);
+	LASSERT(exp);
 	LASSERT(count > 0);
 
 	CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
@@ -883,14 +901,14 @@
 
 	while (1) {
 		imp = class_exp2cliimp(exp);
-		if (imp == NULL || imp->imp_invalid) {
+		if (!imp || imp->imp_invalid) {
 			CDEBUG(D_DLMTRACE,
 			       "skipping cancel on invalid import %p\n", imp);
 			return count;
 		}
 
 		req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto out;
 		}
@@ -946,7 +964,6 @@
 
 static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
 {
-	LASSERT(imp != NULL);
 	return &imp->imp_obd->obd_namespace->ns_pool;
 }
 
@@ -971,7 +988,8 @@
 	 * is the case when server does not support LRU resize feature.
 	 * This is also possible in some recovery cases when server-side
 	 * reqs have no reference to the OBD export and thus access to
-	 * server-side namespace is not possible. */
+	 * server-side namespace is not possible.
+	 */
 	if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
 	    lustre_msg_get_limit(req->rq_repmsg) == 0) {
 		DEBUG_REQ(D_HA, req,
@@ -989,7 +1007,8 @@
 	 * to the pool thread. We do not access obd_namespace and pool
 	 * directly here as there is no reliable way to make sure that
 	 * they are still alive at cleanup time. Evil races are possible
-	 * which may cause Oops at that time. */
+	 * which may cause Oops at that time.
+	 */
 	write_lock(&obd->obd_pool_lock);
 	obd->obd_pool_slv = new_slv;
 	obd->obd_pool_limit = new_limit;
@@ -1005,7 +1024,7 @@
  * Lock must not have any readers or writers by this time.
  */
 int ldlm_cli_cancel(struct lustre_handle *lockh,
-		    ldlm_cancel_flags_t cancel_flags)
+		    enum ldlm_cancel_flags cancel_flags)
 {
 	struct obd_export *exp;
 	int avail, flags, count = 1;
@@ -1016,7 +1035,7 @@
 
 	/* concurrent cancels on the same handle can happen */
 	lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
-	if (lock == NULL) {
+	if (!lock) {
 		LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
 		return 0;
 	}
@@ -1028,7 +1047,8 @@
 	}
 	/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
 	 * RPC which goes to canceld portal, so we can cancel other LRU locks
-	 * here and send them all as one LDLM_CANCEL RPC. */
+	 * here and send them all as one LDLM_CANCEL RPC.
+	 */
 	LASSERT(list_empty(&lock->l_bl_ast));
 	list_add(&lock->l_bl_ast, &cancels);
 
@@ -1055,7 +1075,7 @@
  * Return the number of cancelled locks.
  */
 int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
-			       ldlm_cancel_flags_t flags)
+			       enum ldlm_cancel_flags flags)
 {
 	LIST_HEAD(head);
 	struct ldlm_lock *lock, *next;
@@ -1076,7 +1096,8 @@
 		/* Until we have compound requests and can send LDLM_CANCEL
 		 * requests batched with generic RPCs, we need to send cancels
 		 * with the LDLM_FL_BL_AST flag in a separate RPC from
-		 * the one being generated now. */
+		 * the one being generated now.
+		 */
 		if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
 			LDLM_DEBUG(lock, "Cancel lock separately");
 			list_del_init(&lock->l_bl_ast);
@@ -1116,7 +1137,8 @@
 	lock_res_and_lock(lock);
 
 	/* don't check added & count since we want to process all locks
-	 * from unused list */
+	 * from unused list
+	 */
 	switch (lock->l_resource->lr_type) {
 	case LDLM_EXTENT:
 	case LDLM_IBITS:
@@ -1152,7 +1174,8 @@
 	unsigned long la;
 
 	/* Stop LRU processing when we reach past @count or have checked all
-	 * locks in LRU. */
+	 * locks in LRU.
+	 */
 	if (count && added >= count)
 		return LDLM_POLICY_KEEP_LOCK;
 
@@ -1166,7 +1189,8 @@
 	ldlm_pool_set_clv(pl, lv);
 
 	/* Stop when SLV is not yet come from server or lv is smaller than
-	 * it is. */
+	 * it is.
+	 */
 	return (slv == 0 || lv < slv) ?
 		LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
@@ -1186,7 +1210,8 @@
 						   int count)
 {
 	/* Stop LRU processing when we reach past @count or have checked all
-	 * locks in LRU. */
+	 * locks in LRU.
+	 */
 	return (added >= count) ?
 		LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
@@ -1227,7 +1252,8 @@
 						    int count)
 {
 	/* Stop LRU processing when we reach past count or have checked all
-	 * locks in LRU. */
+	 * locks in LRU.
+	 */
 	return (added >= count) ?
 		LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
@@ -1307,7 +1333,7 @@
 		count += unused - ns->ns_max_unused;
 
 	pf = ldlm_cancel_lru_policy(ns, flags);
-	LASSERT(pf != NULL);
+	LASSERT(pf);
 
 	while (!list_empty(&ns->ns_unused_list)) {
 		ldlm_policy_res_t result;
@@ -1331,7 +1357,8 @@
 				continue;
 
 			/* Somebody is already doing CANCEL. No need for this
-			 * lock in LRU, do not traverse it again. */
+			 * lock in LRU, do not traverse it again.
+			 */
 			if (!(lock->l_flags & LDLM_FL_CANCELING))
 				break;
 
@@ -1380,7 +1407,8 @@
 			/* Another thread is removing lock from LRU, or
 			 * somebody is already doing CANCEL, or there
 			 * is a blocking request which will send cancel
-			 * by itself, or the lock is no longer unused. */
+			 * by itself, or the lock is no longer unused.
+			 */
 			unlock_res_and_lock(lock);
 			lu_ref_del(&lock->l_reference,
 				   __func__, current);
@@ -1394,7 +1422,8 @@
 		 * better send cancel notification to server, so that it
 		 * frees appropriate state. This might lead to a race
 		 * where while we are doing cancel here, server is also
-		 * silently cancelling this lock. */
+		 * silently cancelling this lock.
+		 */
 		lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
 
 		/* Setting the CBPENDING flag is a little misleading,
@@ -1402,7 +1431,8 @@
 		 * CBPENDING is set, the lock can accumulate no more
 		 * readers/writers. Since readers and writers are
 		 * already zero here, ldlm_lock_decref() won't see
-		 * this flag and call l_blocking_ast */
+		 * this flag and call l_blocking_ast
+		 */
 		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
 
 		/* We can't re-add to l_lru as it confuses the
@@ -1410,7 +1440,8 @@
 		 * arrives after we drop lr_lock below. We use l_bl_ast
 		 * and can't use l_pending_chain as it is used both on
 		 * server and client nevertheless bug 5666 says it is
-		 * used only on server */
+		 * used only on server
+		 */
 		LASSERT(list_empty(&lock->l_bl_ast));
 		list_add(&lock->l_bl_ast, cancels);
 		unlock_res_and_lock(lock);
@@ -1425,7 +1456,7 @@
 
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
 			  struct list_head *cancels, int count, int max,
-			  ldlm_cancel_flags_t cancel_flags, int flags)
+			  enum ldlm_cancel_flags cancel_flags, int flags)
 {
 	int added;
 
@@ -1444,14 +1475,15 @@
  * callback will be performed in this function.
  */
 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
-		    ldlm_cancel_flags_t cancel_flags,
+		    enum ldlm_cancel_flags cancel_flags,
 		    int flags)
 {
 	LIST_HEAD(cancels);
 	int count, rc;
 
 	/* Just prepare the list of locks, do not actually cancel them yet.
-	 * Locks are cancelled later in a separate thread. */
+	 * Locks are cancelled later in a separate thread.
+	 */
 	count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
 	rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
 	if (rc == 0)
@@ -1468,15 +1500,16 @@
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
 			       struct list_head *cancels,
 			       ldlm_policy_data_t *policy,
-			       ldlm_mode_t mode, __u64 lock_flags,
-			       ldlm_cancel_flags_t cancel_flags, void *opaque)
+			       enum ldlm_mode mode, __u64 lock_flags,
+			       enum ldlm_cancel_flags cancel_flags,
+			       void *opaque)
 {
 	struct ldlm_lock *lock;
 	int count = 0;
 
 	lock_res(res);
 	list_for_each_entry(lock, &res->lr_granted, l_res_link) {
-		if (opaque != NULL && lock->l_ast_data != opaque) {
+		if (opaque && lock->l_ast_data != opaque) {
 			LDLM_ERROR(lock, "data %p doesn't match opaque %p",
 				   lock->l_ast_data, opaque);
 			continue;
@@ -1486,7 +1519,8 @@
 			continue;
 
 		/* If somebody is already doing CANCEL, or blocking AST came,
-		 * skip this lock. */
+		 * skip this lock.
+		 */
 		if (lock->l_flags & LDLM_FL_BL_AST ||
 		    lock->l_flags & LDLM_FL_CANCELING)
 			continue;
@@ -1495,7 +1529,8 @@
 			continue;
 
 		/* If policy is given and this is IBITS lock, add to list only
-		 * those locks that match by policy. */
+		 * those locks that match by policy.
+		 */
 		if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
 		    !(lock->l_policy_data.l_inodebits.bits &
 		      policy->l_inodebits.bits))
@@ -1527,7 +1562,8 @@
  * Destroy \a cancels at the end.
  */
 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
-			 struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
+			 struct ptlrpc_request *req,
+			 enum ldlm_cancel_flags flags)
 {
 	struct ldlm_lock *lock;
 	int res = 0;
@@ -1539,7 +1575,8 @@
 	 * Usually it is enough to have just 1 RPC, but it is possible that
 	 * there are too many locks to be cancelled in LRU or on a resource.
 	 * It would also speed up the case when the server does not support
-	 * the feature. */
+	 * the feature.
+	 */
 	while (count > 0) {
 		LASSERT(!list_empty(cancels));
 		lock = list_entry(cancels->next, struct ldlm_lock,
@@ -1577,12 +1614,13 @@
  * Cancel all locks on a resource that have 0 readers/writers.
  *
  * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
- * to notify the server. */
+ * to notify the server.
+ */
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 				    const struct ldlm_res_id *res_id,
 				    ldlm_policy_data_t *policy,
-				    ldlm_mode_t mode,
-				    ldlm_cancel_flags_t flags,
+				    enum ldlm_mode mode,
+				    enum ldlm_cancel_flags flags,
 				    void *opaque)
 {
 	struct ldlm_resource *res;
@@ -1591,7 +1629,7 @@
 	int rc;
 
 	res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
-	if (res == NULL) {
+	if (!res) {
 		/* This is not a problem. */
 		CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
 		return 0;
@@ -1638,17 +1676,17 @@
  * to notify the server. */
 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
 			   const struct ldlm_res_id *res_id,
-			   ldlm_cancel_flags_t flags, void *opaque)
+			   enum ldlm_cancel_flags flags, void *opaque)
 {
 	struct ldlm_cli_cancel_arg arg = {
 		.lc_flags       = flags,
 		.lc_opaque      = opaque,
 	};
 
-	if (ns == NULL)
+	if (!ns)
 		return ELDLM_OK;
 
-	if (res_id != NULL) {
+	if (res_id) {
 		return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
 						       LCK_MINMODE, flags,
 						       opaque);
@@ -1743,13 +1781,13 @@
 	struct ldlm_resource *res;
 	int rc;
 
-	if (ns == NULL) {
+	if (!ns) {
 		CERROR("must pass in namespace\n");
 		LBUG();
 	}
 
 	res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
-	if (res == NULL)
+	if (!res)
 		return 0;
 
 	LDLM_RESOURCE_ADDREF(res);
@@ -1796,7 +1834,7 @@
 		goto out;
 
 	reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-	if (reply == NULL) {
+	if (!reply) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1815,7 +1853,8 @@
 	exp = req->rq_export;
 	if (exp && exp->exp_lock_hash) {
 		/* In the function below, .hs_keycmp resolves to
-		 * ldlm_export_lock_keycmp() */
+		 * ldlm_export_lock_keycmp()
+		 */
 		/* coverity[overrun-buffer-val] */
 		cfs_hash_rehash_key(exp->exp_lock_hash,
 				    &lock->l_remote_handle,
@@ -1850,7 +1889,8 @@
 
 	/* If this is reply-less callback lock, we cannot replay it, since
 	 * server might have long dropped it, but notification of that event was
-	 * lost by network. (and server granted conflicting lock already) */
+	 * lost by network. (and server granted conflicting lock already)
+	 */
 	if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
 		LDLM_DEBUG(lock, "Not replaying reply-less lock:");
 		ldlm_lock_cancel(lock);
@@ -1882,7 +1922,7 @@
 
 	req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
 					LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	/* We're part of recovery, so don't wait for it. */
@@ -1901,7 +1941,8 @@
 	/* notify the server we've replayed all requests.
 	 * also, we mark the request to be put on a dedicated
 	 * queue to be processed after all request replayes.
-	 * bug 6063 */
+	 * bug 6063
+	 */
 	lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
 
 	LDLM_DEBUG(lock, "replaying lock:");
@@ -1936,7 +1977,8 @@
 
 	/* We don't need to care whether or not LRU resize is enabled
 	 * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
-	 * count parameter */
+	 * count parameter
+	 */
 	canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
 					 LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
 
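
The LRU cancellation path above only gathers candidate locks while holding the lock and defers the actual cancellation work to the ldlm_bl thread. A minimal standalone sketch of that gather-then-process idiom, using a hypothetical struct item instead of struct ldlm_lock:

#include <linux/list.h>
#include <linux/spinlock.h>

/* hypothetical item type, standing in for struct ldlm_lock */
struct item {
	struct list_head link;
};

static LIST_HEAD(lru_list);
static DEFINE_SPINLOCK(lru_lock);

/*
 * Move up to @max entries from the shared LRU onto the caller's private
 * @batch list while holding the lock; the expensive per-item work (for
 * LDLM, the cancellation RPCs) is then done without the lock held.
 */
static int gather_batch(struct list_head *batch, int max)
{
	struct item *it, *tmp;
	int n = 0;

	spin_lock(&lru_lock);
	list_for_each_entry_safe(it, tmp, &lru_list, link) {
		if (n >= max)
			break;
		list_move_tail(&it->link, batch);
		n++;
	}
	spin_unlock(&lru_lock);

	return n;
}
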
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 0ae6100..03b9726 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -56,7 +56,8 @@
 struct mutex ldlm_cli_namespace_lock;
 /* Client Namespaces that have active resources in them.
  * Once all resources go away, ldlm_poold moves such namespaces to the
- * inactive list */
+ * inactive list
+ */
 LIST_HEAD(ldlm_cli_active_namespace_list);
 /* Client namespaces that don't have any locks in them */
 static LIST_HEAD(ldlm_cli_inactive_namespace_list);
@@ -66,7 +67,8 @@
 struct dentry *ldlm_svc_debugfs_dir;
 
 /* during debug dump certain amount of granted locks for one resource to avoid
- * DDOS. */
+ * DDOS.
+ */
 static unsigned int ldlm_dump_granted_max = 256;
 
 static ssize_t
@@ -275,7 +277,8 @@
 		ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
 
 		/* Make sure that LRU resize was originally supported before
-		 * turning it on here. */
+		 * turning it on here.
+		 */
 		if (lru_resize &&
 		    (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
 			CDEBUG(D_DLMTRACE,
@@ -380,7 +383,7 @@
 	else
 		ldebugfs_remove(&ns->ns_debugfs_entry);
 
-	if (ns->ns_stats != NULL)
+	if (ns->ns_stats)
 		lprocfs_free_stats(&ns->ns_stats);
 }
 
@@ -400,7 +403,7 @@
 				   "%s", ldlm_ns_name(ns));
 
 	ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
-	if (ns->ns_stats == NULL) {
+	if (!ns->ns_stats) {
 		kobject_put(&ns->ns_kobj);
 		return -ENOMEM;
 	}
@@ -420,7 +423,7 @@
 	} else {
 		ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
 					      ldlm_ns_debugfs_dir);
-		if (ns_entry == NULL)
+		if (!ns_entry)
 			return -ENOMEM;
 		ns->ns_debugfs_entry = ns_entry;
 	}
@@ -554,7 +557,7 @@
 };
 
 struct ldlm_ns_hash_def {
-	ldlm_ns_type_t  nsd_type;
+	enum ldlm_ns_type nsd_type;
 	/** hash bucket bits */
 	unsigned	nsd_bkt_bits;
 	/** hash bits */
@@ -621,8 +624,8 @@
  */
 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
 					  ldlm_side_t client,
-					  ldlm_appetite_t apt,
-					  ldlm_ns_type_t ns_type)
+					  enum ldlm_appetite apt,
+					  enum ldlm_ns_type ns_type)
 {
 	struct ldlm_namespace *ns = NULL;
 	struct ldlm_ns_bucket *nsb;
@@ -631,7 +634,7 @@
 	int		    idx;
 	int		    rc;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 
 	rc = ldlm_get_ref();
 	if (rc) {
@@ -664,7 +667,7 @@
 					 CFS_HASH_BIGNAME |
 					 CFS_HASH_SPIN_BKTLOCK |
 					 CFS_HASH_NO_ITEMREF);
-	if (ns->ns_rs_hash == NULL)
+	if (!ns->ns_rs_hash)
 		goto out_ns;
 
 	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
@@ -749,7 +752,8 @@
 		struct lustre_handle lockh;
 
 		/* First, we look for non-cleaned-yet lock
-		 * all cleaned locks are marked by CLEANED flag. */
+		 * all cleaned locks are marked by CLEANED flag.
+		 */
 		lock_res(res);
 		list_for_each(tmp, q) {
 			lock = list_entry(tmp, struct ldlm_lock,
@@ -763,13 +767,14 @@
 			break;
 		}
 
-		if (lock == NULL) {
+		if (!lock) {
 			unlock_res(res);
 			break;
 		}
 
 		/* Set CBPENDING so nothing in the cancellation path
-		 * can match this lock. */
+		 * can match this lock.
+		 */
 		lock->l_flags |= LDLM_FL_CBPENDING;
 		lock->l_flags |= LDLM_FL_FAILED;
 		lock->l_flags |= flags;
@@ -782,7 +787,8 @@
 			/* This is a little bit gross, but much better than the
 			 * alternative: pretend that we got a blocking AST from
 			 * the server, so that when the lock is decref'd, it
-			 * will go away ... */
+			 * will go away ...
+			 */
 			unlock_res(res);
 			LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
 			if (lock->l_completion_ast)
@@ -837,7 +843,7 @@
  */
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
 {
-	if (ns == NULL) {
+	if (!ns) {
 		CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
 		return ELDLM_OK;
 	}
@@ -873,7 +879,8 @@
 				  atomic_read(&ns->ns_bref) == 0, &lwi);
 
 		/* Forced cleanups should be able to reclaim all references,
-		 * so it's safe to wait forever... we can't leak locks... */
+		 * so it's safe to wait forever... we can't leak locks...
+		 */
 		if (force && rc == -ETIMEDOUT) {
 			LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
 				       ldlm_ns_name(ns),
@@ -943,7 +950,8 @@
 	LASSERT(!list_empty(&ns->ns_list_chain));
 	/* Some asserts and possibly other parts of the code are still
 	 * using list_empty(&ns->ns_list_chain). This is why it is
-	 * important to use list_del_init() here. */
+	 * important to use list_del_init() here.
+	 */
 	list_del_init(&ns->ns_list_chain);
 	ldlm_namespace_nr_dec(client);
 	mutex_unlock(ldlm_namespace_lock(client));
@@ -963,7 +971,8 @@
 	ldlm_namespace_unregister(ns, ns->ns_client);
 	/* Fini pool _before_ parent proc dir is removed. This is important as
 	 * ldlm_pool_fini() removes own proc dir which is child to @dir.
-	 * Removing it after @dir may cause oops. */
+	 * Removing it after @dir may cause oops.
+	 */
 	ldlm_pool_fini(&ns->ns_pool);
 
 	ldlm_namespace_debugfs_unregister(ns);
@@ -971,7 +980,8 @@
 	cfs_hash_putref(ns->ns_rs_hash);
 	/* Namespace \a ns should be not on list at this time, otherwise
 	 * this will cause issues related to using freed \a ns in poold
-	 * thread. */
+	 * thread.
+	 */
 	LASSERT(list_empty(&ns->ns_list_chain));
 	kfree(ns);
 	ldlm_put_ref();
@@ -1032,7 +1042,7 @@
 	int idx;
 
 	res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO);
-	if (res == NULL)
+	if (!res)
 		return NULL;
 
 	INIT_LIST_HEAD(&res->lr_granted);
@@ -1050,7 +1060,8 @@
 	lu_ref_init(&res->lr_reference);
 
 	/* The creator of the resource must unlock the mutex after LVB
-	 * initialization. */
+	 * initialization.
+	 */
 	mutex_init(&res->lr_lvb_mutex);
 	mutex_lock(&res->lr_lvb_mutex);
 
@@ -1065,7 +1076,8 @@
  */
 struct ldlm_resource *
 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
-		  const struct ldlm_res_id *name, ldlm_type_t type, int create)
+		  const struct ldlm_res_id *name, enum ldlm_type type,
+		  int create)
 {
 	struct hlist_node     *hnode;
 	struct ldlm_resource *res;
@@ -1073,14 +1085,13 @@
 	__u64		 version;
 	int		      ns_refcount = 0;
 
-	LASSERT(ns != NULL);
-	LASSERT(parent == NULL);
-	LASSERT(ns->ns_rs_hash != NULL);
+	LASSERT(!parent);
+	LASSERT(ns->ns_rs_hash);
 	LASSERT(name->name[0] != 0);
 
 	cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
 	hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
-	if (hnode != NULL) {
+	if (hnode) {
 		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
 		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
 		/* Synchronize with regard to resource creation. */
@@ -1111,13 +1122,12 @@
 	res->lr_ns_bucket  = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
 	res->lr_name       = *name;
 	res->lr_type       = type;
-	res->lr_most_restr = LCK_NL;
 
 	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
 	hnode = (version == cfs_hash_bd_version_get(&bd)) ?  NULL :
 		cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
 
-	if (hnode != NULL) {
+	if (hnode) {
 		/* Someone won the race and already added the resource. */
 		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
 		/* Clean lu_ref for failed resource. */
@@ -1167,7 +1177,8 @@
 	/* Let's see if we happened to be the very first resource in this
 	 * namespace. If so, and this is a client namespace, we need to move
 	 * the namespace into the active namespaces list to be patrolled by
-	 * the ldlm_poold. */
+	 * the ldlm_poold.
+	 */
 	if (ns_refcount == 1) {
 		mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
 		ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lustre/libcfs/Makefile
index 03d3f3d..277c123 100644
--- a/drivers/staging/lustre/lustre/libcfs/Makefile
+++ b/drivers/staging/lustre/lustre/libcfs/Makefile
@@ -11,8 +11,7 @@
 libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
 
 libcfs-all-objs := debug.o fail.o module.o tracefile.o \
-		   libcfs_string.o hash.o kernel_user_comm.o \
-		   prng.o workitem.o libcfs_cpu.o \
-		   libcfs_mem.o libcfs_lock.o
+		   libcfs_string.o hash.o prng.o workitem.o \
+		   libcfs_cpu.o libcfs_mem.o libcfs_lock.o
 
 libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 0b38dad..c90e510 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -47,15 +47,15 @@
 static char debug_file_name[1024];
 
 unsigned int libcfs_subsystem_debug = ~0;
+EXPORT_SYMBOL(libcfs_subsystem_debug);
 module_param(libcfs_subsystem_debug, int, 0644);
 MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
-EXPORT_SYMBOL(libcfs_subsystem_debug);
 
 unsigned int libcfs_debug = (D_CANTMASK |
 			     D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
+EXPORT_SYMBOL(libcfs_debug);
 module_param(libcfs_debug, int, 0644);
 MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
-EXPORT_SYMBOL(libcfs_debug);
 
 static int libcfs_param_debug_mb_set(const char *val,
 				     const struct kernel_param *kp)
@@ -82,7 +82,8 @@
 
 /* While debug_mb setting look like unsigned int, in fact
  * it needs quite a bunch of extra processing, so we define special
- * debugmb parameter type with corresponding methods to handle this case */
+ * debugmb parameter type with corresponding methods to handle this case
+ */
 static struct kernel_param_ops param_ops_debugmb = {
 	.set = libcfs_param_debug_mb_set,
 	.get = param_get_uint,
@@ -227,8 +228,7 @@
 
 int libcfs_panic_in_progress;
 
-/* libcfs_debug_token2mask() expects the returned
- * string in lower-case */
+/* libcfs_debug_token2mask() expects the returned string in lower-case */
 static const char *
 libcfs_debug_subsys2str(int subsys)
 {
@@ -271,6 +271,8 @@
 		return "lquota";
 	case S_OSD:
 		return "osd";
+	case S_LFSCK:
+		return "lfsck";
 	case S_LMV:
 		return "lmv";
 	case S_SEC:
@@ -288,8 +290,7 @@
 	}
 }
 
-/* libcfs_debug_token2mask() expects the returned
- * string in lower-case */
+/* libcfs_debug_token2mask() expects the returned string in lower-case */
 static const char *
 libcfs_debug_dbg2str(int debug)
 {
@@ -376,7 +377,7 @@
 				continue;
 
 			token = fn(i);
-			if (token == NULL)	      /* unused bit */
+			if (!token)	      /* unused bit */
 				continue;
 
 			if (len > 0) {		  /* separator? */
@@ -416,7 +417,7 @@
 	/* Allow a number for backwards compatibility */
 
 	for (n = strlen(str); n > 0; n--)
-		if (!isspace(str[n-1]))
+		if (!isspace(str[n - 1]))
 			break;
 	matched = n;
 	t = sscanf(str, "%i%n", &m, &matched);
@@ -446,8 +447,7 @@
 		snprintf(debug_file_name, sizeof(debug_file_name) - 1,
 			 "%s.%lld.%ld", libcfs_debug_file_path_arr,
 			 (s64)ktime_get_real_seconds(), (long_ptr_t)arg);
-		pr_alert("LustreError: dumping log to %s\n",
-		       debug_file_name);
+		pr_alert("LustreError: dumping log to %s\n", debug_file_name);
 		cfs_tracefile_dump_all_pages(debug_file_name);
 		libcfs_run_debug_log_upcall(debug_file_name);
 	}
@@ -469,7 +469,8 @@
 
 	/* we're being careful to ensure that the kernel thread is
 	 * able to set our state to running as it exits before we
-	 * get to schedule() */
+	 * get to schedule()
+	 */
 	init_waitqueue_entry(&wait, current);
 	set_current_state(TASK_INTERRUPTIBLE);
 	add_wait_queue(&debug_ctlwq, &wait);
@@ -503,14 +504,15 @@
 		libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
 	}
 
-	if (libcfs_debug_file_path != NULL) {
+	if (libcfs_debug_file_path) {
 		strlcpy(libcfs_debug_file_path_arr,
 			libcfs_debug_file_path,
 			sizeof(libcfs_debug_file_path_arr));
 	}
 
 	/* If libcfs_debug_mb is set to an invalid value or uninitialized
-	 * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
+	 * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES
+	 */
 	if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
 		max = TCD_MAX_PAGES;
 	} else {
@@ -540,8 +542,7 @@
 	return 0;
 }
 
-/* Debug markers, although printed by S_LNET
- * should not be be marked as such. */
+/* Debug markers, although printed by S_LNET should not be marked as such. */
 #undef DEBUG_SUBSYSTEM
 #define DEBUG_SUBSYSTEM S_UNDEFINED
 int libcfs_debug_mark_buffer(const char *text)
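
libcfs_debug_mb above gets its own "debugmb" parameter type because setting it needs extra processing beyond a plain uint. The same effect can be achieved with module_param_cb(); a minimal sketch with a made-up my_debug_mb parameter and an arbitrary range check:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int my_debug_mb = 64;	/* made-up parameter for illustration */

static int my_debug_mb_set(const char *val, const struct kernel_param *kp)
{
	unsigned int mb;
	int rc;

	rc = kstrtouint(val, 0, &mb);
	if (rc)
		return rc;
	if (mb < 4 || mb > 1024)	/* example bounds only */
		return -EINVAL;

	*(unsigned int *)kp->arg = mb;
	return 0;
}

static const struct kernel_param_ops my_debug_mb_ops = {
	.set = my_debug_mb_set,
	.get = param_get_uint,
};

module_param_cb(my_debug_mb, &my_debug_mb_ops, &my_debug_mb, 0644);
MODULE_PARM_DESC(my_debug_mb, "debug buffer size in MiB (example only)");
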
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index 2783143..dadaf76 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -97,7 +97,8 @@
 	/* Lost race to set CFS_FAILED_BIT. */
 	if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
 		/* If CFS_FAIL_ONCE is valid, only one process can fail,
-		 * otherwise multi-process can fail at the same time. */
+		 * otherwise multi-process can fail at the same time.
+		 */
 		if (cfs_fail_loc & CFS_FAIL_ONCE)
 			return 0;
 	}
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 4d50510..f60feb3 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -355,7 +355,7 @@
 
 	dh = container_of(cfs_hash_dh_hhead(hs, bd),
 			  struct cfs_hash_dhead, dh_head);
-	if (dh->dh_tail != NULL) /* not empty */
+	if (dh->dh_tail) /* not empty */
 		hlist_add_behind(hnode, dh->dh_tail);
 	else /* empty list */
 		hlist_add_head(hnode, &dh->dh_head);
@@ -371,7 +371,7 @@
 
 	dh = container_of(cfs_hash_dh_hhead(hs, bd),
 			  struct cfs_hash_dhead, dh_head);
-	if (hnd->next == NULL) { /* it's the tail */
+	if (!hnd->next) { /* it's the tail */
 		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
 			      container_of(hnd->pprev, struct hlist_node, next);
 	}
@@ -412,7 +412,7 @@
 
 	dh = container_of(cfs_hash_dd_hhead(hs, bd),
 			  struct cfs_hash_dhead_dep, dd_head);
-	if (dh->dd_tail != NULL) /* not empty */
+	if (dh->dd_tail) /* not empty */
 		hlist_add_behind(hnode, dh->dd_tail);
 	else /* empty list */
 		hlist_add_head(hnode, &dh->dd_head);
@@ -428,7 +428,7 @@
 
 	dh = container_of(cfs_hash_dd_hhead(hs, bd),
 			  struct cfs_hash_dhead_dep, dd_head);
-	if (hnd->next == NULL) { /* it's the tail */
+	if (!hnd->next) { /* it's the tail */
 		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
 			      container_of(hnd->pprev, struct hlist_node, next);
 	}
@@ -492,7 +492,7 @@
 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
 {
 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
-	if (likely(hs->hs_rehash_buckets == NULL)) {
+	if (likely(!hs->hs_rehash_buckets)) {
 		cfs_hash_bd_from_key(hs, hs->hs_buckets,
 				     hs->hs_cur_bits, key, bd);
 	} else {
@@ -579,7 +579,8 @@
 		return;
 
 	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
-	 * in cfs_hash_bd_del/add_locked */
+	 * in cfs_hash_bd_del/add_locked
+	 */
 	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
 	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
 	cfs_hash_bd_dep_record(hs, bd_new, rc);
@@ -635,13 +636,14 @@
 	int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
 
 	/* with this function, we can avoid a lot of useless refcount ops,
-	 * which are expensive atomic operations most time. */
+	 * which are expensive atomic operations most time.
+	 */
 	match = intent_add ? NULL : hnode;
 	hlist_for_each(ehnode, hhead) {
 		if (!cfs_hash_keycmp(hs, key, ehnode))
 			continue;
 
-		if (match != NULL && match != ehnode) /* can't match */
+		if (match && match != ehnode) /* can't match */
 			continue;
 
 		/* match and ... */
@@ -659,7 +661,7 @@
 	if (!intent_add)
 		return NULL;
 
-	LASSERT(hnode != NULL);
+	LASSERT(hnode);
 	cfs_hash_bd_add_locked(hs, bd, hnode);
 	return hnode;
 }
@@ -698,8 +700,7 @@
 		if (prev == bds[i].bd_bucket)
 			continue;
 
-		LASSERT(prev == NULL ||
-			prev->hsb_index < bds[i].bd_bucket->hsb_index);
+		LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
 		cfs_hash_bd_lock(hs, &bds[i], excl);
 		prev = bds[i].bd_bucket;
 	}
@@ -730,7 +731,7 @@
 	cfs_hash_for_each_bd(bds, n, i) {
 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
 						   CFS_HS_LOOKUP_IT_FIND);
-		if (ehnode != NULL)
+		if (ehnode)
 			return ehnode;
 	}
 	return NULL;
@@ -745,13 +746,13 @@
 	int intent;
 	unsigned i;
 
-	LASSERT(hnode != NULL);
+	LASSERT(hnode);
 	intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
 
 	cfs_hash_for_each_bd(bds, n, i) {
 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
 						   NULL, intent);
-		if (ehnode != NULL)
+		if (ehnode)
 			return ehnode;
 	}
 
@@ -778,7 +779,7 @@
 	cfs_hash_for_each_bd(bds, n, i) {
 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
 						   CFS_HS_LOOKUP_IT_FINDDEL);
-		if (ehnode != NULL)
+		if (ehnode)
 			return ehnode;
 	}
 	return NULL;
@@ -789,26 +790,20 @@
 {
 	int rc;
 
-	if (bd2->bd_bucket == NULL)
+	if (!bd2->bd_bucket)
 		return;
 
-	if (bd1->bd_bucket == NULL) {
+	if (!bd1->bd_bucket) {
 		*bd1 = *bd2;
 		bd2->bd_bucket = NULL;
 		return;
 	}
 
 	rc = cfs_hash_bd_compare(bd1, bd2);
-	if (rc == 0) {
+	if (!rc)
 		bd2->bd_bucket = NULL;
-
-	} else if (rc > 0) { /* swab bd1 and bd2 */
-		struct cfs_hash_bd tmp;
-
-		tmp = *bd2;
-		*bd2 = *bd1;
-		*bd1 = tmp;
-	}
+	else if (rc > 0)
+		swap(*bd1, *bd2); /* swap bd1 and bd2 */
 }
 
 void
@@ -818,7 +813,7 @@
 	/* NB: caller should hold hs_lock.rw if REHASH is set */
 	cfs_hash_bd_from_key(hs, hs->hs_buckets,
 			     hs->hs_cur_bits, key, &bds[0]);
-	if (likely(hs->hs_rehash_buckets == NULL)) {
+	if (likely(!hs->hs_rehash_buckets)) {
 		/* no rehash or not rehashing */
 		bds[1].bd_bucket = NULL;
 		return;
@@ -873,7 +868,7 @@
 	int i;
 
 	for (i = prev_size; i < size; i++) {
-		if (buckets[i] != NULL)
+		if (buckets[i])
 			LIBCFS_FREE(buckets[i], bkt_size);
 	}
 
@@ -892,16 +887,16 @@
 	struct cfs_hash_bucket **new_bkts;
 	int i;
 
-	LASSERT(old_size == 0 || old_bkts != NULL);
+	LASSERT(old_size == 0 || old_bkts);
 
-	if (old_bkts != NULL && old_size == new_size)
+	if (old_bkts && old_size == new_size)
 		return old_bkts;
 
 	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
-	if (new_bkts == NULL)
+	if (!new_bkts)
 		return NULL;
 
-	if (old_bkts != NULL) {
+	if (old_bkts) {
 		memcpy(new_bkts, old_bkts,
 		       min(old_size, new_size) * sizeof(*old_bkts));
 	}
@@ -911,7 +906,7 @@
 		struct cfs_hash_bd bd;
 
 		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
-		if (new_bkts[i] == NULL) {
+		if (!new_bkts[i]) {
 			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
 					      old_size, new_size);
 			return NULL;
@@ -1011,14 +1006,13 @@
 
 	CLASSERT(CFS_HASH_THETA_BITS < 15);
 
-	LASSERT(name != NULL);
-	LASSERT(ops != NULL);
+	LASSERT(name);
 	LASSERT(ops->hs_key);
 	LASSERT(ops->hs_hash);
 	LASSERT(ops->hs_object);
 	LASSERT(ops->hs_keycmp);
-	LASSERT(ops->hs_get != NULL);
-	LASSERT(ops->hs_put_locked != NULL);
+	LASSERT(ops->hs_get);
+	LASSERT(ops->hs_put_locked);
 
 	if ((flags & CFS_HASH_REHASH) != 0)
 		flags |= CFS_HASH_COUNTER; /* must have counter */
@@ -1029,13 +1023,12 @@
 	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
 	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
 		     (flags & CFS_HASH_NO_LOCK) == 0));
-	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
-		      ops->hs_keycpy != NULL));
+	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
 
 	len = (flags & CFS_HASH_BIGNAME) == 0 ?
 	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
 	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
-	if (hs == NULL)
+	if (!hs)
 		return NULL;
 
 	strlcpy(hs->hs_name, name, len);
@@ -1063,7 +1056,7 @@
 
 	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
 						  CFS_HASH_NBKT(hs));
-	if (hs->hs_buckets != NULL)
+	if (hs->hs_buckets)
 		return hs;
 
 	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
@@ -1082,7 +1075,7 @@
 	struct cfs_hash_bd bd;
 	int i;
 
-	LASSERT(hs != NULL);
+	LASSERT(hs);
 	LASSERT(!cfs_hash_is_exiting(hs) &&
 		!cfs_hash_is_iterating(hs));
 
@@ -1096,13 +1089,12 @@
 
 	cfs_hash_depth_wi_cancel(hs);
 	/* rehash should be done/canceled */
-	LASSERT(hs->hs_buckets != NULL &&
-		hs->hs_rehash_buckets == NULL);
+	LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);
 
 	cfs_hash_for_each_bucket(hs, &bd, i) {
 		struct hlist_head *hhead;
 
-		LASSERT(bd.bd_bucket != NULL);
+		LASSERT(bd.bd_bucket);
 		/* no need to take this lock, just for consistent code */
 		cfs_hash_bd_lock(hs, &bd, 1);
 
@@ -1113,7 +1105,8 @@
 					 hs->hs_name, bd.bd_bucket->hsb_index,
 					 bd.bd_offset, bd.bd_bucket->hsb_count);
 				/* can't assert key valicate, because we
-				 * can interrupt rehash */
+				 * can interrupt rehash
+				 */
 				cfs_hash_bd_del_locked(hs, &bd, hnode);
 				cfs_hash_exit(hs, hnode);
 			}
@@ -1164,7 +1157,8 @@
 		return -EAGAIN;
 
 	/* XXX: need to handle case with max_theta != 2.0
-	 *      and the case with min_theta != 0.5 */
+	 *      and the case with min_theta != 0.5
+	 */
 	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
 	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
 		return hs->hs_cur_bits + 1;
@@ -1293,8 +1287,8 @@
 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
 
 	/* NB: do nothing if @hnode is not in hash table */
-	if (hnode == NULL || !hlist_unhashed(hnode)) {
-		if (bds[1].bd_bucket == NULL && hnode != NULL) {
+	if (!hnode || !hlist_unhashed(hnode)) {
+		if (!bds[1].bd_bucket && hnode) {
 			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
 		} else {
 			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
@@ -1302,7 +1296,7 @@
 		}
 	}
 
-	if (hnode != NULL) {
+	if (hnode) {
 		obj  = cfs_hash_object(hs, hnode);
 		bits = cfs_hash_rehash_bits(hs);
 	}
@@ -1348,7 +1342,7 @@
 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
 
 	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
-	if (hnode != NULL)
+	if (hnode)
 		obj = cfs_hash_object(hs, hnode);
 
 	cfs_hash_dual_bd_unlock(hs, bds, 0);
@@ -1378,7 +1372,8 @@
 	/* NB: iteration is mostly called by service thread,
 	 * we tend to cancel pending rehash-request, instead of
 	 * blocking service thread, we will relaunch rehash request
-	 * after iteration */
+	 * after iteration
+	 */
 	if (cfs_hash_is_rehashing(hs))
 		cfs_hash_rehash_cancel_locked(hs);
 	cfs_hash_unlock(hs, 1);
@@ -1436,7 +1431,7 @@
 		struct hlist_head *hhead;
 
 		cfs_hash_bd_lock(hs, &bd, excl);
-		if (func == NULL) { /* only glimpse size */
+		if (!func) { /* only glimpse size */
 			count += bd.bd_bucket->hsb_count;
 			cfs_hash_bd_unlock(hs, &bd, excl);
 			continue;
@@ -1574,7 +1569,7 @@
 
 	stop_on_change = cfs_hash_with_rehash_key(hs) ||
 			 !cfs_hash_with_no_itemref(hs) ||
-			 hs->hs_ops->hs_put_locked == NULL;
+			 !hs->hs_ops->hs_put_locked;
 	cfs_hash_lock(hs, 0);
 	LASSERT(!cfs_hash_is_rehashing(hs));
 
@@ -1585,7 +1580,7 @@
 		version = cfs_hash_bd_version_get(&bd);
 
 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
-			for (hnode = hhead->first; hnode != NULL;) {
+			for (hnode = hhead->first; hnode;) {
 				cfs_hash_bucket_validate(hs, &bd, hnode);
 				cfs_hash_get(hs, hnode);
 				cfs_hash_bd_unlock(hs, &bd, 0);
@@ -1634,9 +1629,8 @@
 	    !cfs_hash_with_no_itemref(hs))
 		return -EOPNOTSUPP;
 
-	if (hs->hs_ops->hs_get == NULL ||
-	    (hs->hs_ops->hs_put == NULL &&
-	     hs->hs_ops->hs_put_locked == NULL))
+	if (!hs->hs_ops->hs_get ||
+	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
 		return -EOPNOTSUPP;
 
 	cfs_hash_for_each_enter(hs);
@@ -1667,9 +1661,8 @@
 	if (cfs_hash_with_no_lock(hs))
 		return -EOPNOTSUPP;
 
-	if (hs->hs_ops->hs_get == NULL ||
-	    (hs->hs_ops->hs_put == NULL &&
-	     hs->hs_ops->hs_put_locked == NULL))
+	if (!hs->hs_ops->hs_get ||
+	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
 		return -EOPNOTSUPP;
 
 	cfs_hash_for_each_enter(hs);
@@ -1708,7 +1701,6 @@
 	cfs_hash_unlock(hs, 0);
 	cfs_hash_for_each_exit(hs);
 }
-
 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
 
 /*
@@ -1837,7 +1829,7 @@
 	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
 		hlist_for_each_safe(hnode, pos, hhead) {
 			key = cfs_hash_key(hs, hnode);
-			LASSERT(key != NULL);
+			LASSERT(key);
 			/* Validate hnode is in the correct bucket. */
 			cfs_hash_bucket_validate(hs, old, hnode);
 			/*
@@ -1867,7 +1859,7 @@
 	int rc = 0;
 	int i;
 
-	LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
+	LASSERT(hs && cfs_hash_with_rehash(hs));
 
 	cfs_hash_lock(hs, 0);
 	LASSERT(cfs_hash_is_rehashing(hs));
@@ -1884,7 +1876,7 @@
 	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
 					old_size, new_size);
 	cfs_hash_lock(hs, 1);
-	if (bkts == NULL) {
+	if (!bkts) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1903,7 +1895,7 @@
 		goto out;
 	}
 
-	LASSERT(hs->hs_rehash_buckets == NULL);
+	LASSERT(!hs->hs_rehash_buckets);
 	hs->hs_rehash_buckets = bkts;
 
 	rc = 0;
@@ -1946,7 +1938,7 @@
 	bsize = cfs_hash_bkt_size(hs);
 	cfs_hash_unlock(hs, 1);
 	/* can't refer to @hs anymore because it could be destroyed */
-	if (bkts != NULL)
+	if (bkts)
 		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
 	if (rc != 0)
 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
@@ -1987,14 +1979,15 @@
 	cfs_hash_bd_order(&bds[0], &bds[1]);
 
 	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
-	if (likely(old_bds[1].bd_bucket == NULL)) {
+	if (likely(!old_bds[1].bd_bucket)) {
 		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
 	} else {
 		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
 		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
 	}
 	/* overwrite key inside locks, otherwise may screw up with
-	 * other operations, i.e: rehash */
+	 * other operations, i.e: rehash
+	 */
 	cfs_hash_keycpy(hs, hnode, new_key);
 
 	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
@@ -2013,7 +2006,7 @@
 cfs_hash_full_bkts(struct cfs_hash *hs)
 {
 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
-	if (hs->hs_rehash_buckets == NULL)
+	if (!hs->hs_rehash_buckets)
 		return hs->hs_buckets;
 
 	LASSERT(hs->hs_rehash_bits != 0);
@@ -2025,7 +2018,7 @@
 cfs_hash_full_nbkt(struct cfs_hash *hs)
 {
 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
-	if (hs->hs_rehash_buckets == NULL)
+	if (!hs->hs_rehash_buckets)
 		return CFS_HASH_NBKT(hs);
 
 	LASSERT(hs->hs_rehash_bits != 0);
@@ -2046,15 +2039,15 @@
 	theta = __cfs_hash_theta(hs);
 
 	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
-		      CFS_HASH_BIGNAME_LEN, hs->hs_name,
-		      1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
-		      1 << hs->hs_max_bits,
-		      __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
-		      __cfs_hash_theta_int(hs->hs_min_theta),
-		      __cfs_hash_theta_frac(hs->hs_min_theta),
-		      __cfs_hash_theta_int(hs->hs_max_theta),
-		      __cfs_hash_theta_frac(hs->hs_max_theta),
-		      hs->hs_flags, hs->hs_rehash_count);
+		   CFS_HASH_BIGNAME_LEN, hs->hs_name,
+		   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
+		   1 << hs->hs_max_bits,
+		   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
+		   __cfs_hash_theta_int(hs->hs_min_theta),
+		   __cfs_hash_theta_frac(hs->hs_min_theta),
+		   __cfs_hash_theta_int(hs->hs_max_theta),
+		   __cfs_hash_theta_frac(hs->hs_max_theta),
+		   hs->hs_flags, hs->hs_rehash_count);
 
 	/*
 	 * The distribution is a summary of the chained hash depth in
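
cfs_hash_bd_order() in the hunks above replaces an open-coded three-assignment exchange with the kernel's swap() helper from <linux/kernel.h>. A small sketch of the same idiom (order_pair() is a made-up example function):

#include <linux/kernel.h>	/* swap() */

/* Put the smaller value first; swap() expands to a typeof-based
 * three-assignment exchange, so it works for any assignable type. */
static void order_pair(int *a, int *b)
{
	if (*a > *b)
		swap(*a, *b);
}
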
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
index 933525c..33352af 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
@@ -56,8 +51,9 @@
 	}
 
 	LIBCFS_ALLOC(cptab, sizeof(*cptab));
-	if (cptab != NULL) {
+	if (cptab) {
 		cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
+		node_set(0, cptab->ctb_nodemask);
 		cptab->ctb_nparts  = ncpt;
 	}
 
@@ -111,6 +107,13 @@
 }
 EXPORT_SYMBOL(cfs_cpt_online);
 
+nodemask_t *
+cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
+{
+	return &cptab->ctb_nodemask;
+}
+EXPORT_SYMBOL(cfs_cpt_nodemask);
+
 int
 cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 {
@@ -207,7 +210,7 @@
 void
 cfs_cpu_fini(void)
 {
-	if (cfs_cpt_table != NULL) {
+	if (cfs_cpt_table) {
 		cfs_cpt_table_free(cfs_cpt_table);
 		cfs_cpt_table = NULL;
 	}
@@ -218,7 +221,7 @@
 {
 	cfs_cpt_table = cfs_cpt_table_alloc(1);
 
-	return cfs_cpt_table != NULL ? 0 : -1;
+	return cfs_cpt_table ? 0 : -1;
 }
 
 #endif /* HAVE_LIBCFS_CPT */
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
index 15782d9..2de9eea 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
@@ -38,7 +33,7 @@
 void
 cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
 {
-	LASSERT(pcl->pcl_locks != NULL);
+	LASSERT(pcl->pcl_locks);
 	LASSERT(!pcl->pcl_locked);
 
 	cfs_percpt_free(pcl->pcl_locks);
@@ -90,6 +85,7 @@
  */
 void
 cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
+	__acquires(pcl->pcl_locks)
 {
 	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
 	int	i;
@@ -114,7 +110,8 @@
 		if (i == 0) {
 			LASSERT(!pcl->pcl_locked);
 			/* nobody should take private lock after this
-			 * so I wouldn't starve for too long time */
+			 * so I wouldn't starve for too long time
+			 */
 			pcl->pcl_locked = 1;
 		}
 	}
@@ -124,6 +121,7 @@
 /** unlock a CPU partition */
 void
 cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
+	__releases(pcl->pcl_locks)
 {
 	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
 	int	i;
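
The __acquires()/__releases() annotations added to cfs_percpt_lock()/cfs_percpt_unlock() above are sparse context hints: they expand to nothing in a normal build and let sparse check that lock acquire and release stay balanced across function boundaries. A minimal sketch on a hypothetical structure:

#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;	/* hypothetical example structure */
};

static void foo_lock(struct foo *f)
	__acquires(&f->lock)
{
	spin_lock(&f->lock);
}

static void foo_unlock(struct foo *f)
	__releases(&f->lock)
{
	spin_unlock(&f->lock);
}
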
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
index 27cf861..c5a6951 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
@@ -54,7 +49,7 @@
 	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
 
 	for (i = 0; i < arr->va_count; i++) {
-		if (arr->va_ptrs[i] != NULL)
+		if (arr->va_ptrs[i])
 			LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
 	}
 
@@ -87,9 +82,10 @@
 	if (!arr)
 		return NULL;
 
-	arr->va_size	= size = L1_CACHE_ALIGN(size);
-	arr->va_count	= count;
-	arr->va_cptab	= cptab;
+	size = L1_CACHE_ALIGN(size);
+	arr->va_size = size;
+	arr->va_count = count;
+	arr->va_cptab = cptab;
 
 	for (i = 0; i < count; i++) {
 		LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index 205a3ed..9dca666 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -54,7 +54,8 @@
 	 * and optionally an operator ('+' or '-').  If an operator
 	 * appears first in <str>, '*oldmask' is used as the starting point
 	 * (relative), otherwise minmask is used (absolute).  An operator
-	 * applies to all following tokens up to the next operator. */
+	 * applies to all following tokens up to the next operator.
+	 */
 	while (*str != '\0') {
 		while (isspace(*str))
 			str++;
@@ -81,8 +82,7 @@
 		found = 0;
 		for (i = 0; i < 32; i++) {
 			debugstr = bit2str(i);
-			if (debugstr != NULL &&
-			    strlen(debugstr) == len &&
+			if (debugstr && strlen(debugstr) == len &&
 			    strncasecmp(str, debugstr, len) == 0) {
 				if (op == '-')
 					newmask &= ~(1 << i);
@@ -175,7 +175,7 @@
 {
 	char *end;
 
-	if (next->ls_str == NULL)
+	if (!next->ls_str)
 		return 0;
 
 	/* skip leading white spaces */
@@ -196,7 +196,7 @@
 
 	res->ls_str = next->ls_str;
 	end = memchr(next->ls_str, delim, next->ls_len);
-	if (end == NULL) {
+	if (!end) {
 		/* there is no the delimeter in the string */
 		end = next->ls_str + next->ls_len;
 		next->ls_str = NULL;
@@ -229,18 +229,13 @@
 cfs_str2num_check(char *str, int nob, unsigned *num,
 		  unsigned min, unsigned max)
 {
-	char	*endp;
+	int rc;
 
 	str = cfs_trimwhite(str);
-	*num = simple_strtoul(str, &endp, 0);
-	if (endp == str)
+	rc = kstrtouint(str, 10, num);
+	if (rc)
 		return 0;
 
-	for (; endp < str + nob; endp++) {
-		if (!isspace(*endp))
-			return 0;
-	}
-
 	return (*num >= min && *num <= max);
 }
 EXPORT_SYMBOL(cfs_str2num_check);
@@ -266,7 +261,7 @@
 	struct cfs_lstr		tok;
 
 	LIBCFS_ALLOC(re, sizeof(*re));
-	if (re == NULL)
+	if (!re)
 		return -ENOMEM;
 
 	if (src->ls_len == 1 && src->ls_str[0] == '*') {
@@ -337,18 +332,19 @@
 	char s[] = "[";
 	char e[] = "]";
 
-	if (bracketed)
-		s[0] = e[0] = '\0';
+	if (bracketed) {
+		s[0] = '\0';
+		e[0] = '\0';
+	}
 
 	if (expr->re_lo == expr->re_hi)
 		i = scnprintf(buffer, count, "%u", expr->re_lo);
 	else if (expr->re_stride == 1)
 		i = scnprintf(buffer, count, "%s%u-%u%s",
-				s, expr->re_lo, expr->re_hi, e);
+			      s, expr->re_lo, expr->re_hi, e);
 	else
 		i = scnprintf(buffer, count, "%s%u-%u/%u%s",
-				s, expr->re_lo, expr->re_hi,
-				expr->re_stride, e);
+			      s, expr->re_lo, expr->re_hi, expr->re_stride, e);
 	return i;
 }
 
@@ -442,7 +438,7 @@
 	}
 
 	LIBCFS_ALLOC(val, sizeof(val[0]) * count);
-	if (val == NULL)
+	if (!val)
 		return -ENOMEM;
 
 	count = 0;
@@ -470,7 +466,7 @@
 		struct cfs_range_expr *expr;
 
 		expr = list_entry(expr_list->el_exprs.next,
-				      struct cfs_range_expr, re_link);
+				  struct cfs_range_expr, re_link);
 		list_del(&expr->re_link);
 		LIBCFS_FREE(expr, sizeof(*expr));
 	}
@@ -495,7 +491,7 @@
 	int			rc;
 
 	LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
-	if (expr_list == NULL)
+	if (!expr_list)
 		return -ENOMEM;
 
 	src.ls_str = str;
@@ -509,7 +505,7 @@
 		src.ls_len -= 2;
 
 		rc = -EINVAL;
-		while (src.ls_str != NULL) {
+		while (src.ls_str) {
 			struct cfs_lstr tok;
 
 			if (!cfs_gettok(&src, ',', &tok)) {
@@ -521,15 +517,12 @@
 			if (rc != 0)
 				break;
 
-			list_add_tail(&expr->re_link,
-					  &expr_list->el_exprs);
+			list_add_tail(&expr->re_link, &expr_list->el_exprs);
 		}
 	} else {
 		rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
-		if (rc == 0) {
-			list_add_tail(&expr->re_link,
-					  &expr_list->el_exprs);
-		}
+		if (rc == 0)
+			list_add_tail(&expr->re_link, &expr_list->el_exprs);
 	}
 
 	if (rc != 0)
@@ -555,8 +548,7 @@
 	struct cfs_expr_list *el;
 
 	while (!list_empty(list)) {
-		el = list_entry(list->next,
-				    struct cfs_expr_list, el_link);
+		el = list_entry(list->next, struct cfs_expr_list, el_link);
 		list_del(&el->el_link);
 		cfs_expr_list_free(el);
 	}
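
cfs_str2num_check() above drops simple_strtoul() plus the manual trailing-whitespace scan in favour of kstrtouint(), which already fails on trailing garbage and on overflow (note it also no longer accepts 0x-prefixed input, since the base is fixed at 10). A short sketch of the resulting pattern, with parse_uint_in_range() as a made-up helper:

#include <linux/kernel.h>

/* Parse a decimal unsigned integer and check it against [min, max]. */
static bool parse_uint_in_range(const char *str, unsigned int min,
				unsigned int max, unsigned int *out)
{
	unsigned int val;

	/* kstrtouint() returns -EINVAL on junk and -ERANGE on overflow */
	if (kstrtouint(str, 10, &val))
		return false;
	if (val < min || val > max)
		return false;

	*out = val;
	return true;
}
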
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index e52afe3..389fb9e 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
@@ -84,32 +79,32 @@
 {
 	int	i;
 
-	if (cptab->ctb_cpu2cpt != NULL) {
+	if (cptab->ctb_cpu2cpt) {
 		LIBCFS_FREE(cptab->ctb_cpu2cpt,
 			    num_possible_cpus() *
 			    sizeof(cptab->ctb_cpu2cpt[0]));
 	}
 
-	for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
+	for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
 		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
 
-		if (part->cpt_nodemask != NULL) {
+		if (part->cpt_nodemask) {
 			LIBCFS_FREE(part->cpt_nodemask,
 				    sizeof(*part->cpt_nodemask));
 		}
 
-		if (part->cpt_cpumask != NULL)
+		if (part->cpt_cpumask)
 			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
 	}
 
-	if (cptab->ctb_parts != NULL) {
+	if (cptab->ctb_parts) {
 		LIBCFS_FREE(cptab->ctb_parts,
 			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
 	}
 
-	if (cptab->ctb_nodemask != NULL)
+	if (cptab->ctb_nodemask)
 		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
-	if (cptab->ctb_cpumask != NULL)
+	if (cptab->ctb_cpumask)
 		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
 
 	LIBCFS_FREE(cptab, sizeof(*cptab));
@@ -123,7 +118,7 @@
 	int	i;
 
 	LIBCFS_ALLOC(cptab, sizeof(*cptab));
-	if (cptab == NULL)
+	if (!cptab)
 		return NULL;
 
 	cptab->ctb_nparts = ncpt;
@@ -131,19 +126,19 @@
 	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
 	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
 
-	if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
+	if (!cptab->ctb_cpumask || !cptab->ctb_nodemask)
 		goto failed;
 
 	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
 		     num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
-	if (cptab->ctb_cpu2cpt == NULL)
+	if (!cptab->ctb_cpu2cpt)
 		goto failed;
 
 	memset(cptab->ctb_cpu2cpt, -1,
 	       num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
 
 	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
-	if (cptab->ctb_parts == NULL)
+	if (!cptab->ctb_parts)
 		goto failed;
 
 	for (i = 0; i < ncpt; i++) {
@@ -151,7 +146,7 @@
 
 		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
 		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
-		if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
+		if (!part->cpt_cpumask || !part->cpt_nodemask)
 			goto failed;
 	}
 
@@ -359,8 +354,6 @@
 
 	if (i >= nr_cpu_ids)
 		node_clear(node, *cptab->ctb_nodemask);
-
-	return;
 }
 EXPORT_SYMBOL(cfs_cpt_unset_cpu);
 
@@ -530,7 +523,8 @@
 			return cpt;
 
 		/* don't return negative value for safety of upper layer,
-		 * instead we shadow the unknown cpu to a valid partition ID */
+		 * instead we shadow the unknown cpu to a valid partition ID
+		 */
 		cpt = cpu % cptab->ctb_nparts;
 	}
 
@@ -618,7 +612,7 @@
 	/* allocate scratch buffer */
 	LIBCFS_ALLOC(socket, cpumask_size());
 	LIBCFS_ALLOC(core, cpumask_size());
-	if (socket == NULL || core == NULL) {
+	if (!socket || !core) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -659,9 +653,9 @@
 	}
 
  out:
-	if (socket != NULL)
+	if (socket)
 		LIBCFS_FREE(socket, cpumask_size());
-	if (core != NULL)
+	if (core)
 		LIBCFS_FREE(core, cpumask_size());
 	return rc;
 }
@@ -682,7 +676,8 @@
 
 	/* generate reasonable number of CPU partitions based on total number
 	 * of CPUs, Preferred N should be power2 and match this condition:
-	 * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
+	 * 2 * (N - 1)^2 < NCPUS <= 2 * N^2
+	 */
 	for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
 		;
 
@@ -700,7 +695,8 @@
  out:
 #if (BITS_PER_LONG == 32)
 	/* config many CPU partitions on 32-bit system could consume
-	 * too much memory */
+	 * too much memory
+	 */
 	ncpt = min(2U, ncpt);
 #endif
 	while (ncpu % ncpt != 0)
@@ -735,7 +731,7 @@
 	}
 
 	cptab = cfs_cpt_table_alloc(ncpt);
-	if (cptab == NULL) {
+	if (!cptab) {
 		CERROR("Failed to allocate CPU map(%d)\n", ncpt);
 		goto failed;
 	}
@@ -747,7 +743,7 @@
 	}
 
 	LIBCFS_ALLOC(mask, cpumask_size());
-	if (mask == NULL) {
+	if (!mask) {
 		CERROR("Failed to allocate scratch cpumask\n");
 		goto failed;
 	}
@@ -793,10 +789,10 @@
 	CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
 	       ncpt, num_online_nodes(), num_online_cpus());
 
-	if (mask != NULL)
+	if (mask)
 		LIBCFS_FREE(mask, cpumask_size());
 
-	if (cptab != NULL)
+	if (cptab)
 		cfs_cpt_table_free(cptab);
 
 	return NULL;
@@ -814,7 +810,7 @@
 
 	for (ncpt = 0;; ncpt++) { /* quick scan bracket */
 		str = strchr(str, '[');
-		if (str == NULL)
+		if (!str)
 			break;
 		str++;
 	}
@@ -836,7 +832,7 @@
 	high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
 
 	cptab = cfs_cpt_table_alloc(ncpt);
-	if (cptab == NULL) {
+	if (!cptab) {
 		CERROR("Failed to allocate cpu partition table\n");
 		return NULL;
 	}
@@ -850,11 +846,12 @@
 		int			i;
 		int			n;
 
-		if (bracket == NULL) {
+		if (!bracket) {
 			if (*str != 0) {
 				CERROR("Invalid pattern %s\n", str);
 				goto failed;
-			} else if (c != ncpt) {
+			}
+			if (c != ncpt) {
 				CERROR("expect %d partitions but found %d\n",
 				       ncpt, c);
 				goto failed;
@@ -885,7 +882,7 @@
 		}
 
 		bracket = strchr(str, ']');
-		if (bracket == NULL) {
+		if (!bracket) {
 			CERROR("missing right bracket for cpt %d, %s\n",
 			       cpt, str);
 			goto failed;
@@ -943,6 +940,7 @@
 		spin_lock(&cpt_data.cpt_lock);
 		cpt_data.cpt_version++;
 		spin_unlock(&cpt_data.cpt_lock);
+		/* Fall through */
 	default:
 		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
 			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
@@ -975,25 +973,25 @@
 void
 cfs_cpu_fini(void)
 {
-	if (cfs_cpt_table != NULL)
+	if (cfs_cpt_table)
 		cfs_cpt_table_free(cfs_cpt_table);
 
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_hotcpu_notifier(&cfs_cpu_notifier);
 #endif
-	if (cpt_data.cpt_cpumask != NULL)
+	if (cpt_data.cpt_cpumask)
 		LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
 }
 
 int
 cfs_cpu_init(void)
 {
-	LASSERT(cfs_cpt_table == NULL);
+	LASSERT(!cfs_cpt_table);
 
 	memset(&cpt_data, 0, sizeof(cpt_data));
 
 	LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
-	if (cpt_data.cpt_cpumask == NULL) {
+	if (!cpt_data.cpt_cpumask) {
 		CERROR("Failed to allocate scratch buffer\n");
 		return -1;
 	}
@@ -1007,7 +1005,7 @@
 
 	if (*cpu_pattern != 0) {
 		cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
-		if (cfs_cpt_table == NULL) {
+		if (!cfs_cpt_table) {
 			CERROR("Failed to create cptab from pattern %s\n",
 			       cpu_pattern);
 			goto failed;
@@ -1015,7 +1013,7 @@
 
 	} else {
 		cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
-		if (cfs_cpt_table == NULL) {
+		if (!cfs_cpt_table) {
 			CERROR("Failed to create ptable with npartitions %d\n",
 			       cpu_npartitions);
 			goto failed;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
index 079d50e..ec75801 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
@@ -45,14 +45,14 @@
 
 	*type = cfs_crypto_hash_type(alg_id);
 
-	if (*type == NULL) {
+	if (!*type) {
 		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
 		      alg_id, CFS_HASH_ALG_MAX);
 		return -EINVAL;
 	}
 	desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
 
-	if (desc->tfm == NULL)
+	if (!desc->tfm)
 		return -EINVAL;
 
 	if (IS_ERR(desc->tfm)) {
@@ -69,7 +69,7 @@
 	 * Skip this function for digest, because we use shash logic at
 	 * cfs_crypto_hash_alloc.
 	 */
-	if (key != NULL)
+	if (key)
 		err = crypto_hash_setkey(desc->tfm, key, key_len);
 	else if ((*type)->cht_key != 0)
 		err = crypto_hash_setkey(desc->tfm,
@@ -99,14 +99,14 @@
 	int			err;
 	const struct cfs_crypto_hash_type	*type;
 
-	if (buf == NULL || buf_len == 0 || hash_len == NULL)
+	if (!buf || buf_len == 0 || !hash_len)
 		return -EINVAL;
 
 	err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
 	if (err != 0)
 		return err;
 
-	if (hash == NULL || *hash_len < type->cht_size) {
+	if (!hash || *hash_len < type->cht_size) {
 		*hash_len = type->cht_size;
 		crypto_free_hash(hdesc.tfm);
 		return -ENOSPC;
@@ -125,13 +125,12 @@
 	cfs_crypto_hash_init(unsigned char alg_id,
 			     unsigned char *key, unsigned int key_len)
 {
-
 	struct  hash_desc       *hdesc;
 	int		     err;
 	const struct cfs_crypto_hash_type       *type;
 
 	hdesc = kmalloc(sizeof(*hdesc), 0);
-	if (hdesc == NULL)
+	if (!hdesc)
 		return ERR_PTR(-ENOMEM);
 
 	err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
@@ -175,16 +174,16 @@
 	int     err;
 	int     size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
 
-	if (hash_len == NULL) {
+	if (!hash_len) {
 		crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
 		kfree(hdesc);
 		return 0;
 	}
-	if (hash == NULL || *hash_len < size) {
+	if (!hash || *hash_len < size) {
 		*hash_len = size;
 		return -ENOSPC;
 	}
-	err = crypto_hash_final((struct hash_desc *) hdesc, hash);
+	err = crypto_hash_final((struct hash_desc *)hdesc, hash);
 
 	if (err < 0) {
 		/* May be caller can fix error */
@@ -212,7 +211,6 @@
 					     hash, &hash_len);
 		if (err)
 			break;
-
 	}
 	end = jiffies;
 
@@ -235,8 +233,7 @@
 {
 	if (hash_alg < CFS_HASH_ALG_MAX)
 		return cfs_crypto_hash_speeds[hash_alg];
-	else
-		return -1;
+	return -1;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_speed);
 
@@ -249,11 +246,12 @@
 	unsigned char	   *data;
 	unsigned int	    j;
 	/* Data block size for testing hash. Maximum
-	 * kmalloc size for 2.6.18 kernel is 128K */
+	 * kmalloc size for 2.6.18 kernel is 128K
+	 */
 	unsigned int	    data_len = 1 * 128 * 1024;
 
 	data = kmalloc(data_len, 0);
-	if (data == NULL) {
+	if (!data) {
 		CERROR("Failed to allocate mem\n");
 		return -ENOMEM;
 	}
@@ -285,6 +283,4 @@
 {
 	if (adler32 == 0)
 		cfs_crypto_adler32_unregister();
-
-	return;
 }
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index 68515d9..13d31e8 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -65,6 +65,7 @@
 		commit_creds(cred);
 	}
 }
+EXPORT_SYMBOL(cfs_cap_raise);
 
 void cfs_cap_lower(cfs_cap_t cap)
 {
@@ -76,11 +77,13 @@
 		commit_creds(cred);
 	}
 }
+EXPORT_SYMBOL(cfs_cap_lower);
 
 int cfs_cap_raised(cfs_cap_t cap)
 {
 	return cap_raised(current_cap(), cap);
 }
+EXPORT_SYMBOL(cfs_cap_raised);
 
 static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
 {
@@ -95,10 +98,6 @@
 	cfs_kernel_cap_pack(current_cap(), &cap);
 	return cap;
 }
-
-EXPORT_SYMBOL(cfs_cap_raise);
-EXPORT_SYMBOL(cfs_cap_lower);
-EXPORT_SYMBOL(cfs_cap_raised);
 EXPORT_SYMBOL(cfs_curproc_cap_pack);
 
 /*
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
index 59c7bf3..638e4b3 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
@@ -80,14 +80,14 @@
 
 	argv[0] = lnet_debug_log_upcall;
 
-	LASSERTF(file != NULL, "called on a null filename\n");
+	LASSERTF(file, "called on a null filename\n");
 	argv[1] = file; /* only need to pass the path of the file */
 
 	argv[2] = NULL;
 
 	rc = call_usermodehelper(argv[0], argv, envp, 1);
 	if (rc < 0 && rc != -ENOENT) {
-		CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n",
+		CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
 		       rc, argv[0], argv[1]);
 	} else {
 		CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
@@ -106,14 +106,14 @@
 
 	argv[0] = lnet_upcall;
 	argc = 1;
-	while (argv[argc] != NULL)
+	while (argv[argc])
 		argc++;
 
 	LASSERT(argc >= 2);
 
 	rc = call_usermodehelper(argv[0], argv, envp, 1);
 	if (rc < 0 && rc != -ENOENT) {
-		CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n",
+		CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n",
 		       rc, argv[0], argv[1],
 		       argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
 		       argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
@@ -142,8 +142,9 @@
 	argv[4] = buf;
 	argv[5] = NULL;
 
-	libcfs_run_upcall (argv);
+	libcfs_run_upcall(argv);
 }
+EXPORT_SYMBOL(libcfs_run_lbug_upcall);
 
 /* coverity[+kill] */
 void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
@@ -166,9 +167,10 @@
 	while (1)
 		schedule();
 }
+EXPORT_SYMBOL(lbug_with_loc);
 
 static int panic_notifier(struct notifier_block *self, unsigned long unused1,
-			 void *unused2)
+			  void *unused2)
 {
 	if (libcfs_panic_in_progress)
 		return 0;
@@ -187,13 +189,12 @@
 
 void libcfs_register_panic_notifier(void)
 {
-	atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &libcfs_panic_notifier);
 }
 
 void libcfs_unregister_panic_notifier(void)
 {
-	atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &libcfs_panic_notifier);
 }
-
-EXPORT_SYMBOL(libcfs_run_lbug_upcall);
-EXPORT_SYMBOL(lbug_with_loc);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
index 025e2f0..86f32ff 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
@@ -50,7 +50,7 @@
 	ret = kzalloc_node(size, flags | __GFP_NOWARN,
 			   cfs_cpt_spread_node(cptab, cpt));
 	if (!ret) {
-		WARN_ON(!(flags & (__GFP_FS|__GFP_HIGH)));
+		WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH)));
 		ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
 	}
 
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
index 70a99cf0..ebc60ac 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -40,41 +40,10 @@
 
 #define LNET_MINOR 240
 
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
 {
-	struct libcfs_ioctl_hdr   *hdr;
-	struct libcfs_ioctl_data  *data;
-	int orig_len;
-
-	hdr = (struct libcfs_ioctl_hdr *)buf;
-	data = (struct libcfs_ioctl_data *)buf;
-
-	if (copy_from_user(buf, arg, sizeof(*hdr)))
-		return -EFAULT;
-
-	if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
-		CERROR("PORTALS: version mismatch kernel vs application\n");
-		return -EINVAL;
-	}
-
-	if (hdr->ioc_len >= end - buf) {
-		CERROR("PORTALS: user buffer exceeds kernel buffer\n");
-		return -EINVAL;
-	}
-
-	if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
-		CERROR("PORTALS: user buffer too small for ioctl\n");
-		return -EINVAL;
-	}
-
-	orig_len = hdr->ioc_len;
-	if (copy_from_user(buf, arg, hdr->ioc_len))
-		return -EFAULT;
-	if (orig_len != data->ioc_len)
-		return -EINVAL;
-
 	if (libcfs_ioctl_is_invalid(data)) {
-		CERROR("PORTALS: ioctl not correctly formatted\n");
+		CERROR("LNET: ioctl not correctly formatted\n");
 		return -EINVAL;
 	}
 
@@ -88,9 +57,29 @@
 	return 0;
 }
 
-int libcfs_ioctl_popdata(void *arg, void *data, int size)
+int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
+			     __u32 *len)
 {
-	if (copy_to_user((char *)arg, data, size))
+	struct libcfs_ioctl_hdr hdr;
+
+	if (copy_from_user(&hdr, arg, sizeof(hdr)))
+		return -EFAULT;
+
+	if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
+	    hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
+		CERROR("LNET: version mismatch expected %#x, got %#x\n",
+		       LIBCFS_IOCTL_VERSION, hdr.ioc_version);
+		return -EINVAL;
+	}
+
+	*len = hdr.ioc_len;
+
+	return 0;
+}
+
+int libcfs_ioctl_popdata(void __user *arg, void *data, int size)
+{
+	if (copy_to_user(arg, data, size))
 		return -EFAULT;
 	return 0;
 }
@@ -98,14 +87,12 @@
 static int
 libcfs_psdev_open(struct inode *inode, struct file *file)
 {
-	struct libcfs_device_userstate **pdu = NULL;
 	int    rc = 0;
 
 	if (!inode)
 		return -EINVAL;
-	pdu = (struct libcfs_device_userstate **)&file->private_data;
-	if (libcfs_psdev_ops.p_open != NULL)
-		rc = libcfs_psdev_ops.p_open(0, (void *)pdu);
+	if (libcfs_psdev_ops.p_open)
+		rc = libcfs_psdev_ops.p_open(0, NULL);
 	else
 		return -EPERM;
 	return rc;
@@ -115,14 +102,12 @@
 static int
 libcfs_psdev_release(struct inode *inode, struct file *file)
 {
-	struct libcfs_device_userstate *pdu;
 	int    rc = 0;
 
 	if (!inode)
 		return -EINVAL;
-	pdu = file->private_data;
-	if (libcfs_psdev_ops.p_close != NULL)
-		rc = libcfs_psdev_ops.p_close(0, (void *)pdu);
+	if (libcfs_psdev_ops.p_close)
+		rc = libcfs_psdev_ops.p_close(0, NULL);
 	else
 		rc = -EPERM;
 	return rc;
@@ -138,8 +123,8 @@
 		return -EACCES;
 
 	if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
-	     _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR  ||
-	     _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
+	    _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR  ||
+	    _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
 		CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
 		       _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
 		return -EINVAL;
@@ -152,16 +137,10 @@
 			return -EPERM;
 		panic("debugctl-invoked panic");
 		return 0;
-	case IOC_LIBCFS_MEMHOG:
-		if (!capable(CFS_CAP_SYS_ADMIN))
-			return -EPERM;
-		/* go thought */
 	}
 
-	pfile.off = 0;
-	pfile.private_data = file->private_data;
-	if (libcfs_psdev_ops.p_ioctl != NULL)
-		rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg);
+	if (libcfs_psdev_ops.p_ioctl)
+		rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg);
 	else
 		rc = -EPERM;
 	return rc;
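
The hunk above replaces the old single-shot libcfs_ioctl_getdata() with a header-only probe: libcfs_ioctl_getdata_len() copies just the fixed-size libcfs_ioctl_hdr to learn ioc_len and check the ABI version, and the caller then copies the full payload. A minimal sketch of that two-step copy-in follows; every demo_* name and the size bound are invented for illustration, not part of the patch.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_hdr {
	__u32 len;		/* total length, header included */
	__u32 version;		/* ABI version the kernel expects */
};

#define DEMO_VERSION	1
#define DEMO_MAX_LEN	4096	/* arbitrary cap to keep allocations bounded */

static int demo_copy_in(const struct demo_hdr __user *uarg,
			struct demo_hdr **out)
{
	struct demo_hdr hdr, *buf;

	/* step 1: read only the header to learn how much to copy */
	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
		return -EFAULT;
	if (hdr.version != DEMO_VERSION)
		return -EINVAL;
	if (hdr.len < sizeof(hdr) || hdr.len > DEMO_MAX_LEN)
		return -EINVAL;

	buf = kzalloc(hdr.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* step 2: copy the whole buffer, header included */
	if (copy_from_user(buf, uarg, hdr.len)) {
		kfree(buf);
		return -EFAULT;
	}
	*out = buf;
	return 0;
}
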
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
index 64a136c..91c2ae8 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
@@ -63,9 +63,8 @@
 		cfs_trace_data[i] =
 			kmalloc(sizeof(union cfs_trace_data_union) *
 				num_possible_cpus(), GFP_KERNEL);
-		if (cfs_trace_data[i] == NULL)
+		if (!cfs_trace_data[i])
 			goto out;
-
 	}
 
 	/* arch related info initialized */
@@ -82,7 +81,7 @@
 				kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
 					GFP_KERNEL);
 
-			if (cfs_trace_console_buffers[i][j] == NULL)
+			if (!cfs_trace_console_buffers[i][j])
 				goto out;
 		}
 
@@ -105,7 +104,7 @@
 			cfs_trace_console_buffers[i][j] = NULL;
 		}
 
-	for (i = 0; cfs_trace_data[i] != NULL; i++) {
+	for (i = 0; cfs_trace_data[i]; i++) {
 		kfree(cfs_trace_data[i]);
 		cfs_trace_data[i] = NULL;
 	}
@@ -131,14 +130,13 @@
 	up_write(&cfs_tracefile_sem);
 }
 
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
+enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
 {
 	if (in_irq())
 		return CFS_TCD_TYPE_IRQ;
-	else if (in_softirq())
+	if (in_softirq())
 		return CFS_TCD_TYPE_SOFTIRQ;
-	else
-		return CFS_TCD_TYPE_PROC;
+	return CFS_TCD_TYPE_PROC;
 }
 
 /*
@@ -176,16 +174,6 @@
 		spin_unlock(&tcd->tcd_lock);
 }
 
-int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
-		      struct cfs_trace_page *tage)
-{
-	/*
-	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
-	 * from here: this will lead to infinite recursion.
-	 */
-	return tcd->tcd_cpu == tage->cpu;
-}
-
 void
 cfs_set_ptldebug_header(struct ptldebug_header *header,
 			struct libcfs_debug_msg_data *msgdata,
@@ -200,14 +188,14 @@
 	header->ph_cpu_id = smp_processor_id();
 	header->ph_type = cfs_trace_buf_idx_get();
 	/* y2038 safe since all user space treats this as unsigned, but
-	 * will overflow in 2106 */
+	 * will overflow in 2106
+	 */
 	header->ph_sec = (u32)ts.tv_sec;
 	header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
 	header->ph_stack = stack;
 	header->ph_pid = current->pid;
 	header->ph_line_num = msgdata->msg_line;
 	header->ph_extern_pid = 0;
-	return;
 }
 
 static char *
@@ -261,12 +249,11 @@
 		       hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
 		       fn, len, buf);
 	}
-	return;
 }
 
 int cfs_trace_max_debug_mb(void)
 {
 	int  total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
 
-	return max(512, (total_mb * 80)/100);
+	return max(512, (total_mb * 80) / 100);
 }
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 329d78c..05e2c56 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -54,11 +54,15 @@
 
 # define DEBUG_SUBSYSTEM S_LNET
 
+#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \
+				sizeof(struct lnet_ioctl_config_data))
+
 #include "../../include/linux/libcfs/libcfs.h"
 #include <asm/div64.h>
 
 #include "../../include/linux/libcfs/libcfs_crypto.h"
 #include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
 #include "../../include/linux/lnet/lnet.h"
 #include "tracefile.h"
 
@@ -68,142 +72,16 @@
 
 static struct dentry *lnet_debugfs_root;
 
-static void kportal_memhog_free(struct libcfs_device_userstate *ldu)
-{
-	struct page **level0p = &ldu->ldu_memhog_root_page;
-	struct page **level1p;
-	struct page **level2p;
-	int	   count1;
-	int	   count2;
-
-	if (*level0p != NULL) {
-
-		level1p = (struct page **)page_address(*level0p);
-		count1 = 0;
-
-		while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
-		       *level1p != NULL) {
-
-			level2p = (struct page **)page_address(*level1p);
-			count2 = 0;
-
-			while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
-			       *level2p != NULL) {
-
-				__free_page(*level2p);
-				ldu->ldu_memhog_pages--;
-				level2p++;
-				count2++;
-			}
-
-			__free_page(*level1p);
-			ldu->ldu_memhog_pages--;
-			level1p++;
-			count1++;
-		}
-
-		__free_page(*level0p);
-		ldu->ldu_memhog_pages--;
-
-		*level0p = NULL;
-	}
-
-	LASSERT(ldu->ldu_memhog_pages == 0);
-}
-
-static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
-		     gfp_t flags)
-{
-	struct page **level0p;
-	struct page **level1p;
-	struct page **level2p;
-	int	   count1;
-	int	   count2;
-
-	LASSERT(ldu->ldu_memhog_pages == 0);
-	LASSERT(ldu->ldu_memhog_root_page == NULL);
-
-	if (npages < 0)
-		return -EINVAL;
-
-	if (npages == 0)
-		return 0;
-
-	level0p = &ldu->ldu_memhog_root_page;
-	*level0p = alloc_page(flags);
-	if (*level0p == NULL)
-		return -ENOMEM;
-	ldu->ldu_memhog_pages++;
-
-	level1p = (struct page **)page_address(*level0p);
-	count1 = 0;
-	memset(level1p, 0, PAGE_CACHE_SIZE);
-
-	while (ldu->ldu_memhog_pages < npages &&
-	       count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
-
-		if (cfs_signal_pending())
-			return -EINTR;
-
-		*level1p = alloc_page(flags);
-		if (*level1p == NULL)
-			return -ENOMEM;
-		ldu->ldu_memhog_pages++;
-
-		level2p = (struct page **)page_address(*level1p);
-		count2 = 0;
-		memset(level2p, 0, PAGE_CACHE_SIZE);
-
-		while (ldu->ldu_memhog_pages < npages &&
-		       count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
-
-			if (cfs_signal_pending())
-				return -EINTR;
-
-			*level2p = alloc_page(flags);
-			if (*level2p == NULL)
-				return -ENOMEM;
-			ldu->ldu_memhog_pages++;
-
-			level2p++;
-			count2++;
-		}
-
-		level1p++;
-		count1++;
-	}
-
-	return 0;
-}
-
 /* called when opening /dev/device */
 static int libcfs_psdev_open(unsigned long flags, void *args)
 {
-	struct libcfs_device_userstate *ldu;
-
 	try_module_get(THIS_MODULE);
-
-	LIBCFS_ALLOC(ldu, sizeof(*ldu));
-	if (ldu != NULL) {
-		ldu->ldu_memhog_pages = 0;
-		ldu->ldu_memhog_root_page = NULL;
-	}
-	*(struct libcfs_device_userstate **)args = ldu;
-
 	return 0;
 }
 
 /* called when closing /dev/device */
 static int libcfs_psdev_release(unsigned long flags, void *args)
 {
-	struct libcfs_device_userstate *ldu;
-
-	ldu = (struct libcfs_device_userstate *)args;
-	if (ldu != NULL) {
-		kportal_memhog_free(ldu);
-		LIBCFS_FREE(ldu, sizeof(*ldu));
-	}
-
 	module_put(THIS_MODULE);
 	return 0;
 }
@@ -241,11 +119,25 @@
 }
 EXPORT_SYMBOL(libcfs_deregister_ioctl);
 
-static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
-			    void *arg, struct libcfs_ioctl_data *data)
+static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
+			       void *arg, struct libcfs_ioctl_hdr *hdr)
 {
+	struct libcfs_ioctl_data *data = NULL;
 	int err = -EINVAL;
 
+	/*
+	 * The libcfs_ioctl_data_adjust() function performs adjustment
+	 * operations on the libcfs_ioctl_data structure to make
+	 * it usable by the code.  This doesn't need to be called
+	 * for new data structures added.
+	 */
+	if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
+		data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
+		err = libcfs_ioctl_data_adjust(data);
+		if (err)
+			return err;
+	}
+
 	switch (cmd) {
 	case IOC_LIBCFS_CLEAR_DEBUG:
 		libcfs_debug_clear_buffer();
@@ -255,24 +147,11 @@
 	 * Handled in arch/cfs_module.c
 	 */
 	case IOC_LIBCFS_MARK_DEBUG:
-		if (data->ioc_inlbuf1 == NULL ||
+		if (!data->ioc_inlbuf1 ||
 		    data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
 			return -EINVAL;
 		libcfs_debug_mark_buffer(data->ioc_inlbuf1);
 		return 0;
-	case IOC_LIBCFS_MEMHOG:
-		if (pfile->private_data == NULL) {
-			err = -EINVAL;
-		} else {
-			kportal_memhog_free(pfile->private_data);
-			/* XXX The ioc_flags is not GFP flags now, need to be fixed */
-			err = kportal_memhog_alloc(pfile->private_data,
-						   data->ioc_count,
-						   data->ioc_flags);
-			if (err != 0)
-				kportal_memhog_free(pfile->private_data);
-		}
-		break;
 
 	default: {
 		struct libcfs_ioctl_handler *hand;
@@ -280,11 +159,11 @@
 		err = -EINVAL;
 		down_read(&ioctl_list_sem);
 		list_for_each_entry(hand, &ioctl_list, item) {
-			err = hand->handle_ioctl(cmd, data);
+			err = hand->handle_ioctl(cmd, hdr);
 			if (err != -EINVAL) {
 				if (err == 0)
 					err = libcfs_ioctl_popdata(arg,
-							data, sizeof(*data));
+							hdr, hdr->ioc_len);
 				break;
 			}
 		}
@@ -296,28 +175,41 @@
 	return err;
 }
 
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
+static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd,
+			void __user *arg)
 {
-	char    *buf;
-	struct libcfs_ioctl_data *data;
+	struct libcfs_ioctl_hdr *hdr;
 	int err = 0;
+	__u32 buf_len;
 
-	LIBCFS_ALLOC_GFP(buf, 1024, GFP_KERNEL);
-	if (buf == NULL)
+	err = libcfs_ioctl_getdata_len(arg, &buf_len);
+	if (err)
+		return err;
+
+	/*
+	 * do a check here to restrict the size of the memory
+	 * to allocate to guard against DoS attacks.
+	 */
+	if (buf_len > LNET_MAX_IOCTL_BUF_LEN) {
+		CERROR("LNET: user buffer exceeds kernel buffer\n");
+		return -EINVAL;
+	}
+
+	LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL);
+	if (!hdr)
 		return -ENOMEM;
 
 	/* 'cmd' and permissions get checked in our arch-specific caller */
-	if (libcfs_ioctl_getdata(buf, buf + 800, arg)) {
-		CERROR("PORTALS ioctl: data error\n");
-		err = -EINVAL;
+	if (copy_from_user(hdr, arg, buf_len)) {
+		CERROR("LNET ioctl: data error\n");
+		err = -EFAULT;
 		goto out;
 	}
-	data = (struct libcfs_ioctl_data *)buf;
 
-	err = libcfs_ioctl_int(pfile, cmd, arg, data);
+	err = libcfs_ioctl_handle(pfile, cmd, arg, hdr);
 
 out:
-	LIBCFS_FREE(buf, 1024);
+	LIBCFS_FREE(hdr, buf_len);
 	return err;
 }
 
@@ -330,9 +222,9 @@
 };
 
 static int proc_call_handler(void *data, int write, loff_t *ppos,
-		void __user *buffer, size_t *lenp,
-		int (*handler)(void *data, int write,
-		loff_t pos, void __user *buffer, int len))
+			     void __user *buffer, size_t *lenp,
+			     int (*handler)(void *data, int write, loff_t pos,
+					    void __user *buffer, int len))
 {
 	int rc = handler(data, write, *ppos, buffer, *lenp);
 
@@ -467,11 +359,11 @@
 	if (write)
 		return -EPERM;
 
-	LASSERT(cfs_cpt_table != NULL);
+	LASSERT(cfs_cpt_table);
 
 	while (1) {
 		LIBCFS_ALLOC(buf, len);
-		if (buf == NULL)
+		if (!buf)
 			return -ENOMEM;
 
 		rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
@@ -493,23 +385,19 @@
 
 	rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL);
  out:
-	if (buf != NULL)
+	if (buf)
 		LIBCFS_FREE(buf, len);
 	return rc;
 }
 
 static int proc_cpt_table(struct ctl_table *table, int write,
-			   void __user *buffer, size_t *lenp, loff_t *ppos)
+			  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	return proc_call_handler(table->data, write, ppos, buffer, lenp,
 				 __proc_cpt_table);
 }
 
 static struct ctl_table lnet_table[] = {
-	/*
-	 * NB No .strategy entries have been provided since sysctl(8) prefers
-	 * to go via /proc for portability.
-	 */
 	{
 		.procname = "debug",
 		.data     = &libcfs_debug,
@@ -640,42 +528,63 @@
 	return error;
 }
 
-static const struct file_operations lnet_debugfs_file_operations = {
+static const struct file_operations lnet_debugfs_file_operations_rw = {
 	.open		= simple_open,
 	.read		= lnet_debugfs_read,
 	.write		= lnet_debugfs_write,
 	.llseek		= default_llseek,
 };
 
+static const struct file_operations lnet_debugfs_file_operations_ro = {
+	.open		= simple_open,
+	.read		= lnet_debugfs_read,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations lnet_debugfs_file_operations_wo = {
+	.open		= simple_open,
+	.write		= lnet_debugfs_write,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations *lnet_debugfs_fops_select(umode_t mode)
+{
+	if (!(mode & S_IWUGO))
+		return &lnet_debugfs_file_operations_ro;
+
+	if (!(mode & S_IRUGO))
+		return &lnet_debugfs_file_operations_wo;
+
+	return &lnet_debugfs_file_operations_rw;
+}
+
 void lustre_insert_debugfs(struct ctl_table *table,
 			   const struct lnet_debugfs_symlink_def *symlinks)
 {
-	struct dentry *entry;
-
-	if (lnet_debugfs_root == NULL)
+	if (!lnet_debugfs_root)
 		lnet_debugfs_root = debugfs_create_dir("lnet", NULL);
 
 	/* Even if we cannot create, just ignore it altogether) */
 	if (IS_ERR_OR_NULL(lnet_debugfs_root))
 		return;
 
+	/* We don't save the dentry returned by the next two calls, because
+	 * we don't call debugfs_remove() but rather remove_recursive()
+	 */
 	for (; table->procname; table++)
-		entry = debugfs_create_file(table->procname, table->mode,
-					    lnet_debugfs_root, table,
-					    &lnet_debugfs_file_operations);
+		debugfs_create_file(table->procname, table->mode,
+				    lnet_debugfs_root, table,
+				    lnet_debugfs_fops_select(table->mode));
 
 	for (; symlinks && symlinks->name; symlinks++)
-		entry = debugfs_create_symlink(symlinks->name,
-					       lnet_debugfs_root,
-					       symlinks->target);
-
+		debugfs_create_symlink(symlinks->name, lnet_debugfs_root,
+				       symlinks->target);
 }
 EXPORT_SYMBOL_GPL(lustre_insert_debugfs);
 
 static void lustre_remove_debugfs(void)
 {
-	if (lnet_debugfs_root != NULL)
-		debugfs_remove_recursive(lnet_debugfs_root);
+	debugfs_remove_recursive(lnet_debugfs_root);
 
 	lnet_debugfs_root = NULL;
 }
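
libcfs_ioctl_handle() above recovers the old-format struct libcfs_ioctl_data from the generic header with container_of(), but only when ioc_version says the payload really has that layout; newer versions are handed to the registered handlers untouched. A condensed, hypothetical example of that versioned-header dispatch (demo_* names are not part of the patch, and the flags check is only a placeholder):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_hdr {
	__u32 version;
	__u32 len;
};

struct demo_data_v1 {
	struct demo_hdr hdr;	/* embedded first, as in the libcfs ABI */
	__u32 flags;
	char payload[];
};

static int demo_dispatch(struct demo_hdr *hdr)
{
	if (hdr->version == 1) {
		/* recover the enclosing v1 structure from the header */
		struct demo_data_v1 *d =
			container_of(hdr, struct demo_data_v1, hdr);

		return d->flags ? 0 : -EINVAL;	/* placeholder validation */
	}
	/* newer payload versions are passed through to their handlers */
	return 0;
}
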
diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lustre/libcfs/prng.c
index 4147664..c75ae9a 100644
--- a/drivers/staging/lustre/lustre/libcfs/prng.c
+++ b/drivers/staging/lustre/lustre/libcfs/prng.c
@@ -42,11 +42,11 @@
 #include "../../include/linux/libcfs/libcfs.h"
 
 /*
-From: George Marsaglia <geo@stat.fsu.edu>
-Newsgroups: sci.math
-Subject: Re: A RANDOM NUMBER GENERATOR FOR C
-Date: Tue, 30 Sep 1997 05:29:35 -0700
-
+ * From: George Marsaglia <geo@stat.fsu.edu>
+ * Newsgroups: sci.math
+ * Subject: Re: A RANDOM NUMBER GENERATOR FOR C
+ * Date: Tue, 30 Sep 1997 05:29:35 -0700
+ *
  * You may replace the two constants 36969 and 18000 by any
  * pair of distinct constants from this list:
  * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584
@@ -58,7 +58,8 @@
  * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013
  * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083
  * (or any other 16-bit constants k for which both k*2^16-1
- * and k*2^15-1 are prime) */
+ * and k*2^15-1 are prime)
+ */
 
 #define RANDOM_CONST_A 18030
 #define RANDOM_CONST_B 29013
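
For context on the two constants kept above: they belong to George Marsaglia's multiply-with-carry scheme described in the reflowed comment. The stand-alone user-space sketch below shows how such a pair of constants is typically combined; it illustrates the scheme only and is not the in-tree cfs_rand() code, and the seeds are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CONST_A 18030u	/* any k where k*2^16-1 and k*2^15-1 are prime */
#define DEMO_CONST_B 29013u

static uint32_t demo_x = 521288629u;	/* arbitrary non-zero seeds */
static uint32_t demo_y = 362436069u;

static uint32_t demo_rand(void)
{
	/* low 16 bits hold the value, high 16 bits hold the carry */
	demo_x = DEMO_CONST_A * (demo_x & 0xffff) + (demo_x >> 16);
	demo_y = DEMO_CONST_B * (demo_y & 0xffff) + (demo_y >> 16);
	return (demo_x << 16) + (demo_y & 0xffff);
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("%08x\n", demo_rand());
	return 0;
}
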
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 65c4f1a..ec3bc04 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -56,6 +56,51 @@
 
 static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
 
+struct page_collection {
+	struct list_head	pc_pages;
+	/*
+	 * if this flag is set, collect_pages() will spill both
+	 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
+	 * only ->tcd_pages are spilled.
+	 */
+	int		pc_want_daemon_pages;
+};
+
+struct tracefiled_ctl {
+	struct completion	tctl_start;
+	struct completion	tctl_stop;
+	wait_queue_head_t		tctl_waitq;
+	pid_t			tctl_pid;
+	atomic_t		tctl_shutdown;
+};
+
+/*
+ * small data-structure for each page owned by tracefiled.
+ */
+struct cfs_trace_page {
+	/*
+	 * page itself
+	 */
+	struct page	  *page;
+	/*
+	 * linkage into one of the lists in trace_data_union or
+	 * page_collection
+	 */
+	struct list_head	   linkage;
+	/*
+	 * number of bytes used within this page
+	 */
+	unsigned int	 used;
+	/*
+	 * cpu that owns this page
+	 */
+	unsigned short       cpu;
+	/*
+	 * type(context) of this page
+	 */
+	unsigned short       type;
+};
+
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
 					 struct cfs_trace_cpu_data *tcd);
 
@@ -80,11 +125,11 @@
 	 */
 	gfp |= __GFP_NOWARN;
 	page = alloc_page(gfp);
-	if (page == NULL)
+	if (!page)
 		return NULL;
 
 	tage = kmalloc(sizeof(*tage), gfp);
-	if (tage == NULL) {
+	if (!tage) {
 		__free_page(page);
 		return NULL;
 	}
@@ -96,9 +141,6 @@
 
 static void cfs_tage_free(struct cfs_trace_page *tage)
 {
-	__LASSERT(tage != NULL);
-	__LASSERT(tage->page != NULL);
-
 	__free_page(tage->page);
 	kfree(tage);
 	atomic_dec(&cfs_tage_allocated);
@@ -107,9 +149,6 @@
 static void cfs_tage_to_tail(struct cfs_trace_page *tage,
 			     struct list_head *queue)
 {
-	__LASSERT(tage != NULL);
-	__LASSERT(queue != NULL);
-
 	list_move_tail(&tage->linkage, queue);
 }
 
@@ -127,7 +166,7 @@
 		struct cfs_trace_page *tage;
 
 		tage = cfs_tage_alloc(gfp);
-		if (tage == NULL)
+		if (!tage)
 			break;
 		list_add_tail(&tage->linkage, stock);
 	}
@@ -154,7 +193,7 @@
 			list_del_init(&tage->linkage);
 		} else {
 			tage = cfs_tage_alloc(GFP_ATOMIC);
-			if (unlikely(tage == NULL)) {
+			if (unlikely(!tage)) {
 				if ((!memory_pressure_get() ||
 				     in_interrupt()) && printk_ratelimit())
 					printk(KERN_WARNING
@@ -227,7 +266,7 @@
 	}
 
 	tage = cfs_trace_get_tage_try(tcd, len);
-	if (tage != NULL)
+	if (tage)
 		return tage;
 	if (thread_running)
 		cfs_tcd_shrink(tcd);
@@ -278,10 +317,11 @@
 
 	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
 	 * pins us to a particular CPU.  This avoids an smp_processor_id()
-	 * warning on Linux when debugging is enabled. */
+	 * warning on Linux when debugging is enabled.
+	 */
 	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
 
-	if (tcd == NULL)		/* arch may not log in IRQ context */
+	if (!tcd)		/* arch may not log in IRQ context */
 		goto console;
 
 	if (tcd->tcd_cur_pages == 0)
@@ -301,14 +341,14 @@
 	if (libcfs_debug_binary)
 		known_size += sizeof(header);
 
-	/*/
+	/*
 	 * '2' used because vsnprintf return real size required for output
 	 * _without_ terminating NULL.
 	 * if needed is to small for this format.
 	 */
 	for (i = 0; i < 2; i++) {
 		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
-		if (tage == NULL) {
+		if (!tage) {
 			if (needed + known_size > PAGE_CACHE_SIZE)
 				mask |= D_ERROR;
 
@@ -352,7 +392,7 @@
 			break;
 	}
 
-	if (*(string_buf+needed-1) != '\n')
+	if (*(string_buf + needed - 1) != '\n')
 		printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
 		       file, msgdata->msg_line, msgdata->msg_fn);
 
@@ -384,30 +424,30 @@
 	__LASSERT(debug_buf == string_buf);
 
 	tage->used += needed;
-	__LASSERT (tage->used <= PAGE_CACHE_SIZE);
+	__LASSERT(tage->used <= PAGE_CACHE_SIZE);
 
 console:
 	if ((mask & libcfs_printk) == 0) {
 		/* no console output requested */
-		if (tcd != NULL)
+		if (tcd)
 			cfs_trace_put_tcd(tcd);
 		return 1;
 	}
 
-	if (cdls != NULL) {
+	if (cdls) {
 		if (libcfs_console_ratelimit &&
 		    cdls->cdls_next != 0 &&     /* not first time ever */
 		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
 			/* skipping a console message */
 			cdls->cdls_count++;
-			if (tcd != NULL)
+			if (tcd)
 				cfs_trace_put_tcd(tcd);
 			return 1;
 		}
 
-		if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
-						       libcfs_console_max_delay
-						       + cfs_time_seconds(10))) {
+		if (cfs_time_after(cfs_time_current(),
+				   cdls->cdls_next + libcfs_console_max_delay +
+				   cfs_time_seconds(10))) {
 			/* last timeout was a long time ago */
 			cdls->cdls_delay /= libcfs_console_backoff * 4;
 		} else {
@@ -423,7 +463,7 @@
 		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
 	}
 
-	if (tcd != NULL) {
+	if (tcd) {
 		cfs_print_to_console(&header, mask, string_buf, needed, file,
 				     msgdata->msg_fn);
 		cfs_trace_put_tcd(tcd);
@@ -431,18 +471,18 @@
 		string_buf = cfs_trace_get_console_buffer();
 
 		needed = 0;
-		if (format1 != NULL) {
+		if (format1) {
 			va_copy(ap, args);
 			needed = vsnprintf(string_buf,
 					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
 					   format1, ap);
 			va_end(ap);
 		}
-		if (format2 != NULL) {
+		if (format2) {
 			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
 			if (remain > 0) {
 				va_start(ap, format2);
-				needed += vsnprintf(string_buf+needed, remain,
+				needed += vsnprintf(string_buf + needed, remain,
 						    format2, ap);
 				va_end(ap);
 			}
@@ -453,7 +493,7 @@
 		put_cpu();
 	}
 
-	if (cdls != NULL && cdls->cdls_count != 0) {
+	if (cdls && cdls->cdls_count != 0) {
 		string_buf = cfs_trace_get_console_buffer();
 
 		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -497,7 +537,8 @@
 {
 	/* Do the collect_pages job on a single CPU: assumes that all other
 	 * CPUs have been stopped during a panic.  If this isn't true for some
-	 * arch, this will have to be implemented separately in each arch.  */
+	 * arch, this will have to be implemented separately in each arch.
+	 */
 	int			i;
 	int			j;
 	struct cfs_trace_cpu_data *tcd;
@@ -509,8 +550,7 @@
 		tcd->tcd_cur_pages = 0;
 
 		if (pc->pc_want_daemon_pages) {
-			list_splice_init(&tcd->tcd_daemon_pages,
-					     &pc->pc_pages);
+			list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
 			tcd->tcd_cur_daemon_pages = 0;
 		}
 	}
@@ -527,7 +567,7 @@
 			tcd->tcd_cur_pages = 0;
 			if (pc->pc_want_daemon_pages) {
 				list_splice_init(&tcd->tcd_daemon_pages,
-						     &pc->pc_pages);
+						 &pc->pc_pages);
 				tcd->tcd_cur_daemon_pages = 0;
 			}
 		}
@@ -558,7 +598,6 @@
 
 			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
 						 linkage) {
-
 				__LASSERT_TAGE_INVARIANT(tage);
 
 				if (tage->cpu != cpu || tage->type != i)
@@ -580,7 +619,8 @@
 /* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
  * we have a good amount of data at all times for dumping during an LBUG, even
  * if we have been steadily writing (and otherwise discarding) pages via the
- * debug daemon. */
+ * debug daemon.
+ */
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
 					 struct cfs_trace_cpu_data *tcd)
 {
@@ -588,7 +628,6 @@
 	struct cfs_trace_page *tmp;
 
 	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
-
 		__LASSERT_TAGE_INVARIANT(tage);
 
 		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
@@ -674,12 +713,13 @@
 
 	cfs_tracefile_write_lock();
 
-	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
+	filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
+			 0600);
 	if (IS_ERR(filp)) {
 		rc = PTR_ERR(filp);
 		filp = NULL;
 		pr_err("LustreError: can't open %s for dump: rc %d\n",
-			filename, rc);
+		       filename, rc);
 		goto out;
 	}
 
@@ -691,10 +731,10 @@
 	}
 
 	/* ok, for now, just write the pages.  in the future we'll be building
-	 * iobufs with the pages and calling generic_direct_IO */
+	 * iobufs with the pages and calling generic_direct_IO
+	 */
 	MMSPACE_OPEN;
 	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
 		__LASSERT_TAGE_INVARIANT(tage);
 
 		buf = kmap(tage->page);
@@ -732,7 +772,6 @@
 	pc.pc_want_daemon_pages = 1;
 	collect_pages(&pc);
 	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
 		__LASSERT_TAGE_INVARIANT(tage);
 
 		list_del(&tage->linkage);
@@ -771,9 +810,10 @@
 int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
 			     const char *knl_buffer, char *append)
 {
-	/* NB if 'append' != NULL, it's a single character to append to the
-	 * copied out string - usually "\n", for /proc entries and "" (i.e. a
-	 * terminating zero byte) for sysctl entries */
+	/*
+	 * NB if 'append' != NULL, it's a single character to append to the
+	 * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
+	 */
 	int   nob = strlen(knl_buffer);
 
 	if (nob > usr_buffer_nob)
@@ -782,7 +822,7 @@
 	if (copy_to_user(usr_buffer, knl_buffer, nob))
 		return -EFAULT;
 
-	if (append != NULL && nob < usr_buffer_nob) {
+	if (append && nob < usr_buffer_nob) {
 		if (copy_to_user(usr_buffer + nob, append, 1))
 			return -EFAULT;
 
@@ -799,7 +839,7 @@
 		return -EINVAL;
 
 	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
-	if (*str == NULL)
+	if (!*str)
 		return -ENOMEM;
 
 	return 0;
@@ -842,12 +882,15 @@
 		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
 
 	} else if (strncmp(str, "size=", 5) == 0) {
-		cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
-		if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
-			cfs_tracefile_size = CFS_TRACEFILE_SIZE;
-		else
-			cfs_tracefile_size <<= 20;
+		unsigned long tmp;
 
+		rc = kstrtoul(str + 5, 10, &tmp);
+		if (!rc) {
+			if (tmp < 10 || tmp > 20480)
+				cfs_tracefile_size = CFS_TRACEFILE_SIZE;
+			else
+				cfs_tracefile_size = tmp << 20;
+		}
 	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
 		rc = -ENAMETOOLONG;
 	} else if (str[0] != '/') {
@@ -877,7 +920,7 @@
 		return rc;
 
 	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
-				 usr_str, usr_str_nob);
+				     usr_str, usr_str_nob);
 	if (rc == 0)
 		rc = cfs_trace_daemon_command(str);
 
@@ -977,7 +1020,7 @@
 			}
 		}
 		cfs_tracefile_read_unlock();
-		if (filp == NULL) {
+		if (!filp) {
 			put_pages_on_daemon_list(&pc);
 			__LASSERT(list_empty(&pc.pc_pages));
 			goto end_loop;
@@ -985,8 +1028,7 @@
 
 		MMSPACE_OPEN;
 
-		list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
-						   linkage) {
+		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
 			static loff_t f_pos;
 
 			__LASSERT_TAGE_INVARIANT(tage);
@@ -1017,8 +1059,7 @@
 			int i;
 
 			printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
-			pr_err("total cpus(%d): ",
-				num_possible_cpus());
+			pr_err("total cpus(%d): ", num_possible_cpus());
 			for (i = 0; i < num_possible_cpus(); i++)
 				if (cpu_online(i))
 					pr_cont("%d(on) ", i);
@@ -1028,9 +1069,9 @@
 
 			i = 0;
 			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
-						     linkage)
+						 linkage)
 				pr_err("page %d belongs to cpu %d\n",
-					++i, tage->cpu);
+				       ++i, tage->cpu);
 			pr_err("There are %d pages unwritten\n", i);
 		}
 		__LASSERT(list_empty(&pc.pc_pages));
@@ -1056,6 +1097,7 @@
 int cfs_trace_start_thread(void)
 {
 	struct tracefiled_ctl *tctl = &trace_tctl;
+	struct task_struct *task;
 	int rc = 0;
 
 	mutex_lock(&cfs_trace_thread_mutex);
@@ -1067,8 +1109,9 @@
 	init_waitqueue_head(&tctl->tctl_waitq);
 	atomic_set(&tctl->tctl_shutdown, 0);
 
-	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
-		rc = -ECHILD;
+	task = kthread_run(tracefiled, tctl, "ktracefiled");
+	if (IS_ERR(task)) {
+		rc = PTR_ERR(task);
 		goto out;
 	}
 
@@ -1135,7 +1178,7 @@
 			tcd->tcd_shutting_down = 1;
 
 			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
-							   linkage) {
+						 linkage) {
 				__LASSERT_TAGE_INVARIANT(tage);
 
 				list_del(&tage->linkage);
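
Two functional changes in this file are worth illustrating: cfs_trace_start_thread() now propagates the real error from kthread_run() instead of returning -ECHILD, and the "size=" parsing uses kstrtoul() with an explicit range check instead of simple_strtoul(). A minimal sketch of both patterns, with invented demo_* names; the 10..20480 MB bounds are borrowed from the hunk above, but returning -ERANGE here is this sketch's choice, not the patch's behaviour.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_worker(void *arg)
{
	while (!kthread_should_stop()) {
		/* a real worker would sleep on a waitqueue here */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static int demo_start_thread(void)
{
	struct task_struct *task;

	task = kthread_run(demo_worker, NULL, "demo_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* pass on the real error, e.g. -ENOMEM */
	return 0;
}

static int demo_parse_size_mb(const char *str, unsigned long *bytes)
{
	unsigned long mb;
	int rc;

	/* kstrtoul() rejects trailing garbage, unlike simple_strtoul() */
	rc = kstrtoul(str, 10, &mb);
	if (rc)
		return rc;
	if (mb < 10 || mb > 20480)
		return -ERANGE;
	*bytes = mb << 20;	/* megabytes to bytes */
	return 0;
}
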
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h
index 7bf1471..4c77f90 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.h
@@ -39,12 +39,12 @@
 
 #include "../../include/linux/libcfs/libcfs.h"
 
-typedef enum {
+enum cfs_trace_buf_type {
 	CFS_TCD_TYPE_PROC = 0,
 	CFS_TCD_TYPE_SOFTIRQ,
 	CFS_TCD_TYPE_IRQ,
 	CFS_TCD_TYPE_MAX
-} cfs_trace_buf_type_t;
+};
 
 /* trace file lock routines */
 
@@ -101,8 +101,10 @@
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
 
-/* Size of a buffer for sprinting console messages if we can't get a page
- * from system */
+/*
+ * Size of a buffer for sprinting console messages if we can't get a page
+ * from system
+ */
 #define CFS_TRACE_CONSOLE_BUFFER_SIZE   1024
 
 union cfs_trace_data_union {
@@ -185,66 +187,15 @@
 extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
 
 #define cfs_tcd_for_each(tcd, i, j)				       \
-    for (i = 0; cfs_trace_data[i] != NULL; i++)			   \
-	for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);	       \
-	     j < num_possible_cpus();				 \
-	     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
+	for (i = 0; cfs_trace_data[i]; i++)				\
+		for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);	\
+		     j < num_possible_cpus();				 \
+		     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
 
 #define cfs_tcd_for_each_type_lock(tcd, i, cpu)			   \
-    for (i = 0; cfs_trace_data[i] &&				      \
-	 (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&			\
-	 cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
-
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct page_collection {
-	struct list_head	pc_pages;
-	/*
-	 * if this flag is set, collect_pages() will spill both
-	 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
-	 * only ->tcd_pages are spilled.
-	 */
-	int		pc_want_daemon_pages;
-};
-
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct tracefiled_ctl {
-	struct completion	tctl_start;
-	struct completion	tctl_stop;
-	wait_queue_head_t		tctl_waitq;
-	pid_t			tctl_pid;
-	atomic_t		tctl_shutdown;
-};
-
-/*
- * small data-structure for each page owned by tracefiled.
- */
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct cfs_trace_page {
-	/*
-	 * page itself
-	 */
-	struct page	  *page;
-	/*
-	 * linkage into one of the lists in trace_data_union or
-	 * page_collection
-	 */
-	struct list_head	   linkage;
-	/*
-	 * number of bytes used within this page
-	 */
-	unsigned int	 used;
-	/*
-	 * cpu that owns this page
-	 */
-	unsigned short       cpu;
-	/*
-	 * type(context) of this page
-	 */
-	unsigned short       type;
-};
+	for (i = 0; cfs_trace_data[i] &&				\
+	     (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&			\
+	     cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
 
 void cfs_set_ptldebug_header(struct ptldebug_header *header,
 			     struct libcfs_debug_msg_data *m,
@@ -257,7 +208,7 @@
 void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
 
 extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
+enum cfs_trace_buf_type cfs_trace_buf_idx_get(void);
 
 static inline char *
 cfs_trace_get_console_buffer(void)
@@ -279,8 +230,7 @@
 	return tcd;
 }
 
-static inline void
-cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
+static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
 {
 	cfs_trace_unlock_tcd(tcd, 0);
 
@@ -290,9 +240,6 @@
 int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
 			   struct list_head *stock);
 
-int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
-		      struct cfs_trace_page *tage);
-
 void cfs_trace_assertion_failed(const char *str,
 				struct libcfs_debug_msg_data *m);
 
@@ -308,8 +255,8 @@
 
 #define __LASSERT_TAGE_INVARIANT(tage)				  \
 do {								    \
-	__LASSERT(tage != NULL);					\
-	__LASSERT(tage->page != NULL);				  \
+	__LASSERT(tage);					\
+	__LASSERT(tage->page);				  \
 	__LASSERT(tage->used <= PAGE_CACHE_SIZE);			 \
 	__LASSERT(page_count(tage->page) > 0);		      \
 } while (0)
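
The header change above also drops the cfs_trace_buf_type_t typedef in favour of a plain named enum, in line with kernel coding style. A trivial, hypothetical example of the same pattern (demo_* names are made up):

enum demo_state {
	DEMO_IDLE = 0,
	DEMO_RUNNING,
	DEMO_STOPPED,
};

/* callers spell out "enum demo_state" rather than a *_t alias */
static enum demo_state demo_next(enum demo_state s)
{
	return (s == DEMO_STOPPED) ? DEMO_IDLE : s + 1;
}
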
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index 60bb88a..136bc13 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -46,18 +46,21 @@
 #define CFS_WS_NAME_LEN	 16
 
 struct cfs_wi_sched {
-	struct list_head		ws_list;	/* chain on global list */
+	/* chain on global list */
+	struct list_head		ws_list;
 	/** serialised workitems */
 	spinlock_t		ws_lock;
 	/** where schedulers sleep */
 	wait_queue_head_t		ws_waitq;
 	/** concurrent workitems */
 	struct list_head		ws_runq;
-	/** rescheduled running-workitems, a workitem can be rescheduled
+	/**
+	 * rescheduled running-workitems, a workitem can be rescheduled
 	 * while running in wi_action(), but we don't to execute it again
 	 * unless it returns from wi_action(), so we put it on ws_rerunq
 	 * while rescheduling, and move it to runq after it returns
-	 * from wi_action() */
+	 * from wi_action()
+	 */
 	struct list_head		ws_rerunq;
 	/** CPT-table for this scheduler */
 	struct cfs_cpt_table	*ws_cptab;
@@ -128,8 +131,6 @@
 
 	wi->wi_scheduled = 1; /* LBUG future schedule attempts */
 	spin_unlock(&sched->ws_lock);
-
-	return;
 }
 EXPORT_SYMBOL(cfs_wi_exit);
 
@@ -163,7 +164,7 @@
 		wi->wi_scheduled = 0;
 	}
 
-	LASSERT (list_empty(&wi->wi_list));
+	LASSERT(list_empty(&wi->wi_list));
 
 	spin_unlock(&sched->ws_lock);
 	return rc;
@@ -186,7 +187,7 @@
 	spin_lock(&sched->ws_lock);
 
 	if (!wi->wi_scheduled) {
-		LASSERT (list_empty(&wi->wi_list));
+		LASSERT(list_empty(&wi->wi_list));
 
 		wi->wi_scheduled = 1;
 		sched->ws_nscheduled++;
@@ -198,21 +199,19 @@
 		}
 	}
 
-	LASSERT (!list_empty(&wi->wi_list));
+	LASSERT(!list_empty(&wi->wi_list));
 	spin_unlock(&sched->ws_lock);
-	return;
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
 
-static int
-cfs_wi_scheduler (void *arg)
+static int cfs_wi_scheduler(void *arg)
 {
 	struct cfs_wi_sched	*sched = (struct cfs_wi_sched *)arg;
 
 	cfs_block_allsigs();
 
 	/* CPT affinity scheduler? */
-	if (sched->ws_cptab != NULL)
+	if (sched->ws_cptab)
 		if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
 			CWARN("Failed to bind %s on CPT %d\n",
 			      sched->ws_name, sched->ws_cpt);
@@ -234,8 +233,8 @@
 
 		while (!list_empty(&sched->ws_runq) &&
 		       nloops < CFS_WI_RESCHED) {
-			wi = list_entry(sched->ws_runq.next,
-					    cfs_workitem_t, wi_list);
+			wi = list_entry(sched->ws_runq.next, cfs_workitem_t,
+					wi_list);
 			LASSERT(wi->wi_scheduled && !wi->wi_running);
 
 			list_del_init(&wi->wi_list);
@@ -261,14 +260,16 @@
 
 			LASSERT(wi->wi_scheduled);
 			/* wi is rescheduled, should be on rerunq now, we
-			 * move it to runq so it can run action now */
+			 * move it to runq so it can run action now
+			 */
 			list_move_tail(&wi->wi_list, &sched->ws_runq);
 		}
 
 		if (!list_empty(&sched->ws_runq)) {
 			spin_unlock(&sched->ws_lock);
 			/* don't sleep because some workitems still
-			 * expect me to come back soon */
+			 * expect me to come back soon
+			 */
 			cond_resched();
 			spin_lock(&sched->ws_lock);
 			continue;
@@ -343,11 +344,11 @@
 
 	LASSERT(cfs_wi_data.wi_init);
 	LASSERT(!cfs_wi_data.wi_stopping);
-	LASSERT(cptab == NULL || cpt == CFS_CPT_ANY ||
+	LASSERT(!cptab || cpt == CFS_CPT_ANY ||
 		(cpt >= 0 && cpt < cfs_cpt_number(cptab)));
 
 	LIBCFS_ALLOC(sched, sizeof(*sched));
-	if (sched == NULL)
+	if (!sched)
 		return -ENOMEM;
 
 	strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN);
@@ -376,7 +377,7 @@
 		sched->ws_starting++;
 		spin_unlock(&cfs_wi_data.wi_glock);
 
-		if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
+		if (sched->ws_cptab && sched->ws_cpt >= 0) {
 			snprintf(name, sizeof(name), "%s_%02d_%02u",
 				 sched->ws_name, sched->ws_cpt,
 				 sched->ws_nthreads);
@@ -455,7 +456,7 @@
 	}
 	while (!list_empty(&cfs_wi_data.wi_scheds)) {
 		sched = list_entry(cfs_wi_data.wi_scheds.next,
-				       struct cfs_wi_sched, ws_list);
+				   struct cfs_wi_sched, ws_list);
 		list_del(&sched->ws_list);
 		LIBCFS_FREE(sched, sizeof(*sched));
 	}
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 3d6745e..0bc0fb9f 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -60,9 +60,9 @@
 {
 	struct ll_dentry_data *lld;
 
-	LASSERT(de != NULL);
+	LASSERT(de);
 	lld = ll_d2d(de);
-	if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
+	if (!lld) /* NFS copies the de->d_op methods (bug 4655) */
 		return;
 
 	if (lld->lld_it) {
@@ -80,7 +80,8 @@
  * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
  * an AST before calling d_revalidate_it().  The dentry still exists (marked
  * INVALID) so d_lookup() matches it, but we have no lock on it (so
- * lock_match() fails) and we spin around real_lookup(). */
+ * lock_match() fails) and we spin around real_lookup().
+ */
 static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
 		       unsigned int len, const char *str,
 		       const struct qstr *name)
@@ -117,7 +118,8 @@
 /* find any ldlm lock of the inode in mdc and lov
  * return 0    not find
  *	1    find one
- *      < 0    error */
+ *      < 0    error
+ */
 static int find_cbdata(struct inode *inode)
 {
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -131,7 +133,7 @@
 		return rc;
 
 	lsm = ccc_inode_lsm_get(inode);
-	if (lsm == NULL)
+	if (!lsm)
 		return rc;
 
 	rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
@@ -163,10 +165,12 @@
 	/* Disable this piece of code temporarily because this is called
 	 * inside dcache_lock so it's not appropriate to do lots of work
 	 * here. ATTENTION: Before this piece of code enabling, LU-2487 must be
-	 * resolved. */
+	 * resolved.
+	 */
 #if 0
 	/* if not ldlm lock for this inode, set i_nlink to 0 so that
-	 * this inode can be recycled later b=20433 */
+	 * this inode can be recycled later b=20433
+	 */
 	if (d_really_is_positive(de) && !find_cbdata(d_inode(de)))
 		clear_nlink(d_inode(de));
 #endif
@@ -178,19 +182,17 @@
 
 int ll_d_init(struct dentry *de)
 {
-	LASSERT(de != NULL);
-
 	CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
 		de, de, de->d_parent, d_inode(de),
 		d_count(de));
 
-	if (de->d_fsdata == NULL) {
+	if (!de->d_fsdata) {
 		struct ll_dentry_data *lld;
 
 		lld = kzalloc(sizeof(*lld), GFP_NOFS);
 		if (likely(lld)) {
 			spin_lock(&de->d_lock);
-			if (likely(de->d_fsdata == NULL)) {
+			if (likely(!de->d_fsdata)) {
 				de->d_fsdata = lld;
 				__d_lustre_invalidate(de);
 			} else {
@@ -218,7 +220,8 @@
 		ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
 
 		/* bug 494: intent_release may be called multiple times, from
-		 * this thread and we don't want to double-decref this lock */
+		 * this thread and we don't want to double-decref this lock
+		 */
 		it->d.lustre.it_lock_mode = 0;
 		if (it->d.lustre.it_remote_lock_mode != 0) {
 			handle.cookie = it->d.lustre.it_remote_lock_handle;
@@ -251,8 +254,6 @@
 {
 	struct dentry *dentry;
 
-	LASSERT(inode != NULL);
-
 	CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
 	       inode->i_ino, inode->i_generation, inode);
 
@@ -286,9 +287,7 @@
 
 void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
-	LASSERT(it != NULL);
-
-	if (it->d.lustre.it_lock_mode && inode != NULL) {
+	if (it->d.lustre.it_lock_mode && inode) {
 		struct ll_sb_info *sbi = ll_i2sbi(inode);
 
 		CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
@@ -300,7 +299,8 @@
 	if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
 		/* on 2.6 there are situation when several lookups and
 		 * revalidations may be requested during single operation.
-		 * therefore, we don't release intent here -bzzz */
+		 * therefore, we don't release intent here -bzzz
+		 */
 		ll_intent_drop_lock(it);
 	}
 }
@@ -328,7 +328,7 @@
 	if (lookup_flags & LOOKUP_RCU)
 		return -ECHILD;
 
-	do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL);
+	do_statahead_enter(dir, &dentry, !d_inode(dentry));
 	ll_statahead_mark(dir, dentry);
 	return 1;
 }
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 8982f7d..bd88a3b 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -55,6 +55,7 @@
 #include "../include/lustre_lite.h"
 #include "../include/lustre_dlm.h"
 #include "../include/lustre_fid.h"
+#include "../include/lustre_kernelcomm.h"
 #include "llite_internal.h"
 
 /*
@@ -189,8 +190,6 @@
 	} else if (rc == 0) {
 		body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
 		/* Checked by mdc_readpage() */
-		LASSERT(body != NULL);
-
 		if (body->valid & OBD_MD_FLSIZE)
 			cl_isize_write(inode, body->size);
 
@@ -244,7 +243,7 @@
 	kunmap(page);
 	if (remove) {
 		lock_page(page);
-		if (likely(page->mapping != NULL))
+		if (likely(page->mapping))
 			truncate_complete_page(page->mapping, page);
 		unlock_page(page);
 	}
@@ -333,7 +332,7 @@
 	struct lustre_handle lockh;
 	struct lu_dirpage *dp;
 	struct page *page;
-	ldlm_mode_t mode;
+	enum ldlm_mode mode;
 	int rc;
 	__u64 start = 0;
 	__u64 end = 0;
@@ -380,7 +379,8 @@
 				 &it.d.lustre.it_lock_handle, dir, NULL);
 	} else {
 		/* for cross-ref object, l_ast_data of the lock may not be set,
-		 * we reset it here */
+		 * we reset it here
+		 */
 		md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
 				 dir, NULL);
 	}
@@ -392,7 +392,7 @@
 		CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
 		       PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
 		goto out_unlock;
-	} else if (page != NULL) {
+	} else if (page) {
 		/*
 		 * XXX nikita: not entirely correct handling of a corner case:
 		 * suppose hash chain of entries with hash value HASH crosses
@@ -498,7 +498,7 @@
 			__u64 next;
 
 			dp = page_address(page);
-			for (ent = lu_dirent_start(dp); ent != NULL && !done;
+			for (ent = lu_dirent_start(dp); ent && !done;
 			     ent = lu_dirent_next(ent)) {
 				__u16	  type;
 				int	    namelen;
@@ -688,7 +688,7 @@
 	struct obd_device *mgc = lsi->lsi_mgc;
 	int lum_size;
 
-	if (lump != NULL) {
+	if (lump) {
 		/*
 		 * This is coming from userspace, so should be in
 		 * local endian.  But the MDS would like it in little
@@ -724,7 +724,7 @@
 	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
 
-	if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
+	if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
 		op_data->op_cli_flags |= CLI_SET_MEA;
 
 	/* swabbing is done in lov_setstripe() on server side */
@@ -738,8 +738,9 @@
 	}
 
 	/* In the following we use the fact that LOV_USER_MAGIC_V1 and
-	 LOV_USER_MAGIC_V3 have the same initial fields so we do not
-	 need to make the distinction between the 2 versions */
+	 * LOV_USER_MAGIC_V3 have the same initial fields so we do not
+	 * need to make the distinction between the 2 versions
+	 */
 	if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
 		char *param = NULL;
 		char *buf;
@@ -811,7 +812,6 @@
 	}
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	LASSERT(body != NULL);
 
 	lmmsize = body->eadatasize;
 
@@ -823,7 +823,6 @@
 
 	lmm = req_capsule_server_sized_get(&req->rq_pill,
 					   &RMF_MDT_MD, lmmsize);
-	LASSERT(lmm != NULL);
 
 	/*
 	 * This is coming from the MDS, so is probably in
@@ -932,7 +931,8 @@
 		}
 
 		/* Store it the hsm_copy for later copytool use.
-		 * Always modified even if no lsm. */
+		 * Always modified even if no lsm.
+		 */
 		copy->hc_data_version = data_version;
 	}
 
@@ -1009,11 +1009,13 @@
 		}
 
 		/* Store it the hsm_copy for later copytool use.
-		 * Always modified even if no lsm. */
+		 * Always modified even if no lsm.
+		 */
 		hpk.hpk_data_version = data_version;
 
 		/* File could have been stripped during archiving, so we need
-		 * to check anyway. */
+		 * to check anyway.
+		 */
 		if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
 		    (copy->hc_data_version != data_version)) {
 			CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
@@ -1025,7 +1027,8 @@
 			 * the cdt will loop on retried archive requests.
 			 * The policy engine will ask for a new archive later
 			 * when the file will not be modified for some tunable
-			 * time */
+			 * time
+			 */
 			/* we do not notify caller */
 			hpk.hpk_flags &= ~HP_FLAG_RETRY;
 			/* hpk_errval must be >= 0 */
@@ -1153,7 +1156,8 @@
 			return rc;
 		}
 		/* If QIF_SPACE is not set, client should collect the
-		 * space usage from OSSs by itself */
+		 * space usage from OSSs by itself
+		 */
 		if (cmd == Q_GETQUOTA &&
 		    !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
 		    !oqctl->qc_dqblk.dqb_curspace) {
@@ -1204,7 +1208,8 @@
 
 /* This function tries to get a single name component,
  * to send to the server. No actual path traversal involved,
- * so we limit to NAME_MAX */
+ * so we limit to NAME_MAX
+ */
 static char *ll_getname(const char __user *filename)
 {
 	int ret = 0, len;
@@ -1252,7 +1257,7 @@
 		return ll_iocontrol(inode, file, cmd, arg);
 	case FSFILT_IOC_GETVERSION_OLD:
 	case FSFILT_IOC_GETVERSION:
-		return put_user(inode->i_generation, (int *)arg);
+		return put_user(inode->i_generation, (int __user *)arg);
 	/* We need to special case any other ioctls we want to handle,
 	 * to send them to the MDS/OST as appropriate and to properly
 	 * network encode the arg field.
@@ -1266,7 +1271,7 @@
 		if (mdtidx < 0)
 			return mdtidx;
 
-		if (put_user((int)mdtidx, (int *)arg))
+		if (put_user((int)mdtidx, (int __user *)arg))
 			return -EFAULT;
 
 		return 0;
@@ -1278,7 +1283,7 @@
 		char *filename;
 		struct md_op_data *op_data;
 
-		rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
+		rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
 		if (rc)
 			return rc;
 		data = (void *)buf;
@@ -1320,12 +1325,12 @@
 		int		 len;
 		int		 rc;
 
-		rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
+		rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
 		if (rc)
 			return rc;
 
 		data = (void *)buf;
-		if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
+		if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
 		    data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
 			rc = -EINVAL;
 			goto lmv_out_free;
@@ -1363,8 +1368,8 @@
 	case LL_IOC_LOV_SETSTRIPE: {
 		struct lov_user_md_v3 lumv3;
 		struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
-		struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
-		struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+		struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
+		struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
 
 		int set_default = 0;
 
@@ -1389,7 +1394,7 @@
 		return rc;
 	}
 	case LL_IOC_LMV_GETSTRIPE: {
-		struct lmv_user_md *lump = (struct lmv_user_md *)arg;
+		struct lmv_user_md __user *lump = (void __user *)arg;
 		struct lmv_user_md lum;
 		struct lmv_user_md *tmp;
 		int lum_size;
@@ -1422,7 +1427,7 @@
 		tmp->lum_objects[0].lum_mds = mdtindex;
 		memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
 		       sizeof(struct lu_fid));
-		if (copy_to_user((void *)arg, tmp, lum_size)) {
+		if (copy_to_user((void __user *)arg, tmp, lum_size)) {
 			rc = -EFAULT;
 			goto free_lmv;
 		}
@@ -1433,13 +1438,13 @@
 	case LL_IOC_LOV_SWAP_LAYOUTS:
 		return -EPERM;
 	case LL_IOC_OBD_STATFS:
-		return ll_obd_statfs(inode, (void *)arg);
+		return ll_obd_statfs(inode, (void __user *)arg);
 	case LL_IOC_LOV_GETSTRIPE:
 	case LL_IOC_MDC_GETINFO:
 	case IOC_MDC_GETFILEINFO:
 	case IOC_MDC_GETFILESTRIPE: {
 		struct ptlrpc_request *request = NULL;
-		struct lov_user_md *lump;
+		struct lov_user_md __user *lump;
 		struct lov_mds_md *lmm = NULL;
 		struct mdt_body *body;
 		char *filename = NULL;
@@ -1447,7 +1452,7 @@
 
 		if (cmd == IOC_MDC_GETFILEINFO ||
 		    cmd == IOC_MDC_GETFILESTRIPE) {
-			filename = ll_getname((const char *)arg);
+			filename = ll_getname((const char __user *)arg);
 			if (IS_ERR(filename))
 				return PTR_ERR(filename);
 
@@ -1460,7 +1465,7 @@
 		if (request) {
 			body = req_capsule_server_get(&request->rq_pill,
 						      &RMF_MDT_BODY);
-			LASSERT(body != NULL);
+			LASSERT(body);
 		} else {
 			goto out_req;
 		}
@@ -1476,11 +1481,11 @@
 
 		if (cmd == IOC_MDC_GETFILESTRIPE ||
 		    cmd == LL_IOC_LOV_GETSTRIPE) {
-			lump = (struct lov_user_md *)arg;
+			lump = (struct lov_user_md __user *)arg;
 		} else {
-			struct lov_user_mds_data *lmdp;
+			struct lov_user_mds_data __user *lmdp;
 
-			lmdp = (struct lov_user_mds_data *)arg;
+			lmdp = (struct lov_user_mds_data __user *)arg;
 			lump = &lmdp->lmd_lmm;
 		}
 		if (copy_to_user(lump, lmm, lmmsize)) {
@@ -1492,7 +1497,7 @@
 		}
 skip_lmm:
 		if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
-			struct lov_user_mds_data *lmdp;
+			struct lov_user_mds_data __user *lmdp;
 			lstat_t st = { 0 };
 
 			st.st_dev     = inode->i_sb->s_dev;
@@ -1509,7 +1514,7 @@
 			st.st_ctime   = body->ctime;
 			st.st_ino     = inode->i_ino;
 
-			lmdp = (struct lov_user_mds_data *)arg;
+			lmdp = (struct lov_user_mds_data __user *)arg;
 			if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
 				rc = -EFAULT;
 				goto out_req;
@@ -1523,14 +1528,14 @@
 		return rc;
 	}
 	case IOC_LOV_GETINFO: {
-		struct lov_user_mds_data *lumd;
+		struct lov_user_mds_data __user *lumd;
 		struct lov_stripe_md *lsm;
-		struct lov_user_md *lum;
+		struct lov_user_md __user *lum;
 		struct lov_mds_md *lmm;
 		int lmmsize;
 		lstat_t st;
 
-		lumd = (struct lov_user_mds_data *)arg;
+		lumd = (struct lov_user_mds_data __user *)arg;
 		lum = &lumd->lmd_lmm;
 
 		rc = ll_get_max_mdsize(sbi, &lmmsize);
@@ -1538,7 +1543,7 @@
 			return rc;
 
 		lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
-		if (lmm == NULL)
+		if (!lmm)
 			return -ENOMEM;
 		if (copy_from_user(lmm, lum, lmmsize)) {
 			rc = -EFAULT;
@@ -1636,8 +1641,8 @@
 				   NULL);
 		if (rc) {
 			CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
-			if (copy_to_user((void *)arg, check,
-					     sizeof(*check)))
+			if (copy_to_user((void __user *)arg, check,
+					 sizeof(*check)))
 				CDEBUG(D_QUOTA, "copy_to_user failed\n");
 			goto out_poll;
 		}
@@ -1646,8 +1651,8 @@
 				   NULL);
 		if (rc) {
 			CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
-			if (copy_to_user((void *)arg, check,
-					     sizeof(*check)))
+			if (copy_to_user((void __user *)arg, check,
+					 sizeof(*check)))
 				CDEBUG(D_QUOTA, "copy_to_user failed\n");
 			goto out_poll;
 		}
@@ -1662,14 +1667,15 @@
 		if (!qctl)
 			return -ENOMEM;
 
-		if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) {
+		if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) {
 			rc = -EFAULT;
 			goto out_quotactl;
 		}
 
 		rc = quotactl_ioctl(sbi, qctl);
 
-		if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
+		if (rc == 0 && copy_to_user((void __user *)arg, qctl,
+					    sizeof(*qctl)))
 			rc = -EFAULT;
 
 out_quotactl:
@@ -1686,7 +1692,6 @@
 	    if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
 		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 
-		LASSERT(fd != NULL);
 		rc = rct_add(&sbi->ll_rct, current_pid(), arg);
 		if (!rc)
 			fd->fd_flags |= LL_FILE_RMTACL;
@@ -1699,7 +1704,7 @@
 		int count, vallen;
 		struct obd_export *exp;
 
-		if (copy_from_user(&count, (int *)arg, sizeof(int)))
+		if (copy_from_user(&count, (int __user *)arg, sizeof(int)))
 			return -EFAULT;
 
 		/* get ost count when count is zero, get mdt count otherwise */
@@ -1712,34 +1717,35 @@
 			return rc;
 		}
 
-		if (copy_to_user((int *)arg, &count, sizeof(int)))
+		if (copy_to_user((int __user *)arg, &count, sizeof(int)))
 			return -EFAULT;
 
 		return 0;
 	}
 	case LL_IOC_PATH2FID:
-		if (copy_to_user((void *)arg, ll_inode2fid(inode),
-				     sizeof(struct lu_fid)))
+		if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
+				 sizeof(struct lu_fid)))
 			return -EFAULT;
 		return 0;
 	case LL_IOC_GET_CONNECT_FLAGS: {
-		return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg);
+		return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL,
+				     (void __user *)arg);
 	}
 	case OBD_IOC_CHANGELOG_SEND:
 	case OBD_IOC_CHANGELOG_CLEAR:
 		if (!capable(CFS_CAP_SYS_ADMIN))
 			return -EPERM;
 
-		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
+		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
 				    sizeof(struct ioc_changelog));
 		return rc;
 	case OBD_IOC_FID2PATH:
-		return ll_fid2path(inode, (void *)arg);
+		return ll_fid2path(inode, (void __user *)arg);
 	case LL_IOC_HSM_REQUEST: {
 		struct hsm_user_request	*hur;
 		ssize_t			 totalsize;
 
-		hur = memdup_user((void *)arg, sizeof(*hur));
+		hur = memdup_user((void __user *)arg, sizeof(*hur));
 		if (IS_ERR(hur))
 			return PTR_ERR(hur);
 
@@ -1754,11 +1760,11 @@
 			return -E2BIG;
 
 		hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
-		if (hur == NULL)
+		if (!hur)
 			return -ENOMEM;
 
 		/* Copy the whole struct */
-		if (copy_from_user(hur, (void *)arg, totalsize)) {
+		if (copy_from_user(hur, (void __user *)arg, totalsize)) {
 			kvfree(hur);
 			return -EFAULT;
 		}
@@ -1794,7 +1800,7 @@
 		struct hsm_progress_kernel	hpk;
 		struct hsm_progress		hp;
 
-		if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
+		if (copy_from_user(&hp, (void __user *)arg, sizeof(hp)))
 			return -EFAULT;
 
 		hpk.hpk_fid = hp.hp_fid;
@@ -1805,13 +1811,14 @@
 		hpk.hpk_data_version = 0;
 
 		/* File may not exist in Lustre; all progress
-		 * reported to Lustre root */
+		 * reported to Lustre root
+		 */
 		rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
 				   NULL);
 		return rc;
 	}
 	case LL_IOC_HSM_CT_START:
-		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
+		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
 				    sizeof(struct lustre_kernelcomm));
 		return rc;
 
@@ -1819,12 +1826,12 @@
 		struct hsm_copy	*copy;
 		int		 rc;
 
-		copy = memdup_user((char *)arg, sizeof(*copy));
+		copy = memdup_user((char __user *)arg, sizeof(*copy));
 		if (IS_ERR(copy))
 			return PTR_ERR(copy);
 
 		rc = ll_ioc_copy_start(inode->i_sb, copy);
-		if (copy_to_user((char *)arg, copy, sizeof(*copy)))
+		if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
 			rc = -EFAULT;
 
 		kfree(copy);
@@ -1834,19 +1841,20 @@
 		struct hsm_copy	*copy;
 		int		 rc;
 
-		copy = memdup_user((char *)arg, sizeof(*copy));
+		copy = memdup_user((char __user *)arg, sizeof(*copy));
 		if (IS_ERR(copy))
 			return PTR_ERR(copy);
 
 		rc = ll_ioc_copy_end(inode->i_sb, copy);
-		if (copy_to_user((char *)arg, copy, sizeof(*copy)))
+		if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
 			rc = -EFAULT;
 
 		kfree(copy);
 		return rc;
 	}
 	default:
-		return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
+		return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
+				     (void __user *)arg);
 	}
 }
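
Most of the dir.c changes add __user annotations so the ioctl argument is only ever touched through copy_to_user()/copy_from_user()/put_user(), letting sparse flag direct dereferences. A small, hypothetical sketch of that convention follows; the demo_* names, the command number, and the returned values are all made up.

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_info {
	__u32 generation;
	__u32 flags;
};

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *uarg = (void __user *)arg;	/* cast once, annotate */
	struct demo_info info = {
		.generation = 42,	/* placeholder value */
		.flags = 0,
	};

	switch (cmd) {
	case 0x1234:	/* hypothetical command number */
		if (copy_to_user(uarg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	default:
		return -ENOTTY;
	}
}
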
 
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 39e2ffd..4c25cf2 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -65,7 +65,7 @@
 	struct ll_file_data *fd;
 
 	fd = kmem_cache_alloc(ll_file_data_slab, GFP_NOFS | __GFP_ZERO);
-	if (fd == NULL)
+	if (!fd)
 		return NULL;
 	fd->fd_write_failed = false;
 	return fd;
@@ -73,7 +73,7 @@
 
 static void ll_file_data_put(struct ll_file_data *fd)
 {
-	if (fd != NULL)
+	if (fd)
 		kmem_cache_free(ll_file_data_slab, fd);
 }
 
@@ -134,7 +134,7 @@
 	int epoch_close = 1;
 	int rc;
 
-	if (obd == NULL) {
+	if (!obd) {
 		/*
 		 * XXX: in case of LMV, is this correct to access
 		 * ->exp_handle?
@@ -153,7 +153,7 @@
 	}
 
 	ll_prepare_close(inode, op_data, och);
-	if (data_version != NULL) {
+	if (data_version) {
 		/* Pass in data_version implies release. */
 		op_data->op_bias |= MDS_HSM_RELEASE;
 		op_data->op_data_version = *data_version;
@@ -166,7 +166,8 @@
 		/* This close must have the epoch closed. */
 		LASSERT(epoch_close);
 		/* MDS has instructed us to obtain Size-on-MDS attribute from
-		 * OSTs and send setattr to back to MDS. */
+		 * OSTs and send setattr back to MDS.
+		 */
 		rc = ll_som_update(inode, op_data);
 		if (rc) {
 			CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
@@ -179,7 +180,8 @@
 	}
 
 	/* DATA_MODIFIED flag was successfully sent on close, cancel data
-	 * modification flag. */
+	 * modification flag.
+	 */
 	if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
 		struct ll_inode_info *lli = ll_i2info(inode);
 
@@ -242,7 +244,8 @@
 	mutex_lock(&lli->lli_och_mutex);
 	if (*och_usecount > 0) {
 		/* There are still users of this handle, so skip
-		 * freeing it. */
+		 * freeing it.
+		 */
 		mutex_unlock(&lli->lli_och_mutex);
 		return 0;
 	}
@@ -251,9 +254,10 @@
 	*och_p = NULL;
 	mutex_unlock(&lli->lli_och_mutex);
 
-	if (och != NULL) {
+	if (och) {
 		/* There might be a race and this handle may already
-		   be closed. */
+		 * be closed.
+		 */
 		rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
 					       inode, och, NULL);
 	}
@@ -276,11 +280,12 @@
 	if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
 		ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
 
-	if (fd->fd_lease_och != NULL) {
+	if (fd->fd_lease_och) {
 		bool lease_broken;
 
 		/* Usually the lease is not released when the
-		 * application crashed, we need to release here. */
+		 * application crashes, so we need to release it here.
+		 */
 		rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
 		CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
 			PFID(&lli->lli_fid), rc, lease_broken);
@@ -288,14 +293,15 @@
 		fd->fd_lease_och = NULL;
 	}
 
-	if (fd->fd_och != NULL) {
+	if (fd->fd_och) {
 		rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
 		fd->fd_och = NULL;
 		goto out;
 	}
 
 	/* Let's see if we have good enough OPEN lock on the file and if
-	   we can skip talking to MDS */
+	 * we can skip talking to MDS
+	 */
 
 	mutex_lock(&lli->lli_och_mutex);
 	if (fd->fd_omode & FMODE_WRITE) {
@@ -343,7 +349,6 @@
 	if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
 		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 
-		LASSERT(fd != NULL);
 		if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
 			fd->fd_flags &= ~LL_FILE_RMTACL;
 			rct_del(&sbi->ll_rct, current_pid());
@@ -355,11 +360,12 @@
 	if (!is_root_inode(inode))
 		ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
 	fd = LUSTRE_FPRIVATE(file);
-	LASSERT(fd != NULL);
+	LASSERT(fd);
 
-	/* The last ref on @file, maybe not the owner pid of statahead.
+	/* The last ref on @file, may not be the owner pid of statahead.
 	 * Different processes can open the same dir, "ll_opendir_key" means:
-	 * it is me that should stop the statahead thread. */
+	 * it is me that should stop the statahead thread.
+	 */
 	if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
 	    lli->lli_opendir_pid != 0)
 		ll_stop_statahead(inode, lli->lli_opendir_key);
@@ -396,16 +402,16 @@
 	__u32 opc = LUSTRE_OPC_ANY;
 	int rc;
 
-	/* Usually we come here only for NFSD, and we want open lock.
-	   But we can also get here with pre 2.6.15 patchless kernels, and in
-	   that case that lock is also ok */
+	/* Usually we come here only for NFSD, and we want open lock. */
 	/* We can also get here if there was cached open handle in revalidate_it
 	 * but it disappeared while we were getting from there to ll_file_open.
 	 * But this means this file was closed and immediately opened which
-	 * makes a good candidate for using OPEN lock */
+	 * makes a good candidate for using OPEN lock
+	 */
 	/* If lmmsize & lmm are not 0, we are just setting stripe info
-	 * parameters. No need for the open lock */
-	if (lmm == NULL && lmmsize == 0) {
+	 * parameters. No need for the open lock
+	 */
+	if (!lmm && lmmsize == 0) {
 		itp->it_flags |= MDS_OPEN_LOCK;
 		if (itp->it_flags & FMODE_WRITE)
 			opc = LUSTRE_OPC_CREATE;
@@ -492,7 +498,7 @@
 
 	LASSERT(!LUSTRE_FPRIVATE(file));
 
-	LASSERT(fd != NULL);
+	LASSERT(fd);
 
 	if (och) {
 		struct ptlrpc_request *req = it->d.lustre.it_data;
@@ -543,7 +549,7 @@
 	file->private_data = NULL; /* prevent ll_local_open assertion */
 
 	fd = ll_file_data_get();
-	if (fd == NULL) {
+	if (!fd) {
 		rc = -ENOMEM;
 		goto out_openerr;
 	}
@@ -551,7 +557,7 @@
 	fd->fd_file = file;
 	if (S_ISDIR(inode->i_mode)) {
 		spin_lock(&lli->lli_sa_lock);
-		if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+		if (!lli->lli_opendir_key && !lli->lli_sai &&
 		    lli->lli_opendir_pid == 0) {
 			lli->lli_opendir_key = fd;
 			lli->lli_opendir_pid = current_pid();
@@ -568,7 +574,8 @@
 	if (!it || !it->d.lustre.it_disposition) {
 		/* Convert f_flags into access mode. We cannot use file->f_mode,
 		 * because everything but O_ACCMODE mask was stripped from
-		 * there */
+		 * there
+		 */
 		if ((oit.it_flags + 1) & O_ACCMODE)
 			oit.it_flags++;
 		if (file->f_flags & O_TRUNC)
@@ -577,17 +584,20 @@
 		/* kernel only call f_op->open in dentry_open.  filp_open calls
 		 * dentry_open after call to open_namei that checks permissions.
 		 * Only nfsd_open call dentry_open directly without checking
-		 * permissions and because of that this code below is safe. */
+		 * permissions and because of that this code below is safe.
+		 */
 		if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
 			oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
 
 		/* We do not want O_EXCL here, presumably we opened the file
-		 * already? XXX - NFS implications? */
+		 * already? XXX - NFS implications?
+		 */
 		oit.it_flags &= ~O_EXCL;
 
 		/* bug20584, if "it_flags" contains O_CREAT, the file will be
 		 * created if necessary, then "IT_CREAT" should be set to keep
-		 * consistent with it */
+		 * consistent with it
+		 */
 		if (oit.it_flags & O_CREAT)
 			oit.it_op |= IT_CREAT;
 
@@ -611,7 +621,8 @@
 	if (*och_p) { /* Open handle is present */
 		if (it_disposition(it, DISP_OPEN_OPEN)) {
 			/* Well, there's extra open request that we do not need,
-			   let's close it somehow. This will decref request. */
+			 * let's close it somehow. This will decref request.
+			 */
 			rc = it_open_error(DISP_OPEN_OPEN, it);
 			if (rc) {
 				mutex_unlock(&lli->lli_och_mutex);
@@ -632,10 +643,11 @@
 		LASSERT(*och_usecount == 0);
 		if (!it->d.lustre.it_disposition) {
 			/* We cannot just request lock handle now, new ELC code
-			   means that one of other OPEN locks for this file
-			   could be cancelled, and since blocking ast handler
-			   would attempt to grab och_mutex as well, that would
-			   result in a deadlock */
+			 * means that one of the other OPEN locks for this file
+			 * could be cancelled, and since the blocking ast handler
+			 * would attempt to grab och_mutex as well, that would
+			 * result in a deadlock
+			 */
 			mutex_unlock(&lli->lli_och_mutex);
 			it->it_create_mode |= M_CHECK_STALE;
 			rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
@@ -655,9 +667,11 @@
 
 		/* md_intent_lock() didn't get a request ref if there was an
 		 * open error, so don't do cleanup on the request here
-		 * (bug 3430) */
+		 * (bug 3430)
+		 */
 		/* XXX (green): Should not we bail out on any error here, not
-		 * just open error? */
+		 * just open error?
+		 */
 		rc = it_open_error(DISP_OPEN_OPEN, it);
 		if (rc)
 			goto out_och_free;
@@ -672,8 +686,9 @@
 	fd = NULL;
 
 	/* Must do this outside lli_och_mutex lock to prevent deadlock where
-	   different kind of OPEN lock for this same inode gets cancelled
-	   by ldlm_cancel_lru */
+	 * different kind of OPEN lock for this same inode gets cancelled
+	 * by ldlm_cancel_lru
+	 */
 	if (!S_ISREG(inode->i_mode))
 		goto out_och_free;
 
@@ -752,7 +767,7 @@
 	if (fmode != FMODE_WRITE && fmode != FMODE_READ)
 		return ERR_PTR(-EINVAL);
 
-	if (file != NULL) {
+	if (file) {
 		struct ll_inode_info *lli = ll_i2info(inode);
 		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 		struct obd_client_handle **och_p;
@@ -764,18 +779,18 @@
 		/* Get the openhandle of the file */
 		rc = -EBUSY;
 		mutex_lock(&lli->lli_och_mutex);
-		if (fd->fd_lease_och != NULL) {
+		if (fd->fd_lease_och) {
 			mutex_unlock(&lli->lli_och_mutex);
 			return ERR_PTR(rc);
 		}
 
-		if (fd->fd_och == NULL) {
+		if (!fd->fd_och) {
 			if (file->f_mode & FMODE_WRITE) {
-				LASSERT(lli->lli_mds_write_och != NULL);
+				LASSERT(lli->lli_mds_write_och);
 				och_p = &lli->lli_mds_write_och;
 				och_usecount = &lli->lli_open_fd_write_count;
 			} else {
-				LASSERT(lli->lli_mds_read_och != NULL);
+				LASSERT(lli->lli_mds_read_och);
 				och_p = &lli->lli_mds_read_och;
 				och_usecount = &lli->lli_open_fd_read_count;
 			}
@@ -790,7 +805,7 @@
 		if (rc < 0) /* more than 1 opener */
 			return ERR_PTR(rc);
 
-		LASSERT(fd->fd_och != NULL);
+		LASSERT(fd->fd_och);
 		old_handle = fd->fd_och->och_fh;
 	}
 
@@ -817,7 +832,8 @@
 	 * broken;
 	 * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal
 	 * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
-	 * doesn't deal with openhandle, so normal openhandle will be leaked. */
+	 * doesn't deal with openhandle, the normal openhandle will be leaked.
+	 */
 				LDLM_FL_NO_LRU | LDLM_FL_EXCL);
 	ll_finish_md_op_data(op_data);
 	ptlrpc_req_finished(req);
@@ -886,7 +902,7 @@
 	int rc;
 
 	lock = ldlm_handle2lock(&och->och_lease_handle);
-	if (lock != NULL) {
+	if (lock) {
 		lock_res_and_lock(lock);
 		cancelled = ldlm_is_cancel(lock);
 		unlock_res_and_lock(lock);
@@ -898,7 +914,7 @@
 
 	if (!cancelled)
 		ldlm_cli_cancel(&och->och_lease_handle, 0);
-	if (lease_broken != NULL)
+	if (lease_broken)
 		*lease_broken = cancelled;
 
 	rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
@@ -914,7 +930,7 @@
 	struct obd_info	    oinfo = { };
 	int			rc;
 
-	LASSERT(lsm != NULL);
+	LASSERT(lsm);
 
 	oinfo.oi_md = lsm;
 	oinfo.oi_oa = obdo;
@@ -933,7 +949,7 @@
 	}
 
 	set = ptlrpc_prep_set();
-	if (set == NULL) {
+	if (!set) {
 		CERROR("can't allocate ptlrpc set\n");
 		rc = -ENOMEM;
 	} else {
@@ -986,7 +1002,8 @@
 
 	ll_inode_size_lock(inode);
 	/* merge timestamps the most recently obtained from mds with
-	   timestamps obtained from osts */
+	 * timestamps obtained from osts
+	 */
 	LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
 	LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
 	LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
@@ -1155,7 +1172,8 @@
 out:
 	cl_io_fini(env, io);
 	/* If any bit been read/written (result != 0), we just return
-	 * short read/write instead of restart io. */
+	 * short read/write instead of restarting the io.
+	 */
 	if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
 		CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n",
 		       iot == CIT_READ ? "read" : "write",
@@ -1261,7 +1279,7 @@
 	struct lov_stripe_md *lsm = NULL, *lsm2;
 
 	oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
-	if (oa == NULL)
+	if (!oa)
 		return -ENOMEM;
 
 	lsm = ccc_inode_lsm_get(inode);
@@ -1274,7 +1292,7 @@
 		   (lsm->lsm_stripe_count));
 
 	lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS);
-	if (lsm2 == NULL) {
+	if (!lsm2) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1307,7 +1325,7 @@
 	if (!capable(CFS_CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
+	if (copy_from_user(&ucreat, (struct ll_recreate_obj __user *)arg,
 			   sizeof(ucreat)))
 		return -EFAULT;
 
@@ -1325,7 +1343,7 @@
 	if (!capable(CFS_CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
+	if (copy_from_user(&fid, (struct lu_fid __user *)arg, sizeof(fid)))
 		return -EFAULT;
 
 	fid_to_ostid(&fid, &oi);
@@ -1341,7 +1359,7 @@
 	int rc = 0;
 
 	lsm = ccc_inode_lsm_get(inode);
-	if (lsm != NULL) {
+	if (lsm) {
 		ccc_inode_lsm_put(inode, lsm);
 		CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
 		       inode->i_ino);
@@ -1401,7 +1419,6 @@
 	}
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	LASSERT(body != NULL); /* checked by mdc_getattr_name */
 
 	lmmsize = body->eadatasize;
 
@@ -1412,7 +1429,6 @@
 	}
 
 	lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
-	LASSERT(lmm != NULL);
 
 	if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
 	    (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
@@ -1433,7 +1449,8 @@
 			stripe_count = 0;
 
 		/* if function called for directory - we should
-		 * avoid swab not existent lsm objects */
+		 * avoid swabbing non-existent lsm objects
+		 */
 		if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
 			lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
 			if (S_ISREG(body->mode))
@@ -1469,10 +1486,10 @@
 		return -EPERM;
 
 	lump = libcfs_kvzalloc(lum_size, GFP_NOFS);
-	if (lump == NULL)
+	if (!lump)
 		return -ENOMEM;
 
-	if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
+	if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) {
 		kvfree(lump);
 		return -EFAULT;
 	}
@@ -1488,12 +1505,12 @@
 static int ll_lov_setstripe(struct inode *inode, struct file *file,
 			    unsigned long arg)
 {
-	struct lov_user_md_v3	 lumv3;
-	struct lov_user_md_v1	*lumv1 = (struct lov_user_md_v1 *)&lumv3;
-	struct lov_user_md_v1	*lumv1p = (struct lov_user_md_v1 *)arg;
-	struct lov_user_md_v3	*lumv3p = (struct lov_user_md_v3 *)arg;
-	int			 lum_size, rc;
-	int			 flags = FMODE_WRITE;
+	struct lov_user_md_v3 lumv3;
+	struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+	struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
+	struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
+	int lum_size, rc;
+	int flags = FMODE_WRITE;
 
 	/* first try with v1 which is smaller than v3 */
 	lum_size = sizeof(struct lov_user_md_v1);
@@ -1518,7 +1535,7 @@
 		ll_layout_refresh(inode, &gen);
 		lsm = ccc_inode_lsm_get(inode);
 		rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
-				   0, lsm, (void *)arg);
+				   0, lsm, (void __user *)arg);
 		ccc_inode_lsm_put(inode, lsm);
 	}
 	return rc;
@@ -1530,9 +1547,9 @@
 	int rc = -ENODATA;
 
 	lsm = ccc_inode_lsm_get(inode);
-	if (lsm != NULL)
+	if (lsm)
 		rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
-				   lsm, (void *)arg);
+				   lsm, (void __user *)arg);
 	ccc_inode_lsm_put(inode, lsm);
 	return rc;
 }
@@ -1560,7 +1577,7 @@
 		spin_unlock(&lli->lli_lock);
 		return -EINVAL;
 	}
-	LASSERT(fd->fd_grouplock.cg_lock == NULL);
+	LASSERT(!fd->fd_grouplock.cg_lock);
 	spin_unlock(&lli->lli_lock);
 
 	rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
@@ -1597,7 +1614,7 @@
 		CWARN("no group lock held\n");
 		return -EINVAL;
 	}
-	LASSERT(fd->fd_grouplock.cg_lock != NULL);
+	LASSERT(fd->fd_grouplock.cg_lock);
 
 	if (fd->fd_grouplock.cg_gid != arg) {
 		CWARN("group lock %lu doesn't match current id %lu\n",
@@ -1688,7 +1705,7 @@
 	}
 
 	lsm = ccc_inode_lsm_get(inode);
-	if (lsm == NULL)
+	if (!lsm)
 		return -ENOENT;
 
 	/* If the stripe_count > 1 and the application does not understand
@@ -1782,7 +1799,8 @@
 	int rc = 0;
 
 	/* Get the extent count so we can calculate the size of
-	 * required fiemap buffer */
+	 * required fiemap buffer
+	 */
 	if (get_user(extent_count,
 	    &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
 		return -EFAULT;
@@ -1794,7 +1812,7 @@
 					 sizeof(struct ll_fiemap_extent));
 
 	fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
-	if (fiemap_s == NULL)
+	if (!fiemap_s)
 		return -ENOMEM;
 
 	/* get the fiemap value */
@@ -1806,7 +1824,8 @@
 
 	/* If fm_extent_count is non-zero, read the first extent since
 	 * it is used to calculate end_offset and device from previous
-	 * fiemap call. */
+	 * fiemap call.
+	 */
 	if (extent_count) {
 		if (copy_from_user(&fiemap_s->fm_extents[0],
 		    (char __user *)arg + sizeof(*fiemap_s),
@@ -1826,7 +1845,7 @@
 		ret_bytes += (fiemap_s->fm_mapped_extents *
 				 sizeof(struct ll_fiemap_extent));
 
-	if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+	if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes))
 		rc = -EFAULT;
 
 error:
@@ -1917,13 +1936,14 @@
 
 	/* Release the file.
 	 * NB: lease lock handle is released in mdc_hsm_release_pack() because
-	 * we still need it to pack l_remote_handle to MDT. */
+	 * we still need it to pack l_remote_handle to MDT.
+	 */
 	rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
 				       &data_version);
 	och = NULL;
 
 out:
-	if (och != NULL && !IS_ERR(och)) /* close the file */
+	if (och && !IS_ERR(och)) /* close the file */
 		ll_lease_close(och, inode, NULL);
 
 	return rc;
@@ -2007,7 +2027,8 @@
 	}
 
 	/* to be able to restore mtime and atime after swap
-	 * we need to first save them */
+	 * we need to first save them
+	 */
 	if (lsl->sl_flags &
 	    (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
 		llss->ia1.ia_mtime = llss->inode1->i_mtime;
@@ -2019,7 +2040,8 @@
 	}
 
 	/* ultimate check, before swapping the layouts we check if
-	 * dataversion has changed (if requested) */
+	 * dataversion has changed (if requested)
+	 */
 	if (llss->check_dv1) {
 		rc = ll_data_version(llss->inode1, &dv, 0);
 		if (rc)
@@ -2042,9 +2064,11 @@
 
 	/* struct md_op_data is used to send the swap args to the mdt
 	 * only flags is missing, so we use struct mdc_swap_layouts
-	 * through the md_op_data->op_data */
+	 * through the md_op_data->op_data
+	 */
 	/* flags from user space have to be converted before they are send to
-	 * server, no flag is sent today, they are only used on the client */
+	 * server, no flag is sent today, they are only used on the client
+	 */
 	msl.msl_flags = 0;
 	rc = -ENOMEM;
 	op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
@@ -2113,7 +2137,8 @@
 		return -EINVAL;
 
 	/* Non-root users are forbidden to set or clear flags which are
-	 * NOT defined in HSM_USER_MASK. */
+	 * NOT defined in HSM_USER_MASK.
+	 */
 	if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
 	    !capable(CFS_CAP_SYS_ADMIN))
 		return -EPERM;
@@ -2211,14 +2236,14 @@
 	switch (cmd) {
 	case LL_IOC_GETFLAGS:
 		/* Get the current value of the file flags */
-		return put_user(fd->fd_flags, (int *)arg);
+		return put_user(fd->fd_flags, (int __user *)arg);
 	case LL_IOC_SETFLAGS:
 	case LL_IOC_CLRFLAGS:
 		/* Set or clear specific file flags */
 		/* XXX This probably needs checks to ensure the flags are
 		 *     not abused, and to handle any flag side effects.
 		 */
-		if (get_user(flags, (int *) arg))
+		if (get_user(flags, (int __user *)arg))
 			return -EFAULT;
 
 		if (cmd == LL_IOC_SETFLAGS) {
@@ -2242,15 +2267,15 @@
 		struct file *file2;
 		struct lustre_swap_layouts lsl;
 
-		if (copy_from_user(&lsl, (char *)arg,
-				       sizeof(struct lustre_swap_layouts)))
+		if (copy_from_user(&lsl, (char __user *)arg,
+				   sizeof(struct lustre_swap_layouts)))
 			return -EFAULT;
 
 		if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
 			return -EPERM;
 
 		file2 = fget(lsl.sl_fd);
-		if (file2 == NULL)
+		if (!file2)
 			return -EBADF;
 
 		rc = -EPERM;
@@ -2272,13 +2297,13 @@
 		return ll_iocontrol(inode, file, cmd, arg);
 	case FSFILT_IOC_GETVERSION_OLD:
 	case FSFILT_IOC_GETVERSION:
-		return put_user(inode->i_generation, (int *)arg);
+		return put_user(inode->i_generation, (int __user *)arg);
 	case LL_IOC_GROUP_LOCK:
 		return ll_get_grouplock(inode, file, arg);
 	case LL_IOC_GROUP_UNLOCK:
 		return ll_put_grouplock(inode, file, arg);
 	case IOC_OBD_STATFS:
-		return ll_obd_statfs(inode, (void *)arg);
+		return ll_obd_statfs(inode, (void __user *)arg);
 
 	/* We need to special case any other ioctls we want to handle,
 	 * to send them to the MDS/OST as appropriate and to properly
@@ -2289,25 +2314,26 @@
 	case LL_IOC_FLUSHCTX:
 		return ll_flush_ctx(inode);
 	case LL_IOC_PATH2FID: {
-		if (copy_to_user((void *)arg, ll_inode2fid(inode),
+		if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
 				 sizeof(struct lu_fid)))
 			return -EFAULT;
 
 		return 0;
 	}
 	case OBD_IOC_FID2PATH:
-		return ll_fid2path(inode, (void *)arg);
+		return ll_fid2path(inode, (void __user *)arg);
 	case LL_IOC_DATA_VERSION: {
 		struct ioc_data_version	idv;
 		int			rc;
 
-		if (copy_from_user(&idv, (char *)arg, sizeof(idv)))
+		if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
 			return -EFAULT;
 
 		rc = ll_data_version(inode, &idv.idv_version,
 				!(idv.idv_flags & LL_DV_NOFLUSH));
 
-		if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv)))
+		if (rc == 0 && copy_to_user((char __user *)arg, &idv,
+					    sizeof(idv)))
 			return -EFAULT;
 
 		return rc;
@@ -2320,7 +2346,7 @@
 		if (mdtidx < 0)
 			return mdtidx;
 
-		if (put_user((int)mdtidx, (int *)arg))
+		if (put_user(mdtidx, (int __user *)arg))
 			return -EFAULT;
 
 		return 0;
@@ -2347,7 +2373,7 @@
 		rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
 				   op_data, NULL);
 
-		if (copy_to_user((void *)arg, hus, sizeof(*hus)))
+		if (copy_to_user((void __user *)arg, hus, sizeof(*hus)))
 			rc = -EFAULT;
 
 		ll_finish_md_op_data(op_data);
@@ -2358,7 +2384,7 @@
 		struct hsm_state_set	*hss;
 		int			 rc;
 
-		hss = memdup_user((char *)arg, sizeof(*hss));
+		hss = memdup_user((char __user *)arg, sizeof(*hss));
 		if (IS_ERR(hss))
 			return PTR_ERR(hss);
 
@@ -2386,7 +2412,7 @@
 		rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
 				   op_data, NULL);
 
-		if (copy_to_user((char *)arg, hca, sizeof(*hca)))
+		if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
 			rc = -EFAULT;
 
 		ll_finish_md_op_data(op_data);
@@ -2412,13 +2438,13 @@
 			break;
 		case F_UNLCK:
 			mutex_lock(&lli->lli_och_mutex);
-			if (fd->fd_lease_och != NULL) {
+			if (fd->fd_lease_och) {
 				och = fd->fd_lease_och;
 				fd->fd_lease_och = NULL;
 			}
 			mutex_unlock(&lli->lli_och_mutex);
 
-			if (och != NULL) {
+			if (och) {
 				mode = och->och_flags &
 				       (FMODE_READ|FMODE_WRITE);
 				rc = ll_lease_close(och, inode, &lease_broken);
@@ -2443,12 +2469,12 @@
 
 		rc = 0;
 		mutex_lock(&lli->lli_och_mutex);
-		if (fd->fd_lease_och == NULL) {
+		if (!fd->fd_lease_och) {
 			fd->fd_lease_och = och;
 			och = NULL;
 		}
 		mutex_unlock(&lli->lli_och_mutex);
-		if (och != NULL) {
+		if (och) {
 			/* impossible now that only excl is supported for now */
 			ll_lease_close(och, inode, &lease_broken);
 			rc = -EBUSY;
@@ -2461,11 +2487,11 @@
 
 		rc = 0;
 		mutex_lock(&lli->lli_och_mutex);
-		if (fd->fd_lease_och != NULL) {
+		if (fd->fd_lease_och) {
 			struct obd_client_handle *och = fd->fd_lease_och;
 
 			lock = ldlm_handle2lock(&och->och_lease_handle);
-			if (lock != NULL) {
+			if (lock) {
 				lock_res_and_lock(lock);
 				if (!ldlm_is_cancel(lock))
 					rc = och->och_flags &
@@ -2480,7 +2506,7 @@
 	case LL_IOC_HSM_IMPORT: {
 		struct hsm_user_import *hui;
 
-		hui = memdup_user((void *)arg, sizeof(*hui));
+		hui = memdup_user((void __user *)arg, sizeof(*hui));
 		if (IS_ERR(hui))
 			return PTR_ERR(hui);
 
@@ -2497,7 +2523,7 @@
 			return err;
 
 		return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
-				     (void *)arg);
+				     (void __user *)arg);
 	}
 	}
 }
@@ -2536,15 +2562,17 @@
 	LASSERT(!S_ISDIR(inode->i_mode));
 
 	/* catch async errors that were recorded back when async writeback
-	 * failed for pages in this mapping. */
+	 * failed for pages in this mapping.
+	 */
 	rc = lli->lli_async_rc;
 	lli->lli_async_rc = 0;
 	err = lov_read_and_clear_async_rc(lli->lli_clob);
 	if (rc == 0)
 		rc = err;
 
-	/* The application has been told write failure already.
-	 * Do not report failure again. */
+	/* The application has been told about write failure already.
+	 * Do not report failure again.
+	 */
 	if (fd->fd_write_failed)
 		return 0;
 	return rc ? -EIO : 0;
@@ -2612,7 +2640,8 @@
 	inode_lock(inode);
 
 	/* catch async errors that were recorded back when async writeback
-	 * failed for pages in this mapping. */
+	 * failed for pages in this mapping.
+	 */
 	if (!S_ISDIR(inode->i_mode)) {
 		err = lli->lli_async_rc;
 		lli->lli_async_rc = 0;
@@ -2683,7 +2712,8 @@
 	 * I guess between lockd processes) and then compares pid.
 	 * As such we assign pid to the owner field to make it all work,
 	 * conflict with normal locks is unlikely since pid space and
-	 * pointer space for current->files are not intersecting */
+	 * pointer space for current->files are not intersecting
+	 */
 	if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
 		flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
 
@@ -2699,7 +2729,8 @@
 		 * order to process an unlock request we need all of the same
 		 * information that is given with a normal read or write record
 		 * lock request. To avoid creating another ldlm unlock (cancel)
-		 * message we'll treat a LCK_NL flock request as an unlock. */
+		 * message we'll treat a LCK_NL flock request as an unlock.
+		 */
 		einfo.ei_mode = LCK_NL;
 		break;
 	case F_WRLCK:
@@ -2730,7 +2761,8 @@
 #endif
 		flags = LDLM_FL_TEST_LOCK;
 		/* Save the old mode so that if the mode in the lock changes we
-		 * can decrement the appropriate reader or writer refcount. */
+		 * can decrement the appropriate reader or writer refcount.
+		 */
 		file_lock->fl_type = einfo.ei_mode;
 		break;
 	default:
@@ -2782,11 +2814,12 @@
  * \param l_req_mode [IN] searched lock mode
  * \retval boolean, true iff all bits are found
  */
-int ll_have_md_lock(struct inode *inode, __u64 *bits,  ldlm_mode_t l_req_mode)
+int ll_have_md_lock(struct inode *inode, __u64 *bits,
+		    enum ldlm_mode l_req_mode)
 {
 	struct lustre_handle lockh;
 	ldlm_policy_data_t policy;
-	ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
+	enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
 				(LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
 	struct lu_fid *fid;
 	__u64 flags;
@@ -2822,13 +2855,13 @@
 	return *bits == 0;
 }
 
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
-			    struct lustre_handle *lockh, __u64 flags,
-			    ldlm_mode_t mode)
+enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
+			       struct lustre_handle *lockh, __u64 flags,
+			       enum ldlm_mode mode)
 {
 	ldlm_policy_data_t policy = { .l_inodebits = {bits} };
 	struct lu_fid *fid;
-	ldlm_mode_t rc;
+	enum ldlm_mode rc;
 
 	fid = &ll_i2info(inode)->lli_fid;
 	CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
@@ -2866,8 +2899,6 @@
 	struct obd_export *exp;
 	int rc = 0;
 
-	LASSERT(inode != NULL);
-
 	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
 	       inode->i_ino, inode->i_generation, inode, dentry);
 
@@ -2875,7 +2906,8 @@
 
 	/* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC.
 	 *      But under CMD case, it caused some lock issues, should be fixed
-	 *      with new CMD ibits lock. See bug 12718 */
+	 *      with new CMD ibits lock. See bug 12718
+	 */
 	if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) {
 		struct lookup_intent oit = { .it_op = IT_GETATTR };
 		struct md_op_data *op_data;
@@ -2893,7 +2925,8 @@
 		oit.it_create_mode |= M_CHECK_STALE;
 		rc = md_intent_lock(exp, op_data, NULL, 0,
 				    /* we are not interested in name
-				       based lookup */
+				     * based lookup
+				     */
 				    &oit, 0, &req,
 				    ll_md_blocking_ast, 0);
 		ll_finish_md_op_data(op_data);
@@ -2910,9 +2943,10 @@
 		}
 
 		/* Unlinked? Unhash dentry, so it is not picked up later by
-		   do_lookup() -> ll_revalidate_it(). We cannot use d_drop
-		   here to preserve get_cwd functionality on 2.6.
-		   Bug 10503 */
+		 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
+		 * here to preserve get_cwd functionality on 2.6.
+		 * Bug 10503
+		 */
 		if (!d_inode(dentry)->i_nlink)
 			d_lustre_invalidate(dentry, 0);
 
@@ -3026,26 +3060,33 @@
 				       sizeof(struct ll_fiemap_extent));
 	fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
 
-	if (fiemap == NULL)
+	if (!fiemap)
 		return -ENOMEM;
 
 	fiemap->fm_flags = fieinfo->fi_flags;
 	fiemap->fm_extent_count = fieinfo->fi_extents_max;
 	fiemap->fm_start = start;
 	fiemap->fm_length = len;
-	if (extent_count > 0)
-		memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
-		       sizeof(struct ll_fiemap_extent));
+	if (extent_count > 0 &&
+	    copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
+			   sizeof(struct ll_fiemap_extent)) != 0) {
+		rc = -EFAULT;
+		goto out;
+	}
 
 	rc = ll_do_fiemap(inode, fiemap, num_bytes);
 
 	fieinfo->fi_flags = fiemap->fm_flags;
 	fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
-	if (extent_count > 0)
-		memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
-		       fiemap->fm_mapped_extents *
-		       sizeof(struct ll_fiemap_extent));
+	if (extent_count > 0 &&
+	    copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
+			 fiemap->fm_mapped_extents *
+			 sizeof(struct ll_fiemap_extent)) != 0) {
+		rc = -EFAULT;
+		goto out;
+	}
 
+out:
 	kvfree(fiemap);
 	return rc;
 }
@@ -3067,13 +3108,12 @@
 {
 	int rc = 0;
 
-#ifdef MAY_NOT_BLOCK
 	if (mask & MAY_NOT_BLOCK)
 		return -ECHILD;
-#endif
 
        /* as root inode are NOT getting validated in lookup operation,
-	* need to do it before permission check. */
+	* need to do it before permission check.
+	*/
 
 	if (is_root_inode(inode)) {
 		rc = __ll_inode_revalidate(inode->i_sb->s_root,
@@ -3173,8 +3213,7 @@
 	unsigned int size;
 	struct llioc_data *in_data = NULL;
 
-	if (cb == NULL || cmd == NULL ||
-	    count > LLIOC_MAX_CMD || count < 0)
+	if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0)
 		return NULL;
 
 	size = sizeof(*in_data) + count * sizeof(unsigned int);
@@ -3200,7 +3239,7 @@
 {
 	struct llioc_data *tmp;
 
-	if (magic == NULL)
+	if (!magic)
 		return;
 
 	down_write(&llioc.ioc_sem);
@@ -3254,7 +3293,7 @@
 	struct lu_env *env;
 	int result;
 
-	if (lli->lli_clob == NULL)
+	if (!lli->lli_clob)
 		return 0;
 
 	env = cl_env_nested_get(&nest);
@@ -3267,13 +3306,14 @@
 	if (conf->coc_opc == OBJECT_CONF_SET) {
 		struct ldlm_lock *lock = conf->coc_lock;
 
-		LASSERT(lock != NULL);
+		LASSERT(lock);
 		LASSERT(ldlm_has_layout(lock));
 		if (result == 0) {
 			/* it can only be allowed to match after layout is
 			 * applied to inode otherwise false layout would be
 			 * seen. Applying layout should happen before dropping
-			 * the intent lock. */
+			 * the intent lock.
+			 */
 			ldlm_lock_allow_match(lock);
 		}
 	}
@@ -3296,14 +3336,15 @@
 	       PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
 	       lock->l_lvb_data, lock->l_lvb_len);
 
-	if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
+	if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY))
 		return 0;
 
 	/* if layout lock was granted right away, the layout is returned
 	 * within DLM_LVB of dlm reply; otherwise if the lock was ever
 	 * blocked and then granted via completion ast, we have to fetch
 	 * layout here. Please note that we can't use the LVB buffer in
-	 * completion AST because it doesn't have a large enough buffer */
+	 * completion AST because it doesn't have a large enough buffer
+	 */
 	rc = ll_get_default_mdsize(sbi, &lmmsize);
 	if (rc == 0)
 		rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
@@ -3313,7 +3354,7 @@
 		return rc;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -3325,20 +3366,20 @@
 	}
 
 	lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
-	if (lmm == NULL) {
+	if (!lmm) {
 		rc = -EFAULT;
 		goto out;
 	}
 
 	lvbdata = libcfs_kvzalloc(lmmsize, GFP_NOFS);
-	if (lvbdata == NULL) {
+	if (!lvbdata) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	memcpy(lvbdata, lmm, lmmsize);
 	lock_res_and_lock(lock);
-	if (lock->l_lvb_data != NULL)
+	if (lock->l_lvb_data)
 		kvfree(lock->l_lvb_data);
 
 	lock->l_lvb_data = lvbdata;
@@ -3354,8 +3395,8 @@
  * Apply the layout to the inode. Layout lock is held and will be released
  * in this function.
  */
-static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
-				struct inode *inode, __u32 *gen, bool reconf)
+static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
+			      struct inode *inode, __u32 *gen, bool reconf)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct ll_sb_info    *sbi = ll_i2sbi(inode);
@@ -3369,7 +3410,7 @@
 	LASSERT(lustre_handle_is_used(lockh));
 
 	lock = ldlm_handle2lock(lockh);
-	LASSERT(lock != NULL);
+	LASSERT(lock);
 	LASSERT(ldlm_has_layout(lock));
 
 	LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n",
@@ -3382,12 +3423,14 @@
 	lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
 	unlock_res_and_lock(lock);
 	/* checking lvb_ready is racy but this is okay. The worst case is
-	 * that multi processes may configure the file on the same time. */
+	 * that multiple processes may configure the file at the same time.
+	 */
 	if (lvb_ready || !reconf) {
 		rc = -ENODATA;
 		if (lvb_ready) {
 			/* layout_gen must be valid if layout lock is not
-			 * cancelled and stripe has already set */
+			 * cancelled and the stripe has already been set
+			 */
 			*gen = ll_layout_version_get(lli);
 			rc = 0;
 		}
@@ -3401,13 +3444,14 @@
 	/* for layout lock, lmm is returned in lock's lvb.
 	 * lvb_data is immutable if the lock is held so it's safe to access it
 	 * without res lock. See the description in ldlm_lock_decref_internal()
-	 * for the condition to free lvb_data of layout lock */
-	if (lock->l_lvb_data != NULL) {
+	 * for the condition to free lvb_data of layout lock
+	 */
+	if (lock->l_lvb_data) {
 		rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
 				  lock->l_lvb_data, lock->l_lvb_len);
 		if (rc >= 0) {
 			*gen = LL_LAYOUT_GEN_EMPTY;
-			if (md.lsm != NULL)
+			if (md.lsm)
 				*gen = md.lsm->lsm_layout_gen;
 			rc = 0;
 		} else {
@@ -3420,7 +3464,8 @@
 		goto out;
 
 	/* set layout to file. Unlikely this will fail as old layout was
-	 * surely eliminated */
+	 * surely eliminated
+	 */
 	memset(&conf, 0, sizeof(conf));
 	conf.coc_opc = OBJECT_CONF_SET;
 	conf.coc_inode = inode;
@@ -3428,7 +3473,7 @@
 	conf.u.coc_md = &md;
 	rc = ll_layout_conf(inode, &conf);
 
-	if (md.lsm != NULL)
+	if (md.lsm)
 		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
 
 	/* refresh layout failed, need to wait */
@@ -3477,7 +3522,7 @@
 	struct md_op_data     *op_data;
 	struct lookup_intent   it;
 	struct lustre_handle   lockh;
-	ldlm_mode_t	       mode;
+	enum ldlm_mode	       mode;
 	struct ldlm_enqueue_info einfo = {
 		.ei_type = LDLM_IBITS,
 		.ei_mode = LCK_CR,
@@ -3499,7 +3544,8 @@
 
 again:
 	/* mostly layout lock is caching on the local side, so try to match
-	 * it before grabbing layout lock mutex. */
+	 * it before grabbing layout lock mutex.
+	 */
 	mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
 			       LCK_CR | LCK_CW | LCK_PR | LCK_PW);
 	if (mode != 0) { /* hit cached lock */
@@ -3529,8 +3575,7 @@
 
 	rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
 			NULL, 0, NULL, 0);
-	if (it.d.lustre.it_data != NULL)
-		ptlrpc_req_finished(it.d.lustre.it_data);
+	ptlrpc_req_finished(it.d.lustre.it_data);
 	it.d.lustre.it_data = NULL;
 
 	ll_finish_md_op_data(op_data);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 3f348a3..fcb6657 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -52,7 +52,7 @@
 
 	spin_lock(&lli->lli_lock);
 	lli->lli_flags |= LLIF_SOM_DIRTY;
-	if (page != NULL && list_empty(&page->cpg_pending_linkage))
+	if (page && list_empty(&page->cpg_pending_linkage))
 		list_add(&page->cpg_pending_linkage,
 			     &club->cob_pending_list);
 	spin_unlock(&lli->lli_lock);
@@ -65,7 +65,7 @@
 	int rc = 0;
 
 	spin_lock(&lli->lli_lock);
-	if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+	if (page && !list_empty(&page->cpg_pending_linkage)) {
 		list_del_init(&page->cpg_pending_linkage);
 		rc = 1;
 	}
@@ -76,7 +76,8 @@
 
 /** Queues DONE_WRITING if
  * - done writing is allowed;
- * - inode has no no dirty pages; */
+ * - inode has no dirty pages;
+ */
 void ll_queue_done_writing(struct inode *inode, unsigned long flags)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
@@ -106,7 +107,8 @@
 		 * close() happen, epoch is closed as the inode is marked as
 		 * LLIF_EPOCH_PENDING. When pages are written inode should not
 		 * be inserted into the queue again, clear this flag to avoid
-		 * it. */
+		 * it.
+		 */
 		lli->lli_flags &= ~LLIF_DONE_WRITING;
 
 		wake_up(&lcq->lcq_waitq);
@@ -144,10 +146,11 @@
 	spin_lock(&lli->lli_lock);
 	if (!(list_empty(&club->cob_pending_list))) {
 		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
-			LASSERT(*och != NULL);
-			LASSERT(lli->lli_pending_och == NULL);
+			LASSERT(*och);
+			LASSERT(!lli->lli_pending_och);
 			/* Inode is dirty and there is no pending write done
-			 * request yet, DONE_WRITE is to be sent later. */
+			 * request yet, DONE_WRITE is to be sent later.
+			 */
 			lli->lli_flags |= LLIF_EPOCH_PENDING;
 			lli->lli_pending_och = *och;
 			spin_unlock(&lli->lli_lock);
@@ -159,7 +162,8 @@
 		if (flags & LLIF_DONE_WRITING) {
 			/* Some pages are still dirty, it is early to send
 			 * DONE_WRITE. Wait until all pages will be flushed
-			 * and try DONE_WRITE again later. */
+			 * and try DONE_WRITE again later.
+			 */
 			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
 			lli->lli_flags |= LLIF_DONE_WRITING;
 			spin_unlock(&lli->lli_lock);
@@ -187,7 +191,8 @@
 		}
 
 		/* There is a pending DONE_WRITE -- close epoch with no
-		 * attribute change. */
+		 * attribute change.
+		 */
 		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
 			spin_unlock(&lli->lli_lock);
 			goto out;
@@ -215,7 +220,7 @@
 	struct obdo *oa;
 	int rc;
 
-	LASSERT(op_data != NULL);
+	LASSERT(op_data);
 	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
 		CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
 		       inode->i_ino, inode->i_generation,
@@ -266,7 +271,7 @@
 {
 	ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
 	/* If there is no @och, we do not do D_W yet. */
-	if (*och == NULL)
+	if (!*och)
 		return;
 
 	ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
@@ -289,13 +294,14 @@
 
 	ll_prepare_done_writing(inode, op_data, &och);
 	/* If there is no @och, we do not do D_W yet. */
-	if (och == NULL)
+	if (!och)
 		goto out;
 
 	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
 	if (rc == -EAGAIN)
 		/* MDS has instructed us to obtain Size-on-MDS attribute from
-		 * OSTs and send setattr to back to MDS. */
+		 * OSTs and send setattr back to MDS.
+		 */
 		rc = ll_som_update(inode, op_data);
 	else if (rc)
 		CERROR("inode %lu mdc done_writing failed: rc = %d\n",
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 845e992..f6921ff 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -93,9 +93,10 @@
 	gid_t		   lrp_gid;
 	uid_t		   lrp_fsuid;
 	gid_t		   lrp_fsgid;
-	int		     lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
-						    is access permission with
-						    lrp_fsuid/lrp_fsgid. */
+	int		   lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
+					     * is access permission with
+					     * lrp_fsuid/lrp_fsgid.
+					     */
 };
 
 enum lli_flags {
@@ -106,7 +107,8 @@
 	/* DONE WRITING is allowed. */
 	LLIF_DONE_WRITING       = (1 << 2),
 	/* Sizeon-on-MDS attributes are changed. An attribute update needs to
-	 * be sent to MDS. */
+	 * be sent to MDS.
+	 */
 	LLIF_SOM_DIRTY	  = (1 << 3),
 	/* File data is modified. */
 	LLIF_DATA_MODIFIED      = (1 << 4),
@@ -130,22 +132,23 @@
 	/* identifying fields for both metadata and data stacks. */
 	struct lu_fid		   lli_fid;
 	/* Parent fid for accessing default stripe data on parent directory
-	 * for allocating OST objects after a mknod() and later open-by-FID. */
+	 * for allocating OST objects after a mknod() and later open-by-FID.
+	 */
 	struct lu_fid		   lli_pfid;
 
-	struct list_head		      lli_close_list;
-	/* open count currently used by capability only, indicate whether
-	 * capability needs renewal */
-	atomic_t		    lli_open_count;
+	struct list_head	      lli_close_list;
+
 	unsigned long		      lli_rmtperm_time;
 
 	/* handle is to be sent to MDS later on done_writing and setattr.
 	 * Open handle data are needed for the recovery to reconstruct
-	 * the inode state on the MDS. XXX: recovery is not ready yet. */
+	 * the inode state on the MDS. XXX: recovery is not ready yet.
+	 */
 	struct obd_client_handle       *lli_pending_och;
 
 	/* We need all three because every inode may be opened in different
-	 * modes */
+	 * modes
+	 */
 	struct obd_client_handle       *lli_mds_read_och;
 	struct obd_client_handle       *lli_mds_write_och;
 	struct obd_client_handle       *lli_mds_exec_och;
@@ -162,7 +165,8 @@
 	spinlock_t			lli_agl_lock;
 
 	/* Try to make the d::member and f::member are aligned. Before using
-	 * these members, make clear whether it is directory or not. */
+	 * these members, make clear whether it is directory or not.
+	 */
 	union {
 		/* for directory */
 		struct {
@@ -173,13 +177,15 @@
 			/* since parent-child threads can share the same @file
 			 * struct, "opendir_key" is the token when dir close for
 			 * case of parent exit before child -- it is me should
-			 * cleanup the dir readahead. */
+			 * cleanup the dir readahead.
+			 */
 			void			   *d_opendir_key;
 			struct ll_statahead_info       *d_sai;
 			/* protect statahead stuff. */
 			spinlock_t			d_sa_lock;
-			/* "opendir_pid" is the token when lookup/revalid
-			 * -- I am the owner of dir statahead. */
+			/* "opendir_pid" is the token when lookup/revalidate
+			 * -- I am the owner of dir statahead.
+			 */
 			pid_t			   d_opendir_pid;
 		} d;
 
@@ -305,7 +311,8 @@
 }
 
 /* default to about 40meg of readahead on a given system.  That much tied
- * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
+ * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
+ */
 #define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
 
 /* default to read-ahead full files smaller than 2MB on the second read */
@@ -344,11 +351,13 @@
 	unsigned long ria_end;    /* end offset of read-ahead*/
 	/* If stride read pattern is detected, ria_stoff means where
 	 * stride read is started. Note: for normal read-ahead, the
-	 * value here is meaningless, and also it will not be accessed*/
+	 * value here is meaningless, and also it will not be accessed
+	 */
 	pgoff_t ria_stoff;
 	/* ria_length and ria_pages are the length and pages length in the
 	 * stride I/O mode. And they will also be used to check whether
-	 * it is stride I/O read-ahead in the read-ahead pages*/
+	 * it is stride I/O read-ahead in the read-ahead pages
+	 */
 	unsigned long ria_length;
 	unsigned long ria_pages;
 };
@@ -455,7 +464,8 @@
 
 struct ll_sb_info {
 	/* this protects pglist and ra_info.  It isn't safe to
-	 * grab from interrupt contexts */
+	 * grab from interrupt contexts
+	 */
 	spinlock_t		  ll_lock;
 	spinlock_t		  ll_pp_extent_lock; /* pp_extent entry*/
 	spinlock_t		  ll_process_lock; /* ll_rw_process_info */
@@ -468,10 +478,8 @@
 	int		       ll_flags;
 	unsigned int		  ll_umounting:1,
 				  ll_xattr_cache_enabled:1;
-	struct list_head		ll_conn_chain; /* per-conn chain of SBs */
 	struct lustre_client_ocd  ll_lco;
 
-	struct list_head		ll_orphan_dentry_list; /*please don't ask -p*/
 	struct ll_close_queue    *ll_lcq;
 
 	struct lprocfs_stats     *ll_stats; /* lprocfs stats counter */
@@ -502,13 +510,16 @@
 	/* metadata stat-ahead */
 	unsigned int	      ll_sa_max;     /* max statahead RPCs */
 	atomic_t		  ll_sa_total;   /* statahead thread started
-						  * count */
+						  * count
+						  */
 	atomic_t		  ll_sa_wrong;   /* statahead thread stopped for
-						  * low hit ratio */
+						  * low hit ratio
+						  */
 	atomic_t		  ll_agl_total;  /* AGL thread started count */
 
-	dev_t		     ll_sdev_orig; /* save s_dev before assign for
-						 * clustered nfs */
+	dev_t			  ll_sdev_orig; /* save s_dev before assign for
+						 * clustered nfs
+						 */
 	struct rmtacl_ctl_table   ll_rct;
 	struct eacl_table	 ll_et;
 	__kernel_fsid_t		  ll_fsid;
@@ -619,13 +630,15 @@
 	__u32 fd_flags;
 	fmode_t fd_omode;
 	/* openhandle if lease exists for this file.
-	 * Borrow lli->lli_och_mutex to protect assignment */
+	 * Borrow lli->lli_och_mutex to protect assignment
+	 */
 	struct obd_client_handle *fd_lease_och;
 	struct obd_client_handle *fd_och;
 	struct file *fd_file;
 	/* Indicate whether need to report failure when close.
 	 * true: failure is known, not report again.
-	 * false: unknown failure, should report. */
+	 * false: unknown failure, should report.
+	 */
 	bool fd_write_failed;
 };
 
@@ -705,10 +718,10 @@
 extern struct file_operations ll_file_operations_noflock;
 extern const struct inode_operations ll_file_inode_operations;
 int ll_have_md_lock(struct inode *inode, __u64 *bits,
-		    ldlm_mode_t l_req_mode);
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
-			    struct lustre_handle *lockh, __u64 flags,
-			    ldlm_mode_t mode);
+		    enum ldlm_mode l_req_mode);
+enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
+			       struct lustre_handle *lockh, __u64 flags,
+			       enum ldlm_mode mode);
 int ll_file_open(struct inode *inode, struct file *file);
 int ll_file_release(struct inode *inode, struct file *file);
 int ll_glimpse_ioctl(struct ll_sb_info *sbi,
@@ -782,7 +795,7 @@
 void ll_dirty_page_discard_warn(struct page *page, int ioret);
 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
 		  struct super_block *, struct lookup_intent *);
-int ll_obd_statfs(struct inode *inode, void *arg);
+int ll_obd_statfs(struct inode *inode, void __user *arg);
 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
 int ll_process_config(struct lustre_cfg *lcfg);
@@ -796,7 +809,7 @@
 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
 
 /* llite/llite_nfs.c */
-extern struct export_operations lustre_export_operations;
+extern const struct export_operations lustre_export_operations;
 __u32 get_uuid2int(const char *name, int len);
 void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
 struct inode *search_inode_for_lustre(struct super_block *sb,
@@ -913,7 +926,7 @@
 	struct vvp_thread_info      *info;
 
 	info = lu_context_key_get(&env->le_ctx, &vvp_key);
-	LASSERT(info != NULL);
+	LASSERT(info);
 	return info;
 }
 
@@ -937,7 +950,7 @@
 	struct vvp_session *ses;
 
 	ses = lu_context_key_get(env->le_ses, &vvp_session_key);
-	LASSERT(ses != NULL);
+	LASSERT(ses);
 	return ses;
 }
 
@@ -968,7 +981,7 @@
 	loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
 
 	LASSERT(PageLocked(vmpage));
-	if (mapping == NULL)
+	if (!mapping)
 		return;
 
 	ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
@@ -993,7 +1006,7 @@
 {
 	struct obd_device *obd = sbi->ll_md_exp->exp_obd;
 
-	if (obd == NULL)
+	if (!obd)
 		LBUG();
 	return &obd->u.cli;
 }
@@ -1018,7 +1031,7 @@
 {
 	struct lu_fid *fid;
 
-	LASSERT(inode != NULL);
+	LASSERT(inode);
 	fid = &ll_i2info(inode)->lli_fid;
 
 	return fid;
@@ -1107,39 +1120,44 @@
 struct ll_statahead_info {
 	struct inode	   *sai_inode;
 	atomic_t	    sai_refcount;   /* when access this struct, hold
-						 * refcount */
+					     * refcount
+					     */
 	unsigned int	    sai_generation; /* generation for statahead */
 	unsigned int	    sai_max;	/* max ahead of lookup */
 	__u64		   sai_sent;       /* stat requests sent count */
 	__u64		   sai_replied;    /* stat requests which received
-						 * reply */
+					    * reply
+					    */
 	__u64		   sai_index;      /* index of statahead entry */
 	__u64		   sai_index_wait; /* index of entry which is the
-						 * caller is waiting for */
+					    * caller is waiting for
+					    */
 	__u64		   sai_hit;	/* hit count */
 	__u64		   sai_miss;       /* miss count:
-						 * for "ls -al" case, it includes
-						 * hidden dentry miss;
-						 * for "ls -l" case, it does not
-						 * include hidden dentry miss.
-						 * "sai_miss_hidden" is used for
-						 * the later case.
-						 */
+					    * for "ls -al" case, it includes
+					    * hidden dentry miss;
+					    * for "ls -l" case, it does not
+					    * include hidden dentry miss.
+					    * "sai_miss_hidden" is used for
+					    * the latter case.
+					    */
 	unsigned int	    sai_consecutive_miss; /* consecutive miss */
 	unsigned int	    sai_miss_hidden;/* "ls -al", but first dentry
-						 * is not a hidden one */
+					     * is not a hidden one
+					     */
 	unsigned int	    sai_skip_hidden;/* skipped hidden dentry count */
 	unsigned int	    sai_ls_all:1,   /* "ls -al", do stat-ahead for
-						 * hidden entries */
+					     * hidden entries
+					     */
 				sai_agl_valid:1;/* AGL is valid for the dir */
-	wait_queue_head_t	     sai_waitq;      /* stat-ahead wait queue */
+	wait_queue_head_t	sai_waitq;      /* stat-ahead wait queue */
 	struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
 	struct ptlrpc_thread    sai_agl_thread; /* AGL thread */
-	struct list_head	      sai_entries;    /* entry list */
-	struct list_head	      sai_entries_received; /* entries returned */
-	struct list_head	      sai_entries_stated;   /* entries stated */
-	struct list_head	      sai_entries_agl; /* AGL entries to be sent */
-	struct list_head	      sai_cache[LL_SA_CACHE_SIZE];
+	struct list_head	sai_entries;    /* entry list */
+	struct list_head	sai_entries_received; /* entries returned */
+	struct list_head	sai_entries_stated;   /* entries stated */
+	struct list_head	sai_entries_agl; /* AGL entries to be sent */
+	struct list_head	sai_cache[LL_SA_CACHE_SIZE];
 	spinlock_t		sai_cache_lock[LL_SA_CACHE_SIZE];
 	atomic_t		sai_cache_count; /* entry count in cache */
 };
@@ -1171,8 +1189,8 @@
 	if (lli->lli_opendir_pid != current_pid())
 		return;
 
-	LASSERT(ldd != NULL);
-	if (sai != NULL)
+	LASSERT(ldd);
+	if (sai)
 		ldd->lld_sa_generation = sai->sai_generation;
 }
 
@@ -1191,7 +1209,7 @@
 		return -EAGAIN;
 
 	/* statahead has been stopped */
-	if (lli->lli_opendir_key == NULL)
+	if (!lli->lli_opendir_key)
 		return -EAGAIN;
 
 	ldd = ll_d2d(dentryp);
@@ -1313,13 +1331,15 @@
 /** direct write pages */
 struct ll_dio_pages {
 	/** page array to be written. we don't support
-	 * partial pages except the last one. */
+	 * partial pages except the last one.
+	 */
 	struct page **ldp_pages;
 	/* offset of each page */
 	loff_t       *ldp_offsets;
 	/** if ldp_offsets is NULL, it means a sequential
 	 * pages to be written, then this is the file offset
-	 * of the * first page. */
+	 * of the first page.
+	 */
 	loff_t	ldp_start_offset;
 	/** how many bytes are to be written. */
 	size_t	ldp_size;
@@ -1345,7 +1365,6 @@
 	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 	struct inode *inode = file_inode(file);
 
-	LASSERT(fd != NULL);
 	return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
 		(ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
 }
@@ -1362,7 +1381,8 @@
 		 * remote MDT, where the object is, will grant
 		 * UPDATE|PERM lock. The inode will be attached to both
 		 * LOOKUP and PERM locks, so revoking either locks will
-		 * case the dcache being cleared */
+		 * cause the dcache to be cleared
+		 */
 		if (it->d.lustre.it_remote_lock_mode) {
 			handle.cookie = it->d.lustre.it_remote_lock_handle;
 			CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
@@ -1383,7 +1403,7 @@
 		it->d.lustre.it_lock_set = 1;
 	}
 
-	if (bits != NULL)
+	if (bits)
 		*bits = it->d.lustre.it_lock_bits;
 }
 
@@ -1401,14 +1421,14 @@
 {
 	struct ll_dentry_data *lld = ll_d2d(dentry);
 
-	return (lld == NULL) || lld->lld_invalid;
+	return !lld || lld->lld_invalid;
 }
 
 static inline void __d_lustre_invalidate(struct dentry *dentry)
 {
 	struct ll_dentry_data *lld = ll_d2d(dentry);
 
-	if (lld != NULL)
+	if (lld)
 		lld->lld_invalid = 1;
 }
 
@@ -1442,7 +1462,7 @@
 static inline void d_lustre_revalidate(struct dentry *dentry)
 {
 	spin_lock(&dentry->d_lock);
-	LASSERT(ll_d2d(dentry) != NULL);
+	LASSERT(ll_d2d(dentry));
 	ll_d2d(dentry)->lld_invalid = 0;
 	spin_unlock(&dentry->d_lock);
 }
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b2fc5b3..1f3e8e8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -102,8 +102,6 @@
 	sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
 	sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
 					   SBI_DEFAULT_READAHEAD_WHOLE_MAX;
-	INIT_LIST_HEAD(&sbi->ll_conn_chain);
-	INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
 
 	ll_generate_random_uuid(uuid);
 	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
@@ -171,7 +169,7 @@
 		return -ENOMEM;
 	}
 
-	if (llite_root != NULL) {
+	if (llite_root) {
 		err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
 		if (err < 0)
 			CERROR("could not register mount in <debugfs>/lustre/llite\n");
@@ -204,7 +202,8 @@
 
 	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
 		/* flag mdc connection as lightweight, only used for test
-		 * purpose, use with care */
+		 * purpose, use with care
+		 */
 		data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
 
 	data->ocd_ibits_known = MDS_INODELOCK_FULL;
@@ -252,7 +251,8 @@
 
 	/* For mount, we only need fs info from MDT0, and also in DNE, it
 	 * can make sure the client can be mounted as long as MDT0 is
-	 * available */
+	 * available
+	 */
 	err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
 			cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
 			OBD_STATFS_FOR_MDT0);
@@ -265,7 +265,8 @@
 	 * we can access the MDC export directly and exp_connect_flags will
 	 * be non-zero, but if accessing an upgraded 2.1 server it will
 	 * have the correct flags filled in.
-	 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
+	 * XXX: fill in the LMV exp_connect_flags from MDC(s).
+	 */
 	valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
 	if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
 	    valid != CLIENT_CONNECT_MDT_REQD) {
@@ -382,7 +383,8 @@
 		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
 		 * disabled by default, because it can still be enabled on the
 		 * fly via /sys. As a consequence, we still need to come to an
-		 * agreement on the supported algorithms at connect time */
+		 * agreement on the supported algorithms at connect time
+		 */
 		data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
 
 		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
@@ -453,7 +455,8 @@
 #endif
 
 	/* make root inode
-	 * XXX: move this to after cbd setup? */
+	 * XXX: move this to after cbd setup?
+	 */
 	valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
 	if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
 		valid |= OBD_MD_FLRMTPERM;
@@ -493,7 +496,7 @@
 	md_free_lustre_md(sbi->ll_md_exp, &lmd);
 	ptlrpc_req_finished(request);
 
-	if (root == NULL || IS_ERR(root)) {
+	if (!root) {
 		if (lmd.lsm)
 			obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
 #ifdef CONFIG_FS_POSIX_ACL
@@ -502,8 +505,7 @@
 			lmd.posix_acl = NULL;
 		}
 #endif
-		err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
-		root = NULL;
+		err = -EBADF;
 		CERROR("lustre_lite: bad iget4 for root\n");
 		goto out_root;
 	}
@@ -532,7 +534,7 @@
 				 &sbi->ll_cache, NULL);
 
 	sb->s_root = d_make_root(root);
-	if (sb->s_root == NULL) {
+	if (!sb->s_root) {
 		CERROR("%s: can't make root dentry\n",
 			ll_get_fsname(sb, NULL, 0));
 		err = -ENOMEM;
@@ -543,11 +545,13 @@
 
 	/* We set sb->s_dev equal on all lustre clients in order to support
 	 * NFS export clustering.  NFSD requires that the FSID be the same
-	 * on all clients. */
+	 * on all clients.
+	 */
 	/* s_dev is also used in lt_compare() to compare two fs, but that is
-	 * only a node-local comparison. */
+	 * only a node-local comparison.
+	 */
 	uuid = obd_get_uuid(sbi->ll_md_exp);
-	if (uuid != NULL) {
+	if (uuid) {
 		sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
 		get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
 	}
@@ -619,13 +623,12 @@
 
 	cl_sb_fini(sb);
 
-	list_del(&sbi->ll_conn_chain);
-
 	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
 	obd_disconnect(sbi->ll_dt_exp);
 	sbi->ll_dt_exp = NULL;
 	/* wait till all OSCs are gone, since cl_cache is accessing sbi.
-	 * see LU-2543. */
+	 * see LU-2543.
+	 */
 	obd_zombie_barrier();
 
 	ldebugfs_unregister_mountpoint(sbi);
@@ -646,7 +649,8 @@
 	sbi = ll_s2sbi(sb);
 	/* we need to restore s_dev from changed for clustered NFS before
 	 * put_super because new kernels have cached s_dev and change sb->s_dev
-	 * in put_super not affected real removing devices */
+	 * in put_super not affected real removing devices
+	 */
 	if (sbi) {
 		sb->s_dev = sbi->ll_sdev_orig;
 		sbi->ll_umounting = 1;
@@ -777,7 +781,7 @@
 next:
 		/* Find next opt */
 		s2 = strchr(s1, ',');
-		if (s2 == NULL)
+		if (!s2)
 			break;
 		s1 = s2 + 1;
 	}
@@ -797,7 +801,6 @@
 	/* Do not set lli_fid, it has been initialized already. */
 	fid_zero(&lli->lli_pfid);
 	INIT_LIST_HEAD(&lli->lli_close_list);
-	atomic_set(&lli->lli_open_count, 0);
 	lli->lli_rmtperm_time = 0;
 	lli->lli_pending_och = NULL;
 	lli->lli_mds_read_och = NULL;
@@ -890,8 +893,9 @@
 	sb->s_d_op = &ll_d_ops;
 
 	/* Generate a string unique to this super, in case some joker tries
-	   to mount the same fs at two mount points.
-	   Use the address of the super itself.*/
+	 * to mount the same fs at two mount points.
+	 * Use the address of the super itself.
+	 */
 	cfg->cfg_instance = sb;
 	cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
 	cfg->cfg_callback = class_config_llog_handler;
@@ -904,7 +908,7 @@
 
 	/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
 	lprof = class_get_profile(profilenm);
-	if (lprof == NULL) {
+	if (!lprof) {
 		LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS.  Does that filesystem exist?\n",
 				   profilenm);
 		err = -EINVAL;
@@ -964,7 +968,8 @@
 	}
 
 	/* We need to set force before the lov_disconnect in
-	   lustre_common_put_super, since l_d cleans up osc's as well. */
+	 * lustre_common_put_super, since l_d cleans up osc's as well.
+	 */
 	if (force) {
 		next = 0;
 		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
@@ -1036,8 +1041,8 @@
 
 	if (S_ISDIR(inode->i_mode)) {
 		/* these should have been cleared in ll_file_release */
-		LASSERT(lli->lli_opendir_key == NULL);
-		LASSERT(lli->lli_sai == NULL);
+		LASSERT(!lli->lli_opendir_key);
+		LASSERT(!lli->lli_sai);
 		LASSERT(lli->lli_opendir_pid == 0);
 	}
 
@@ -1065,7 +1070,7 @@
 	ll_xattr_cache_destroy(inode);
 
 	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-		LASSERT(lli->lli_posix_acl == NULL);
+		LASSERT(!lli->lli_posix_acl);
 		if (lli->lli_remote_perms) {
 			free_rmtperm_hash(lli->lli_remote_perms);
 			lli->lli_remote_perms = NULL;
@@ -1074,7 +1079,7 @@
 #ifdef CONFIG_FS_POSIX_ACL
 	else if (lli->lli_posix_acl) {
 		LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
-		LASSERT(lli->lli_remote_perms == NULL);
+		LASSERT(!lli->lli_remote_perms);
 		posix_acl_release(lli->lli_posix_acl);
 		lli->lli_posix_acl = NULL;
 	}
@@ -1115,7 +1120,8 @@
 		if (rc == -ENOENT) {
 			clear_nlink(inode);
 			/* Unlinked special device node? Or just a race?
-			 * Pretend we done everything. */
+			 * Pretend we did everything.
+			 */
 			if (!S_ISREG(inode->i_mode) &&
 			    !S_ISDIR(inode->i_mode)) {
 				ia_valid = op_data->op_attr.ia_valid;
@@ -1138,7 +1144,8 @@
 
 	ia_valid = op_data->op_attr.ia_valid;
 	/* inode size will be in cl_setattr_ost, can't do it now since dirty
-	 * cache is not cleared yet. */
+	 * cache is not cleared yet.
+	 */
 	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
 	rc = simple_setattr(dentry, &op_data->op_attr);
 	op_data->op_attr.ia_valid = ia_valid;
@@ -1161,7 +1168,6 @@
 	struct ll_inode_info *lli = ll_i2info(inode);
 	int rc = 0;
 
-	LASSERT(op_data != NULL);
 	if (!S_ISREG(inode->i_mode))
 		return 0;
 
@@ -1175,7 +1181,8 @@
 	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
 	if (rc == -EAGAIN)
 		/* MDS has instructed us to obtain Size-on-MDS attribute
-		 * from OSTs and send setattr to back to MDS. */
+		 * from OSTs and send setattr back to MDS.
+		 */
 		rc = ll_som_update(inode, op_data);
 	else if (rc)
 		CERROR("inode %lu mdc truncate failed: rc = %d\n",
@@ -1222,7 +1229,8 @@
 
 		/* The maximum Lustre file size is variable, based on the
 		 * OST maximum object size and number of stripes.  This
-		 * needs another check in addition to the VFS check above. */
+		 * needs another check in addition to the VFS check above.
+		 */
 		if (attr->ia_size > ll_file_maxbytes(inode)) {
 			CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
 			       PFID(&lli->lli_fid), attr->ia_size,
@@ -1270,7 +1278,8 @@
 	}
 
 	/* We always do an MDS RPC, even if we're only changing the size;
-	 * only the MDS knows whether truncate() should fail with -ETXTBUSY */
+	 * only the MDS knows whether truncate() should fail with -ETXTBUSY
+	 */
 
 	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
 	if (!op_data)
@@ -1304,7 +1313,8 @@
 	/* if not in HSM import mode, clear size attr for released file
 	 * we clear the attribute send to MDT in op_data, not the original
 	 * received from caller in attr which is used later to
-	 * decide return code */
+	 * decide return code
+	 */
 	if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
 		op_data->op_attr.ia_valid &= ~ATTR_SIZE;
 
@@ -1342,7 +1352,8 @@
 		 * extent lock (new_size:EOF for truncate).  It may seem
 		 * excessive to send mtime/atime updates to OSTs when not
 		 * setting times to past, but it is necessary due to possible
-		 * time de-synchronization between MDT inode and OST objects */
+		 * time de-synchronization between MDT inode and OST objects
+		 */
 		if (attr->ia_valid & ATTR_SIZE)
 			down_write(&lli->lli_trunc_sem);
 		rc = cl_setattr_ost(inode, attr);
@@ -1470,7 +1481,8 @@
 	/* We need to downshift for all 32-bit kernels, because we can't
 	 * tell if the kernel is being called via sys_statfs64() or not.
 	 * Stop before overflowing f_bsize - in which case it is better
-	 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
+	 * to just risk EOVERFLOW if caller is using old sys_statfs().
+	 */
 	if (sizeof(long) < 8) {
 		while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
 			sfs->f_bsize <<= 1;
@@ -1514,7 +1526,7 @@
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 
 	LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
-	if (lsm != NULL) {
+	if (lsm) {
 		if (!lli->lli_has_smd &&
 		    !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
 			cl_file_inode_init(inode, md);
@@ -1599,12 +1611,13 @@
 		if (exp_connect_som(ll_i2mdexp(inode)) &&
 		    S_ISREG(inode->i_mode)) {
 			struct lustre_handle lockh;
-			ldlm_mode_t mode;
+			enum ldlm_mode mode;
 
 			/* As it is possible a blocking ast has been processed
 			 * by this time, we need to check there is an UPDATE
 			 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
-			 * it. */
+			 * it.
+			 */
 			mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
 					       &lockh, LDLM_FL_CBPENDING,
 					       LCK_CR | LCK_CW |
@@ -1617,7 +1630,8 @@
 					       inode->i_ino, lli->lli_flags);
 				} else {
 					/* Use old size assignment to avoid
-					 * deadlock bz14138 & bz14326 */
+					 * deadlock bz14138 & bz14326
+					 */
 					i_size_write(inode, body->size);
 					spin_lock(&lli->lli_lock);
 					lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
@@ -1627,7 +1641,8 @@
 			}
 		} else {
 			/* Use old size assignment to avoid
-			 * deadlock bz14138 & bz14326 */
+			 * deadlock bz14138 & bz14326
+			 */
 			i_size_write(inode, body->size);
 
 			CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
@@ -1657,7 +1672,8 @@
 	/* Core attributes from the MDS first.  This is a new inode, and
 	 * the VFS doesn't zero times in the core inode so we have to do
 	 * it ourselves.  They will be overwritten by either MDS or OST
-	 * attributes - we just need to make sure they aren't newer. */
+	 * attributes - we just need to make sure they aren't newer.
+	 */
 	LTIME_S(inode->i_mtime) = 0;
 	LTIME_S(inode->i_atime) = 0;
 	LTIME_S(inode->i_ctime) = 0;
@@ -1689,9 +1705,10 @@
 {
 	struct cl_inode_info *lli = cl_i2info(inode);
 
-	if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
+	if (S_ISREG(inode->i_mode) && lli->lli_clob)
 		/* discard all dirty pages before truncating them, required by
-		 * osc_extent implementation at LU-1030. */
+		 * osc_extent implementation at LU-1030.
+		 */
 		cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
 				   CL_FSYNC_DISCARD, 1);
 
@@ -1744,14 +1761,14 @@
 
 		ptlrpc_req_finished(req);
 
-		return put_user(flags, (int *)arg);
+		return put_user(flags, (int __user *)arg);
 	}
 	case FSFILT_IOC_SETFLAGS: {
 		struct lov_stripe_md *lsm;
 		struct obd_info oinfo = { };
 		struct md_op_data *op_data;
 
-		if (get_user(flags, (int *)arg))
+		if (get_user(flags, (int __user *)arg))
 			return -EFAULT;
 
 		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
@@ -1831,7 +1848,7 @@
 	       sb->s_count, atomic_read(&sb->s_active));
 
 	obd = class_exp2obd(sbi->ll_md_exp);
-	if (obd == NULL) {
+	if (!obd) {
 		CERROR("Invalid MDC connection handle %#llx\n",
 		       sbi->ll_md_exp->exp_handle.h_cookie);
 		return;
@@ -1839,7 +1856,7 @@
 	obd->obd_force = 1;
 
 	obd = class_exp2obd(sbi->ll_dt_exp);
-	if (obd == NULL) {
+	if (!obd) {
 		CERROR("Invalid LOV connection handle %#llx\n",
 		       sbi->ll_dt_exp->exp_handle.h_cookie);
 		return;
@@ -1941,7 +1958,7 @@
 		  struct super_block *sb, struct lookup_intent *it)
 {
 	struct ll_sb_info *sbi = NULL;
-	struct lustre_md md;
+	struct lustre_md md = { NULL };
 	int rc;
 
 	LASSERT(*inode || sb);
@@ -1954,7 +1971,7 @@
 	if (*inode) {
 		ll_update_inode(*inode, &md);
 	} else {
-		LASSERT(sb != NULL);
+		LASSERT(sb);
 
 		/*
 		 * At this point server returns to client's same fid as client
@@ -1965,15 +1982,14 @@
 		*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
 					     sbi->ll_flags & LL_SBI_32BIT_API),
 				 &md);
-		if (*inode == NULL || IS_ERR(*inode)) {
+		if (!*inode) {
 #ifdef CONFIG_FS_POSIX_ACL
 			if (md.posix_acl) {
 				posix_acl_release(md.posix_acl);
 				md.posix_acl = NULL;
 			}
 #endif
-			rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
-			*inode = NULL;
+			rc = -ENOMEM;
 			CERROR("new_inode -fatal: rc %d\n", rc);
 			goto out;
 		}
@@ -1986,14 +2002,15 @@
 	 * 1. proc1: mdt returns a lsm but not granting layout
 	 * 2. layout was changed by another client
 	 * 3. proc2: refresh layout and layout lock granted
-	 * 4. proc1: to apply a stale layout */
-	if (it != NULL && it->d.lustre.it_lock_mode != 0) {
+	 * 4. proc1: to apply a stale layout
+	 */
+	if (it && it->d.lustre.it_lock_mode != 0) {
 		struct lustre_handle lockh;
 		struct ldlm_lock *lock;
 
 		lockh.cookie = it->d.lustre.it_lock_handle;
 		lock = ldlm_handle2lock(&lockh);
-		LASSERT(lock != NULL);
+		LASSERT(lock);
 		if (ldlm_has_layout(lock)) {
 			struct cl_object_conf conf;
 
@@ -2008,7 +2025,7 @@
 	}
 
 out:
-	if (md.lsm != NULL)
+	if (md.lsm)
 		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
 	md_free_lustre_md(sbi->ll_md_exp, &md);
 
@@ -2019,14 +2036,13 @@
 	return rc;
 }
 
-int ll_obd_statfs(struct inode *inode, void *arg)
+int ll_obd_statfs(struct inode *inode, void __user *arg)
 {
 	struct ll_sb_info *sbi = NULL;
 	struct obd_export *exp;
 	char *buf = NULL;
 	struct obd_ioctl_data *data = NULL;
 	__u32 type;
-	__u32 flags;
 	int len = 0, rc;
 
 	if (!inode) {
@@ -2069,8 +2085,7 @@
 		goto out_statfs;
 	}
 
-	flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
-	rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
+	rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
 	if (rc)
 		goto out_statfs;
 out_statfs:
@@ -2101,7 +2116,8 @@
 	LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
 
 	/* Note we have not called client_common_fill_super yet, so
-	   proc fns must be able to handle that! */
+	 * proc fns must be able to handle that!
+	 */
 	rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
 				      lcfg, sb);
 	if (rc > 0)
@@ -2115,15 +2131,13 @@
 				       const char *name, int namelen,
 				       int mode, __u32 opc, void *data)
 {
-	LASSERT(i1 != NULL);
-
 	if (namelen > ll_i2sbi(i1)->ll_namelen)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	if (op_data == NULL)
+	if (!op_data)
 		op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
 
-	if (op_data == NULL)
+	if (!op_data)
 		return ERR_PTR(-ENOMEM);
 
 	ll_i2gids(op_data->op_suppgids, i1, i2);
@@ -2143,8 +2157,8 @@
 	op_data->op_cap = cfs_curproc_cap_pack();
 	op_data->op_bias = 0;
 	op_data->op_cli_flags = 0;
-	if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
-	     filename_is_volatile(name, namelen, NULL))
+	if ((opc == LUSTRE_OPC_CREATE) && name &&
+	    filename_is_volatile(name, namelen, NULL))
 		op_data->op_bias |= MDS_CREATE_VOLATILE;
 	op_data->op_opc = opc;
 	op_data->op_mds = 0;
@@ -2152,7 +2166,8 @@
 
 	/* If the file is being opened after mknod() (normally due to NFS)
 	 * try to use the default stripe data from parent directory for
-	 * allocating OST objects.  Try to pass the parent FID to MDS. */
+	 * allocating OST objects.  Try to pass the parent FID to MDS.
+	 */
 	if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
 	    !ll_i2info(i2)->lli_has_smd) {
 		struct ll_inode_info *lli = ll_i2info(i2);
@@ -2179,7 +2194,7 @@
 {
 	struct ll_sb_info *sbi;
 
-	LASSERT((seq != NULL) && (dentry != NULL));
+	LASSERT(seq && dentry);
 	sbi = ll_s2sbi(dentry->d_sb);
 
 	if (sbi->ll_flags & LL_SBI_NOLCK)
@@ -2221,8 +2236,8 @@
 	if (!obd)
 		return -ENOENT;
 
-	if (copy_to_user((void *)arg, obd->obd_name,
-			     strlen(obd->obd_name) + 1))
+	if (copy_to_user((void __user *)arg, obd->obd_name,
+			 strlen(obd->obd_name) + 1))
 		return -EFAULT;
 
 	return 0;
@@ -2240,10 +2255,11 @@
 	char *ptr;
 	int len;
 
-	if (buf == NULL) {
+	if (!buf) {
 		/* this means the caller wants to use static buffer
 		 * and it doesn't care about race. Usually this is
-		 * in error reporting path */
+		 * in the error reporting path
+		 */
 		buf = fsname_static;
 		buflen = sizeof(fsname_static);
 	}
@@ -2269,9 +2285,9 @@
 
 	/* this can be called inside spin lock so use GFP_ATOMIC. */
 	buf = (char *)__get_free_page(GFP_ATOMIC);
-	if (buf != NULL) {
+	if (buf) {
 		dentry = d_find_alias(page->mapping->host);
-		if (dentry != NULL)
+		if (dentry)
 			path = dentry_path_raw(dentry, buf, PAGE_SIZE);
 	}
 
@@ -2282,9 +2298,9 @@
 	       PFID(&obj->cob_header.coh_lu.loh_fid),
 	       (path && !IS_ERR(path)) ? path : "", ioret);
 
-	if (dentry != NULL)
+	if (dentry)
 		dput(dentry);
 
-	if (buf != NULL)
+	if (buf)
 		free_page((unsigned long)buf);
 }
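
The ioctl hunks above (FSFILT_IOC_GETFLAGS/SETFLAGS, OBD_IOC_GETNAME) only add __user
annotations so sparse can verify the kernel/user pointer split; the calls themselves are
unchanged.  As a reference point, here is a minimal sketch of that convention; everything
named foo_* and the two command codes are hypothetical, not part of Lustre.

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Round-trip an int flag through the user-supplied ioctl argument.
 * put_user()/get_user() return 0 on success and -EFAULT on a bad user
 * pointer, so their result can be returned directly.
 */
static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int __user *uarg = (int __user *)arg;
	int flags;

	switch (cmd) {
	case FOO_IOC_GETFLAGS:				/* hypothetical */
		flags = foo_get_flags(file);		/* hypothetical */
		return put_user(flags, uarg);
	case FOO_IOC_SETFLAGS:				/* hypothetical */
		if (get_user(flags, uarg))
			return -EFAULT;
		return foo_set_flags(file, flags);	/* hypothetical */
	default:
		return -ENOTTY;
	}
}
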
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index bbae95c..31abdb7f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -72,7 +72,7 @@
 	LASSERT(!down_write_trylock(&mm->mmap_sem));
 
 	for (vma = find_vma(mm, addr);
-	    vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
+	    vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
 		if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
 		    vma->vm_flags & VM_SHARED) {
 			ret = vma;
@@ -119,13 +119,13 @@
 	 */
 	env = cl_env_nested_get(nest);
 	if (IS_ERR(env))
-		 return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EINVAL);
 
 	*env_ret = env;
 
 	io = ccc_env_thread_io(env);
 	io->ci_obj = ll_i2info(inode)->lli_clob;
-	LASSERT(io->ci_obj != NULL);
+	LASSERT(io->ci_obj);
 
 	fio = &io->u.ci_fault;
 	fio->ft_index      = index;
@@ -136,7 +136,7 @@
 	 * the kernel will not read other pages not covered by ldlm in
 	 * filemap_nopage. we do our readahead in ll_readpage.
 	 */
-	if (ra_flags != NULL)
+	if (ra_flags)
 		*ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
 	vma->vm_flags &= ~VM_SEQ_READ;
 	vma->vm_flags |= VM_RAND_READ;
@@ -151,8 +151,7 @@
 
 		LASSERT(cio->cui_cl.cis_io == io);
 
-		/* mmap lock must be MANDATORY it has to cache
-		 * pages. */
+		/* mmap lock must be MANDATORY; it has to cache pages. */
 		io->ci_lockreq = CILR_MANDATORY;
 		cio->cui_fd = fd;
 	} else {
@@ -178,8 +177,6 @@
 	struct inode	     *inode;
 	struct ll_inode_info     *lli;
 
-	LASSERT(vmpage != NULL);
-
 	io = ll_fault_io_init(vma, &env,  &nest, vmpage->index, NULL);
 	if (IS_ERR(io)) {
 		result = PTR_ERR(io);
@@ -201,7 +198,8 @@
 
 	/* we grab lli_trunc_sem to exclude truncate case.
 	 * Otherwise, we could add dirty pages into osc cache
-	 * while truncate is on-going. */
+	 * while truncate is on-going.
+	 */
 	inode = ccc_object_inode(io->ci_obj);
 	lli = ll_i2info(inode);
 	down_read(&lli->lli_trunc_sem);
@@ -217,12 +215,13 @@
 		struct ll_inode_info *lli = ll_i2info(inode);
 
 		lock_page(vmpage);
-		if (vmpage->mapping == NULL) {
+		if (!vmpage->mapping) {
 			unlock_page(vmpage);
 
 			/* page was truncated and lock was cancelled, return
 			 * ENODATA so that VM_FAULT_NOPAGE will be returned
-			 * to handle_mm_fault(). */
+			 * to handle_mm_fault().
+			 */
 			if (result == 0)
 				result = -ENODATA;
 		} else if (!PageDirty(vmpage)) {
@@ -315,12 +314,13 @@
 		result = cl_io_loop(env, io);
 
 		/* ft_flags are only valid if we reached
-		 * the call to filemap_fault */
+		 * the call to filemap_fault
+		 */
 		if (vio->u.fault.fault.ft_flags_valid)
 			fault_ret = vio->u.fault.fault.ft_flags;
 
 		vmpage = vio->u.fault.ft_vmpage;
-		if (result != 0 && vmpage != NULL) {
+		if (result != 0 && vmpage) {
 			page_cache_release(vmpage);
 			vmf->page = NULL;
 		}
@@ -344,9 +344,10 @@
 	int result;
 	sigset_t set;
 
-	/* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
+	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
 	 * so that it can be killed by admin but not cause segfault by
-	 * other signals. */
+	 * other signals.
+	 */
 	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
 
 restart:
@@ -357,7 +358,7 @@
 
 		/* check if this page has been truncated */
 		lock_page(vmpage);
-		if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+		if (unlikely(!vmpage->mapping)) { /* unlucky */
 			unlock_page(vmpage);
 			page_cache_release(vmpage);
 			vmf->page = NULL;
@@ -447,7 +448,8 @@
 }
 
 /* XXX put nice comment here.  talk about __free_pte -> dirty pages and
- * nopage's reference passing to the pte */
+ * nopage's reference passing to the pte
+ */
 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
 	int rc = -ENOENT;
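
The fault and mkwrite hunks above repeatedly lock the page and test page->mapping to
detect a racing truncate.  A minimal sketch of that check outside of any Lustre context
(the foo_ naming and the simplified return codes are assumptions):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* With the page lock held, a NULL page->mapping means truncate already
 * removed the page from the file; the fault must then be restarted
 * (VM_FAULT_NOPAGE) instead of installing a stale page.
 */
static int foo_fault_check_truncate(struct page *page)
{
	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* ... dirty or map the page while it is still locked ... */
	unlock_page(page);
	return 0;
}
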
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 18aab25f..74a8868 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -105,7 +105,8 @@
 		return ERR_PTR(rc);
 
 	/* Because inode is NULL, ll_prep_md_op_data can not
-	 * be used here. So we allocate op_data ourselves */
+	 * be used here. So we allocate op_data ourselves
+	 */
 	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
 	if (!op_data)
 		return ERR_PTR(-ENOMEM);
@@ -141,10 +142,11 @@
 	struct inode  *inode;
 	struct dentry *result;
 
-	CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
 	if (!fid_is_sane(fid))
 		return ERR_PTR(-ESTALE);
 
+	CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid));
+
 	inode = search_inode_for_lustre(sb, fid);
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
@@ -160,7 +162,7 @@
 	 * We have to find the parent to tell MDS how to init lov objects.
 	 */
 	if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
-	    parent != NULL) {
+	    parent && !fid_is_zero(parent)) {
 		struct ll_inode_info *lli = ll_i2info(inode);
 
 		spin_lock(&lli->lli_lock);
@@ -174,8 +176,6 @@
 	return result;
 }
 
-#define LUSTRE_NFS_FID	  0x97
-
 /**
  * \a connectable - is nfsd will connect himself or this should be done
  *		  at lustre
@@ -188,20 +188,25 @@
 static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
 			struct inode *parent)
 {
+	int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
 	struct lustre_nfs_fid *nfs_fid = (void *)fh;
 
 	CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n",
-	      inode->i_ino, PFID(ll_inode2fid(inode)), *plen,
-	      (int)sizeof(struct lustre_nfs_fid));
+	      inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len);
 
-	if (*plen < sizeof(struct lustre_nfs_fid) / 4)
-		return 255;
+	if (*plen < fileid_len) {
+		*plen = fileid_len;
+		return FILEID_INVALID;
+	}
 
 	nfs_fid->lnf_child = *ll_inode2fid(inode);
-	nfs_fid->lnf_parent = *ll_inode2fid(parent);
-	*plen = sizeof(struct lustre_nfs_fid) / 4;
+	if (parent)
+		nfs_fid->lnf_parent = *ll_inode2fid(parent);
+	else
+		fid_zero(&nfs_fid->lnf_parent);
+	*plen = fileid_len;
 
-	return LUSTRE_NFS_FID;
+	return FILEID_LUSTRE;
 }
 
 static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
@@ -209,7 +214,8 @@
 				   unsigned type)
 {
 	/* It is hack to access lde_fid for comparison with lgd_fid.
-	 * So the input 'name' must be part of the 'lu_dirent'. */
+	 * So the input 'name' must be part of the 'lu_dirent'.
+	 */
 	struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name);
 	struct ll_getname_data *lgd =
 		container_of(ctx, struct ll_getname_data, ctx);
@@ -259,7 +265,7 @@
 {
 	struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
 
-	if (fh_type != LUSTRE_NFS_FID)
+	if (fh_type != FILEID_LUSTRE)
 		return ERR_PTR(-EPROTO);
 
 	return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
@@ -270,7 +276,7 @@
 {
 	struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
 
-	if (fh_type != LUSTRE_NFS_FID)
+	if (fh_type != FILEID_LUSTRE)
 		return ERR_PTR(-EPROTO);
 
 	return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
@@ -323,7 +329,7 @@
 	return result;
 }
 
-struct export_operations lustre_export_operations = {
+const struct export_operations lustre_export_operations = {
        .get_parent = ll_get_parent,
        .encode_fh  = ll_encode_fh,
        .get_name   = ll_get_name,
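
The ll_encode_fh() hunk above switches from a private 255 return value to the generic
exportfs contract: report the required length and return FILEID_INVALID when the caller's
buffer is too small, and use the shared FILEID_LUSTRE type otherwise.  A minimal sketch of
that contract with a hypothetical two-word handle (the 0x81 type value and the field
layout are made up for illustration):

#include <linux/exportfs.h>
#include <linux/fs.h>

static int foo_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
			 struct inode *parent)
{
	int len = 2;			/* 32-bit words this handle needs */

	if (*max_len < len) {
		*max_len = len;		/* tell the caller how much to pass */
		return FILEID_INVALID;	/* 0xff: buffer too small */
	}

	fh[0] = (__u32)inode->i_ino;	/* truncated ino, illustration only */
	fh[1] = parent ? (__u32)parent->i_ino : 0;
	*max_len = len;

	return 0x81;			/* hypothetical fs-specific fileid type */
}
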
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
index b27c3f2..63e4847 100644
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
@@ -125,12 +125,12 @@
 	struct rmtacl_ctl_entry *rce, *e;
 
 	rce = rce_alloc(key, ops);
-	if (rce == NULL)
+	if (!rce)
 		return -ENOMEM;
 
 	spin_lock(&rct->rct_lock);
 	e = __rct_search(rct, key);
-	if (unlikely(e != NULL)) {
+	if (unlikely(e)) {
 		CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
 		      (int)key, ops);
 		rce_free(e);
@@ -213,7 +213,7 @@
 	struct eacl_entry *ee;
 	struct list_head *head = &et->et_entries[ee_hashfunc(key)];
 
-	LASSERT(fid != NULL);
+	LASSERT(fid);
 	list_for_each_entry(ee, head, ee_list)
 		if (ee->ee_key == key) {
 			if (lu_fid_eq(&ee->ee_fid, fid) &&
@@ -256,12 +256,12 @@
 	struct eacl_entry *ee, *e;
 
 	ee = ee_alloc(key, fid, type, header);
-	if (ee == NULL)
+	if (!ee)
 		return -ENOMEM;
 
 	spin_lock(&et->et_lock);
 	e = __et_search_del(et, key, fid, type);
-	if (unlikely(e != NULL)) {
+	if (unlikely(e)) {
 		CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
 		      (int)key, PFID(fid), type);
 		ee_free(e);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 871924b..3f53292 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -211,9 +211,8 @@
 		return io->ci_result;
 	io->ci_lockreq = CILR_NEVER;
 
-	LASSERT(head != NULL);
 	rw = head->bi_rw;
-	for (bio = head; bio != NULL; bio = bio->bi_next) {
+	for (bio = head; bio; bio = bio->bi_next) {
 		LASSERT(rw == bio->bi_rw);
 
 		offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
@@ -297,7 +296,7 @@
 
 	spin_lock_irq(&lo->lo_lock);
 	first = lo->lo_bio;
-	if (unlikely(first == NULL)) {
+	if (unlikely(!first)) {
 		spin_unlock_irq(&lo->lo_lock);
 		return 0;
 	}
@@ -308,7 +307,7 @@
 	rw = first->bi_rw;
 	bio = &lo->lo_bio;
 	while (*bio && (*bio)->bi_rw == rw) {
-		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n",
 		       (unsigned long long)(*bio)->bi_iter.bi_sector,
 		       (*bio)->bi_iter.bi_size,
 		       page_count, (*bio)->bi_vcnt);
@@ -458,7 +457,7 @@
 			       total_count, times, total_count / times);
 		}
 
-		LASSERT(bio != NULL);
+		LASSERT(bio);
 		LASSERT(count <= atomic_read(&lo->lo_pending));
 		loop_handle_bio(lo, bio);
 		atomic_sub(count, &lo->lo_pending);
@@ -560,7 +559,7 @@
 	if (lo->lo_refcnt > count)	/* we needed one fd for the ioctl */
 		return -EBUSY;
 
-	if (filp == NULL)
+	if (!filp)
 		return -EINVAL;
 
 	spin_lock_irq(&lo->lo_lock);
@@ -625,18 +624,18 @@
 	case LL_IOC_LLOOP_INFO: {
 		struct lu_fid fid;
 
-		if (lo->lo_backing_file == NULL) {
+		if (!lo->lo_backing_file) {
 			err = -ENOENT;
 			break;
 		}
-		if (inode == NULL)
+		if (!inode)
 			inode = file_inode(lo->lo_backing_file);
 		if (lo->lo_state == LLOOP_BOUND)
 			fid = ll_i2info(inode)->lli_fid;
 		else
 			fid_zero(&fid);
 
-		if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
+		if (copy_to_user((void __user *)arg, &fid, sizeof(fid)))
 			err = -EFAULT;
 		break;
 	}
@@ -676,7 +675,7 @@
 	if (magic != ll_iocontrol_magic)
 		return LLIOC_CONT;
 
-	if (disks == NULL) {
+	if (!disks) {
 		err = -ENODEV;
 		goto out1;
 	}
@@ -708,7 +707,7 @@
 		dev = MKDEV(lloop_major, lo->lo_number);
 
 		/* quit if the used pointer is writable */
-		if (put_user((long)old_encode_dev(dev), (long *)arg)) {
+		if (put_user((long)old_encode_dev(dev), (long __user *)arg)) {
 			err = -EFAULT;
 			goto out;
 		}
@@ -793,7 +792,7 @@
 	       lloop_major, max_loop);
 
 	ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
-	if (ll_iocontrol_magic == NULL)
+	if (!ll_iocontrol_magic)
 		goto out_mem1;
 
 	loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
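
LL_IOC_LLOOP_INFO above now copies the fid back through a (void __user *) cast.
Complementing the scalar put_user()/get_user() sketch earlier, this is the same idea for a
fixed-size structure; struct foo_info and foo_copy_info() are hypothetical:

#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_info {			/* hypothetical ioctl result payload */
	__u64	id;
	__u32	flags;
	__u32	pad;
};

/* copy_to_user() returns the number of bytes it could NOT copy, so any
 * non-zero result maps to -EFAULT.
 */
static int foo_copy_info(unsigned long arg, const struct foo_info *info)
{
	if (copy_to_user((void __user *)arg, info, sizeof(*info)))
		return -EFAULT;
	return 0;
}
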
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index f134ad9..f5fe928 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -43,7 +43,7 @@
 #include "llite_internal.h"
 #include "vvp_internal.h"
 
-/* /proc/lustre/llite mount point registration */
+/* debugfs llite mount point registration */
 static struct file_operations ll_rw_extents_stats_fops;
 static struct file_operations ll_rw_extents_stats_pp_fops;
 static struct file_operations ll_rw_offset_stats_fops;
@@ -345,7 +345,8 @@
 		return rc;
 
 	/* Cap this at the current max readahead window size, the readahead
-	 * algorithm does this anyway so it's pointless to set it larger. */
+	 * algorithm does this anyway so it's pointless to set it larger.
+	 */
 	if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
 		CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
 		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
@@ -453,7 +454,7 @@
 		if (diff <= 0)
 			break;
 
-		if (sbi->ll_dt_exp == NULL) { /* being initialized */
+		if (!sbi->ll_dt_exp) { /* being initialized */
 			rc = -ENODEV;
 			break;
 		}
@@ -966,9 +967,9 @@
 
 	name[MAX_STRING_SIZE] = '\0';
 
-	LASSERT(sbi != NULL);
-	LASSERT(mdc != NULL);
-	LASSERT(osc != NULL);
+	LASSERT(sbi);
+	LASSERT(mdc);
+	LASSERT(osc);
 
 	/* Get fsname */
 	len = strlen(lsi->lsi_lmd->lmd_profile);
@@ -1012,7 +1013,7 @@
 	/* File operations stats */
 	sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
 					    LPROCFS_STATS_FLAG_NONE);
-	if (sbi->ll_stats == NULL) {
+	if (!sbi->ll_stats) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -1039,7 +1040,7 @@
 
 	sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
 					       LPROCFS_STATS_FLAG_NONE);
-	if (sbi->ll_ra_stats == NULL) {
+	if (!sbi->ll_ra_stats) {
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index da5f443..56d2d1d 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -118,7 +118,7 @@
 
 			ll_read_inode2(inode, md);
 			if (S_ISREG(inode->i_mode) &&
-			    ll_i2info(inode)->lli_clob == NULL) {
+			    !ll_i2info(inode)->lli_clob) {
 				CDEBUG(D_INODE,
 					"%s: apply lsm %p to inode "DFID".\n",
 					ll_get_fsname(sb, NULL, 0), md->lsm,
@@ -127,7 +127,7 @@
 			}
 			if (rc != 0) {
 				iget_failed(inode);
-				inode = ERR_PTR(rc);
+				inode = NULL;
 			} else
 				unlock_new_inode(inode);
 		} else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
@@ -180,10 +180,11 @@
 		__u64 bits = lock->l_policy_data.l_inodebits.bits;
 
 		/* Inode is set to lock->l_resource->lr_lvb_inode
-		 * for mdc - bug 24555 */
-		LASSERT(lock->l_ast_data == NULL);
+		 * for mdc - bug 24555
+		 */
+		LASSERT(!lock->l_ast_data);
 
-		if (inode == NULL)
+		if (!inode)
 			break;
 
 		/* Invalidate all dentries associated with this inode */
@@ -202,7 +203,8 @@
 		}
 
 		/* For OPEN locks we differentiate between lock modes
-		 * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
+		 * LCK_CR, LCK_CW, LCK_PR - bug 22891
+		 */
 		if (bits & MDS_INODELOCK_OPEN)
 			ll_have_md_lock(inode, &bits, lock->l_req_mode);
 
@@ -260,7 +262,7 @@
 		}
 
 		if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
-		    inode->i_sb->s_root != NULL &&
+		    inode->i_sb->s_root &&
 		    !is_root_inode(inode))
 			ll_invalidate_aliases(inode);
 
@@ -285,15 +287,11 @@
 /* Pack the required supplementary groups into the supplied groups array.
  * If we don't need to use the groups from the target inode(s) then we
  * instead pack one or more groups from the user's supplementary group
- * array in case it might be useful.  Not needed if doing an MDS-side upcall. */
+ * array in case it might be useful.  Not needed if doing an MDS-side upcall.
+ */
 void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
 {
-#if 0
-	int i;
-#endif
-
-	LASSERT(i1 != NULL);
-	LASSERT(suppgids != NULL);
+	LASSERT(i1);
 
 	suppgids[0] = ll_i2suppgid(i1);
 
@@ -301,22 +299,6 @@
 		suppgids[1] = ll_i2suppgid(i2);
 		else
 			suppgids[1] = -1;
-
-#if 0
-	for (i = 0; i < current_ngroups; i++) {
-		if (suppgids[0] == -1) {
-			if (current_groups[i] != suppgids[1])
-				suppgids[0] = current_groups[i];
-			continue;
-		}
-		if (suppgids[1] == -1) {
-			if (current_groups[i] != suppgids[0])
-				suppgids[1] = current_groups[i];
-			continue;
-		}
-		break;
-	}
-#endif
 }
 
 /*
@@ -409,7 +391,8 @@
 	int rc = 0;
 
 	/* NB 1 request reference will be taken away by ll_intent_lock()
-	 * when I return */
+	 * when I return
+	 */
 	CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
 	       it->d.lustre.it_disposition);
 	if (!it_disposition(it, DISP_LOOKUP_NEG)) {
@@ -420,13 +403,14 @@
 		ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
 
 		/* We used to query real size from OSTs here, but actually
-		   this is not needed. For stat() calls size would be updated
-		   from subsequent do_revalidate()->ll_inode_revalidate_it() in
-		   2.4 and
-		   vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
-		   Everybody else who needs correct file size would call
-		   ll_glimpse_size or some equivalent themselves anyway.
-		   Also see bug 7198. */
+		 * this is not needed. For stat() calls size would be updated
+		 * from subsequent do_revalidate()->ll_inode_revalidate_it() in
+		 * 2.4 and
+		 * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
+		 * Everybody else who needs correct file size would call
+		 * ll_glimpse_size or some equivalent themselves anyway.
+		 * Also see bug 7198.
+		 */
 	}
 
 	/* Only hash *de if it is unhashed (new dentry).
@@ -443,9 +427,10 @@
 		*de = alias;
 	} else if (!it_disposition(it, DISP_LOOKUP_NEG)  &&
 		   !it_disposition(it, DISP_OPEN_CREATE)) {
-		/* With DISP_OPEN_CREATE dentry will
-		   instantiated in ll_create_it. */
-		LASSERT(d_inode(*de) == NULL);
+		/* With DISP_OPEN_CREATE dentry will be
+		 * instantiated in ll_create_it.
+		 */
+		LASSERT(!d_inode(*de));
 		d_instantiate(*de, inode);
 	}
 
@@ -498,7 +483,7 @@
 	if (d_mountpoint(dentry))
 		CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
 
-	if (it == NULL || it->it_op == IT_GETXATTR)
+	if (!it || it->it_op == IT_GETXATTR)
 		it = &lookup_it;
 
 	if (it->it_op == IT_GETATTR) {
@@ -557,7 +542,7 @@
  out:
 	if (req)
 		ptlrpc_req_finished(req);
-	if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry))
+	if (it->it_op == IT_GETATTR && (!retval || retval == dentry))
 		ll_statahead_mark(parent, dentry);
 	return retval;
 }
@@ -582,7 +567,7 @@
 		itp = &it;
 	de = ll_lookup_it(parent, dentry, itp, 0);
 
-	if (itp != NULL)
+	if (itp)
 		ll_intent_release(itp);
 
 	return de;
@@ -622,7 +607,7 @@
 	de = ll_lookup_it(dir, dentry, it, lookup_flags);
 	if (IS_ERR(de))
 		rc = PTR_ERR(de);
-	else if (de != NULL)
+	else if (de)
 		dentry = de;
 
 	if (!rc) {
@@ -631,7 +616,7 @@
 			rc = ll_create_it(dir, dentry, mode, it);
 			if (rc) {
 				/* We dget in ll_splice_alias. */
-				if (de != NULL)
+				if (de)
 					dput(de);
 				goto out_release;
 			}
@@ -655,7 +640,7 @@
 				/* We dget in ll_splice_alias. finish_open takes
 				 * care of dget for fd open.
 				 */
-				if (de != NULL)
+				if (de)
 					dput(de);
 			}
 		} else {
@@ -693,7 +678,8 @@
 
 	/* We asked for a lock on the directory, but were granted a
 	 * lock on the inode.  Since we finally have an inode pointer,
-	 * stuff it in the lock. */
+	 * stuff it in the lock.
+	 */
 	CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
 	       inode, inode->i_ino, inode->i_generation);
 	ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
@@ -767,7 +753,7 @@
 	int tgt_len = 0;
 	int err;
 
-	if (unlikely(tgt != NULL))
+	if (unlikely(tgt))
 		tgt_len = strlen(tgt) + 1;
 
 	op_data = ll_prep_md_op_data(NULL, dir, NULL,
@@ -888,10 +874,11 @@
 	/* The MDS sent back the EA because we unlinked the last reference
 	 * to this file. Use this EA to unlink the objects on the OST.
 	 * It's opaque so we don't swab here; we leave it to obd_unpackmd() to
-	 * check it is complete and sensible. */
+	 * check it is complete and sensible.
+	 */
 	eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD,
 					      body->eadatasize);
-	LASSERT(eadata != NULL);
+	LASSERT(eadata);
 
 	rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize);
 	if (rc < 0) {
@@ -901,7 +888,7 @@
 	LASSERT(rc >= sizeof(*lsm));
 
 	oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
-	if (oa == NULL) {
+	if (!oa) {
 		rc = -ENOMEM;
 		goto out_free_memmd;
 	}
@@ -917,7 +904,7 @@
 						     &RMF_LOGCOOKIES,
 						   sizeof(struct llog_cookie) *
 						     lsm->lsm_stripe_count);
-		if (oti.oti_logcookies == NULL) {
+		if (!oti.oti_logcookies) {
 			oa->o_valid &= ~OBD_MD_FLCOOKIE;
 			body->valid &= ~OBD_MD_FLCOOKIE;
 		}
@@ -938,7 +925,8 @@
 /* ll_unlink() doesn't update the inode with the new link count.
  * Instead, ll_ddelete() and ll_d_iput() will update it based upon if there
  * is any lock existing. They will recycle dentries and inodes based upon locks
- * too. b=20433 */
+ * too. b=20433
+ */
 static int ll_unlink(struct inode *dir, struct dentry *dentry)
 {
 	struct ptlrpc_request *request = NULL;
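
The ll_iget() hunk above makes the failure path return NULL rather than an ERR_PTR,
matching the callers simplified in llite_lib.c.  For reference, a sketch of the
inode-cache pattern this sits on, using the stock iget_locked() helpers; foo_fill_inode()
and the single-ino lookup are assumptions, since the real code keys on the Lustre fid:

#include <linux/fs.h>

static struct inode *foo_iget(struct super_block *sb, unsigned long ino,
			      void *opaque)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return NULL;			/* NULL on failure, as above */

	if (inode->i_state & I_NEW) {
		if (foo_fill_inode(inode, opaque)) {	/* hypothetical */
			iget_failed(inode);	/* unhashes, unlocks and iputs */
			return NULL;
		}
		unlock_new_inode(inode);
	}

	return inode;
}
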
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index f355474..671039a 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -70,9 +70,9 @@
 	struct cl_page *page = lcc->lcc_page;
 
 	LASSERT(lcc->lcc_cookie == current);
-	LASSERT(env != NULL);
+	LASSERT(env);
 
-	if (page != NULL) {
+	if (page) {
 		lu_ref_del(&page->cp_reference, "cl_io", io);
 		cl_page_put(env, page);
 	}
@@ -97,7 +97,7 @@
 	int result = 0;
 
 	clob = ll_i2info(vmpage->mapping->host)->lli_clob;
-	LASSERT(clob != NULL);
+	LASSERT(clob);
 
 	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
@@ -111,7 +111,7 @@
 
 	cio = ccc_env_io(env);
 	io = cio->cui_cl.cis_io;
-	if (io == NULL && create) {
+	if (!io && create) {
 		struct inode *inode = vmpage->mapping->host;
 		loff_t pos;
 
@@ -120,7 +120,8 @@
 
 			/* this is too bad. Someone is trying to write the
 			 * page w/o holding inode mutex. This means we can
-			 * add dirty pages into cache during truncate */
+			 * add dirty pages into cache during truncate
+			 */
 			CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
 			       current->comm);
 			dump_stack();
@@ -163,12 +164,11 @@
 	}
 
 	lcc->lcc_io = io;
-	if (io == NULL)
+	if (!io)
 		result = -EIO;
 	if (result == 0) {
 		struct cl_page   *page;
 
-		LASSERT(io != NULL);
 		LASSERT(io->ci_state == CIS_IO_GOING);
 		LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
 		page = cl_page_find(env, clob, vmpage->index, vmpage,
@@ -240,7 +240,8 @@
 			ll_cl_fini(lcc);
 		}
 		/* returning 0 in prepare assumes commit must be called
-		 * afterwards */
+		 * afterwards
+		 */
 	} else {
 		result = PTR_ERR(lcc);
 	}
@@ -296,8 +297,8 @@
  * to get an ra budget that is larger than the remaining readahead pages
  * and reach here at exactly the same time. They will compute /a ret to
  * consume the remaining pages, but will fail at atomic_add_return() and
- * get a zero ra window, although there is still ra space remaining. - Jay */
-
+ * get a zero ra window, although there is still ra space remaining. - Jay
+ */
 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
 				     struct ra_io_arg *ria,
 				     unsigned long pages)
@@ -307,7 +308,8 @@
 
 	/* If read-ahead pages left are less than 1M, do not do read-ahead,
 	 * otherwise it will form small read RPC(< 1M), which hurt server
-	 * performance a lot. */
+	 * performance a lot.
+	 */
 	ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
 	if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
 		ret = 0;
@@ -324,7 +326,8 @@
 	 * branch is more expensive than subtracting zero from the result.
 	 *
 	 * Strided read is left unaligned to avoid small fragments beyond
-	 * the RPC boundary from needing an extra read RPC. */
+	 * the RPC boundary from needing an extra read RPC.
+	 */
 	if (ria->ria_pages == 0) {
 		long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
 
@@ -364,7 +367,7 @@
 #define RAS_CDEBUG(ras) \
 	CDEBUG(D_READA,						      \
 	       "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu"    \
-	       "csr %lu sf %lu sp %lu sl %lu \n",			    \
+	       "csr %lu sf %lu sp %lu sl %lu\n",			    \
 	       ras->ras_last_readpage, ras->ras_consecutive_requests,	\
 	       ras->ras_consecutive_pages, ras->ras_window_start,	    \
 	       ras->ras_window_len, ras->ras_next_readahead,		 \
@@ -378,9 +381,9 @@
 	unsigned long start = point - before, end = point + after;
 
 	if (start > point)
-	       start = 0;
+		start = 0;
 	if (end < point)
-	       end = ~0;
+		end = ~0;
 
 	return start <= index && index <= end;
 }
@@ -473,7 +476,7 @@
 	const char       *msg   = NULL;
 
 	vmpage = grab_cache_page_nowait(mapping, index);
-	if (vmpage != NULL) {
+	if (vmpage) {
 		/* Check if vmpage was truncated or reclaimed */
 		if (vmpage->mapping == mapping) {
 			page = cl_page_find(env, clob, vmpage->index,
@@ -500,7 +503,7 @@
 		which = RA_STAT_FAILED_GRAB_PAGE;
 		msg   = "g_c_p_n failed";
 	}
-	if (msg != NULL) {
+	if (msg) {
 		ll_ra_stats_inc(mapping, which);
 		CDEBUG(D_READA, "%s\n", msg);
 	}
@@ -515,13 +518,15 @@
 /* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
  * know what the actual RPC size is.  If this needs to change, it makes more
  * sense to tune the i_blkbits value for the file based on the OSTs it is
- * striped over, rather than having a constant value for all files here. */
+ * striped over, rather than having a constant value for all files here.
+ */
 
 /* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
  * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
  * by default, this should be adjusted corresponding with max_read_ahead_mb
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
- * up quickly which will affect read performance significantly. See LU-2816 */
+ * up quickly which will affect read performance significantly. See LU-2816
+ */
 #define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
@@ -570,7 +575,7 @@
 	if (end_left > st_pgs)
 		end_left = st_pgs;
 
-	CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n",
+	CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n",
 	       start, end, start_left, end_left);
 
 	if (start == end)
@@ -600,7 +605,8 @@
 	/* If ria_length == ria_pages, it means non-stride I/O mode,
 	 * idx should always inside read-ahead window in this case
 	 * For stride I/O mode, just check whether the idx is inside
-	 * the ria_pages. */
+	 * the ria_pages.
+	 */
 	return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
 	       (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
 		ria->ria_length < ria->ria_pages);
@@ -616,7 +622,7 @@
 	int rc, count = 0, stride_ria;
 	unsigned long page_idx;
 
-	LASSERT(ria != NULL);
+	LASSERT(ria);
 	RIA_DEBUG(ria);
 
 	stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
@@ -634,11 +640,13 @@
 		} else if (stride_ria) {
 			/* If it is not in the read-ahead window, and it is
 			 * read-ahead mode, then check whether it should skip
-			 * the stride gap */
+			 * the stride gap
+			 */
 			pgoff_t offset;
 			/* FIXME: This assertion only is valid when it is for
 			 * forward read-ahead, it will be fixed when backward
-			 * read-ahead is implemented */
+			 * read-ahead is implemented
+			 */
 			LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
 				 page_idx,
 				 ria->ria_start, ria->ria_end, ria->ria_stoff,
@@ -647,7 +655,7 @@
 			offset = offset % (ria->ria_length);
 			if (offset > ria->ria_pages) {
 				page_idx += ria->ria_length - offset;
-				CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
+				CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
 				       ria->ria_length - offset);
 				continue;
 			}
@@ -699,7 +707,7 @@
 		bead = NULL;
 
 	/* Enlarge the RA window to encompass the full read */
-	if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
+	if (bead && ras->ras_window_start + ras->ras_window_len <
 	    bead->lrr_start + bead->lrr_count) {
 		ras->ras_window_len = bead->lrr_start + bead->lrr_count -
 				      ras->ras_window_start;
@@ -721,7 +729,8 @@
 		 */
 		/* Note: we only trim the RPC, instead of extending the RPC
 		 * to the boundary, so to avoid reading too much pages during
-		 * random reading. */
+		 * random reading.
+		 */
 		rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1));
 		if (rpc_boundary > 0)
 			rpc_boundary--;
@@ -764,7 +773,6 @@
 	ret = ll_read_ahead_pages(env, io, queue,
 				  ria, &reserved, mapping, &ra_end);
 
-	LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
 	if (reserved != 0)
 		ll_ra_count_put(ll_i2sbi(inode), reserved);
 
@@ -775,8 +783,9 @@
 	 * the ras we need to go back and update the ras so that the
 	 * next read-ahead tries from where we left off.  we only do so
 	 * if the region we failed to issue read-ahead on is still ahead
-	 * of the app and behind the next index to start read-ahead from */
-	CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
+	 * of the app and behind the next index to start read-ahead from
+	 */
+	CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
 	       ra_end, end, ria->ria_end);
 
 	if (ra_end != end + 1) {
@@ -881,7 +890,8 @@
 }
 
 /* Stride Read-ahead window will be increased inc_len according to
- * stride I/O pattern */
+ * stride I/O pattern
+ */
 static void ras_stride_increase_window(struct ll_readahead_state *ras,
 				       struct ll_ra_info *ra,
 				       unsigned long inc_len)
@@ -952,7 +962,8 @@
 	 * or reads to some other part of the file.  Secondly if we get a
 	 * read-ahead miss that we think we've previously issued.  This can
 	 * be a symptom of there being so many read-ahead pages that the VM is
-	 * reclaiming it before we get to it. */
+	 * reclaiming it before we get to it.
+	 */
 	if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
 		zero = 1;
 		ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
@@ -969,7 +980,8 @@
 	 * file up to ra_max_pages_per_file.  This is simply a best effort
 	 * and only occurs once per open file.  Normal RA behavior is reverted
 	 * to for subsequent IO.  The mmap case does not increment
-	 * ras_requests and thus can never trigger this behavior. */
+	 * ras_requests and thus can never trigger this behavior.
+	 */
 	if (ras->ras_requests == 2 && !ras->ras_request_index) {
 		__u64 kms_pages;
 
@@ -1015,14 +1027,16 @@
 			    stride_io_mode(ras)) {
 				/*If stride-RA hit cache miss, the stride dector
 				 *will not be reset to avoid the overhead of
-				 *redetecting read-ahead mode */
+				 *redetecting read-ahead mode
+				 */
 				if (index != ras->ras_last_readpage + 1)
 					ras->ras_consecutive_pages = 0;
 				ras_reset(inode, ras, index);
 				RAS_CDEBUG(ras);
 			} else {
 				/* Reset both stride window and normal RA
-				 * window */
+				 * window
+				 */
 				ras_reset(inode, ras, index);
 				ras->ras_consecutive_pages++;
 				ras_stride_reset(ras);
@@ -1031,7 +1045,8 @@
 		} else if (stride_io_mode(ras)) {
 			/* If this is contiguous read but in stride I/O mode
 			 * currently, check whether stride step still is valid,
-			 * if invalid, it will reset the stride ra window*/
+			 * if invalid, it will reset the stride ra window
+			 */
 			if (!index_in_stride_window(ras, index)) {
 				/* Shrink stride read-ahead window to be zero */
 				ras_stride_reset(ras);
@@ -1047,7 +1062,8 @@
 	if (stride_io_mode(ras))
 		/* Since stride readahead is sensitive to the offset
 		 * of read-ahead, so we use original offset here,
-		 * instead of ras_window_start, which is RPC aligned */
+		 * instead of ras_window_start, which is RPC aligned
+		 */
 		ras->ras_next_readahead = max(index, ras->ras_next_readahead);
 	else
 		ras->ras_next_readahead = max(ras->ras_window_start,
@@ -1055,7 +1071,8 @@
 	RAS_CDEBUG(ras);
 
 	/* Trigger RA in the mmap case where ras_consecutive_requests
-	 * is not incremented and thus can't be used to trigger RA */
+	 * is not incremented and thus can't be used to trigger RA
+	 */
 	if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
 		ras->ras_window_len = RAS_INCREASE_STEP(inode);
 		goto out_unlock;
@@ -1101,7 +1118,7 @@
 	LASSERT(PageLocked(vmpage));
 	LASSERT(!PageWriteback(vmpage));
 
-	LASSERT(ll_i2dtexp(inode) != NULL);
+	LASSERT(ll_i2dtexp(inode));
 
 	env = cl_env_nested_get(&nest);
 	if (IS_ERR(env)) {
@@ -1110,7 +1127,7 @@
 	}
 
 	clob  = ll_i2info(inode)->lli_clob;
-	LASSERT(clob != NULL);
+	LASSERT(clob);
 
 	io = ccc_env_thread_io(env);
 	io->ci_obj = clob;
@@ -1153,14 +1170,16 @@
 		/* Flush page failed because the extent is being written out.
 		 * Wait for the write of extent to be finished to avoid
 		 * breaking kernel which assumes ->writepage should mark
-		 * PageWriteback or clean the page. */
+		 * PageWriteback or clean the page.
+		 */
 		result = cl_sync_file_range(inode, offset,
 					    offset + PAGE_CACHE_SIZE - 1,
 					    CL_FSYNC_LOCAL, 1);
 		if (result > 0) {
 			/* actually we may have written more than one page.
 			 * decreasing this page because the caller will count
-			 * it. */
+			 * it.
+			 */
 			wbc->nr_to_write -= result - 1;
 			result = 0;
 		}
@@ -1210,7 +1229,8 @@
 	if (sbi->ll_umounting)
 		/* if the mountpoint is being umounted, all pages have to be
 		 * evicted to avoid hitting LBUG when truncate_inode_pages()
-		 * is called later on. */
+		 * is called later on.
+		 */
 		ignore_layout = 1;
 	result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
 	if (result > 0) {
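
ll_ra_count_get()/ll_ra_count_put() above manage a shared readahead budget with
atomic_add_return(), and the reflowed comment explains why two racing readers can both see
an exhausted budget.  A stripped-down sketch of that accounting scheme, with a
hypothetical foo_ra_budget standing in for ll_ra_info:

#include <linux/atomic.h>
#include <linux/kernel.h>

struct foo_ra_budget {
	atomic_t	used;	/* pages currently reserved */
	int		max;	/* total budget */
};

/* Reserve optimistically, then give back whatever pushed the counter
 * past the limit; the return value is the number of pages actually
 * granted, possibly zero.
 */
static int foo_ra_get(struct foo_ra_budget *b, int want)
{
	int after = atomic_add_return(want, &b->used);

	if (after > b->max) {
		int over = min(want, after - b->max);

		atomic_sub(over, &b->used);
		want -= over;
	}
	return want;
}

static void foo_ra_put(struct foo_ra_budget *b, int got)
{
	atomic_sub(got, &b->used);
}
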
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 711fda9..b0f7ffa 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -92,9 +92,9 @@
 		if (!IS_ERR(env)) {
 			inode = vmpage->mapping->host;
 			obj = ll_i2info(inode)->lli_clob;
-			if (obj != NULL) {
+			if (obj) {
 				page = cl_vmpage_page(vmpage, obj);
-				if (page != NULL) {
+				if (page) {
 					lu_ref_add(&page->cp_reference,
 						   "delete", vmpage);
 					cl_page_delete(env, page);
@@ -128,11 +128,11 @@
 		return 0;
 
 	mapping = vmpage->mapping;
-	if (mapping == NULL)
+	if (!mapping)
 		return 1;
 
 	obj = ll_i2info(mapping->host)->lli_clob;
-	if (obj == NULL)
+	if (!obj)
 		return 1;
 
 	/* 1 for page allocator, 1 for cl_page and 1 for page cache */
@@ -145,12 +145,13 @@
 		/* If we can't allocate an env we won't call cl_page_put()
 		 * later on which further means it's impossible to drop
 		 * page refcount by cl_page, so ask kernel to not free
-		 * this page. */
+		 * this page.
+		 */
 		return 0;
 
 	page = cl_vmpage_page(vmpage, obj);
-	result = page == NULL;
-	if (page != NULL) {
+	result = !page;
+	if (page) {
 		if (!cl_page_in_use(page)) {
 			result = 1;
 			cl_page_delete(env, page);
@@ -212,7 +213,8 @@
 }
 
 /*  ll_free_user_pages - tear down page struct array
- *  @pages: array of page struct pointers underlying target buffer */
+ *  @pages: array of page struct pointers underlying target buffer
+ */
 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
 {
 	int i;
@@ -246,7 +248,7 @@
 	cl_2queue_init(queue);
 	for (i = 0; i < page_count; i++) {
 		if (pv->ldp_offsets)
-		    file_offset = pv->ldp_offsets[i];
+			file_offset = pv->ldp_offsets[i];
 
 		LASSERT(!(file_offset & (page_size - 1)));
 		clp = cl_page_find(env, obj, cl_index(obj, file_offset),
@@ -266,7 +268,8 @@
 		do_io = true;
 
 		/* check the page type: if the page is a host page, then do
-		 * write directly */
+		 * write directly
+		 */
 		if (clp->cp_type == CPT_CACHEABLE) {
 			struct page *vmpage = cl_page_vmpage(env, clp);
 			struct page *src_page;
@@ -284,14 +287,16 @@
 			kunmap_atomic(src);
 
 			/* make sure page will be added to the transfer by
-			 * cl_io_submit()->...->vvp_page_prep_write(). */
+			 * cl_io_submit()->...->vvp_page_prep_write().
+			 */
 			if (rw == WRITE)
 				set_page_dirty(vmpage);
 
 			if (rw == READ) {
 				/* do not issue the page for read, since it
 				 * may reread a ra page which has NOT uptodate
-				 * bit set. */
+				 * bit set.
+				 */
 				cl_page_disown(env, io, clp);
 				do_io = false;
 			}
@@ -359,7 +364,8 @@
  * kmalloc limit.  We need to fit all of the brw_page structs, each one
  * representing PAGE_SIZE worth of user data, into a single buffer, and
  * then truncate this to be a full-sized RPC.  For 4kB PAGE_SIZE this is
- * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
+ * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
+ */
 #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
 		      ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
@@ -396,7 +402,7 @@
 	env = cl_env_get(&refcheck);
 	LASSERT(!IS_ERR(env));
 	io = ccc_env_io(env)->cui_cl.cis_io;
-	LASSERT(io != NULL);
+	LASSERT(io);
 
 	/* 0. Need locking between buffered and direct access. and race with
 	 *    size changing by concurrent truncates and writes.
@@ -433,7 +439,8 @@
 			 * for the request, shrink it to a smaller
 			 * PAGE_SIZE multiple and try again.
 			 * We should always be able to kmalloc for a
-			 * page worth of page pointers = 4MB on i386. */
+			 * page worth of page pointers = 4MB on i386.
+			 */
 			if (result == -ENOMEM &&
 			    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
 				   PAGE_CACHE_SIZE) {
@@ -461,7 +468,7 @@
 			struct lov_stripe_md *lsm;
 
 			lsm = ccc_inode_lsm_get(inode);
-			LASSERT(lsm != NULL);
+			LASSERT(lsm);
 			lov_stripe_lock(lsm);
 			obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
 			lov_stripe_unlock(lsm);
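
The direct-I/O hunks above keep the comment about shrinking the transfer to a smaller
PAGE_SIZE multiple when the page-pointer array cannot be allocated.  A sketch of that
retry strategy in isolation; foo_do_chunk() stands in for the per-chunk submission and is
hypothetical:

#include <linux/errno.h>
#include <linux/mm.h>

static ssize_t foo_dio_loop(size_t size)
{
	ssize_t result;

	while (1) {
		result = foo_do_chunk(size);	/* hypothetical */
		if (result != -ENOMEM || size <= PAGE_SIZE)
			break;
		/* halve the chunk but keep it page aligned and non-zero */
		size = (size / 2) & PAGE_MASK;
	}
	return result;
}
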
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 88ffd8e..d4a3722 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -49,13 +49,13 @@
 
 #define SA_OMITTED_ENTRY_MAX 8ULL
 
-typedef enum {
+enum se_stat {
 	/** negative values are for error cases */
 	SA_ENTRY_INIT = 0,      /** init entry */
 	SA_ENTRY_SUCC = 1,      /** stat succeed */
 	SA_ENTRY_INVA = 2,      /** invalid entry */
 	SA_ENTRY_DEST = 3,      /** entry to be destroyed */
-} se_stat_t;
+};
 
 struct ll_sa_entry {
 	/* link into sai->sai_entries */
@@ -71,7 +71,7 @@
 	/* low layer ldlm lock handle */
 	__u64		   se_handle;
 	/* entry status */
-	se_stat_t	       se_stat;
+	enum se_stat	   se_stat;
 	/* entry size, contains name */
 	int		     se_size;
 	/* pointer to async getattr enqueue info */
@@ -130,7 +130,7 @@
 static inline int agl_should_run(struct ll_statahead_info *sai,
 				 struct inode *inode)
 {
-	return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
+	return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
 }
 
 static inline int sa_sent_full(struct ll_statahead_info *sai)
@@ -366,7 +366,7 @@
  */
 static void
 do_sa_entry_to_stated(struct ll_statahead_info *sai,
-		      struct ll_sa_entry *entry, se_stat_t stat)
+		      struct ll_sa_entry *entry, enum se_stat stat)
 {
 	struct ll_sa_entry *se;
 	struct list_head	 *pos = &sai->sai_entries_stated;
@@ -392,7 +392,7 @@
  */
 static int
 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
-		      struct ll_sa_entry *entry, se_stat_t stat)
+		      struct ll_sa_entry *entry, enum se_stat stat)
 {
 	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
 	int		   ret = 1;
@@ -494,12 +494,13 @@
 
 		if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
 			/* It is race case, the interpret callback just hold
-			 * a reference count */
+			 * a reference count
+			 */
 			spin_unlock(&lli->lli_sa_lock);
 			return;
 		}
 
-		LASSERT(lli->lli_opendir_key == NULL);
+		LASSERT(!lli->lli_opendir_key);
 		LASSERT(thread_is_stopped(&sai->sai_thread));
 		LASSERT(thread_is_stopped(&sai->sai_agl_thread));
 
@@ -618,20 +619,21 @@
 	it = &minfo->mi_it;
 	req = entry->se_req;
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EFAULT;
 		goto out;
 	}
 
 	child = entry->se_inode;
-	if (child == NULL) {
+	if (!child) {
 		/*
 		 * lookup.
 		 */
 		LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
 
 		/* XXX: No fid in reply, this is probably cross-ref case.
-		 * SA can't handle it yet. */
+		 * SA can't handle it yet.
+		 */
 		if (body->valid & OBD_MD_MDS) {
 			rc = -EAGAIN;
 			goto out;
@@ -672,7 +674,8 @@
 	/* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
 	 * reference count by calling "ll_intent_drop_lock()" in spite of the
 	 * above operations failed or not. Do not worry about calling
-	 * "ll_intent_drop_lock()" more than once. */
+	 * "ll_intent_drop_lock()" more than once.
+	 */
 	rc = ll_sa_entry_to_stated(sai, entry,
 				   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
 	if (rc == 0 && entry->se_index == sai->sai_index_wait)
@@ -698,14 +701,15 @@
 		/* release ibits lock ASAP to avoid deadlock when statahead
 		 * thread enqueues lock on parent in readdir and another
 		 * process enqueues lock on child with parent lock held, eg.
-		 * unlink. */
+		 * unlink.
+		 */
 		handle = it->d.lustre.it_lock_handle;
 		ll_intent_drop_lock(it);
 	}
 
 	spin_lock(&lli->lli_sa_lock);
 	/* stale entry */
-	if (unlikely(lli->lli_sai == NULL ||
+	if (unlikely(!lli->lli_sai ||
 		     lli->lli_sai->sai_generation != minfo->mi_generation)) {
 		spin_unlock(&lli->lli_sa_lock);
 		rc = -ESTALE;
@@ -720,7 +724,7 @@
 		}
 
 		entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
-		if (entry == NULL) {
+		if (!entry) {
 			sai->sai_replied++;
 			spin_unlock(&lli->lli_sa_lock);
 			rc = -EIDRM;
@@ -736,7 +740,8 @@
 			/* Release the async ibits lock ASAP to avoid deadlock
 			 * when statahead thread tries to enqueue lock on parent
 			 * for readpage and other tries to enqueue lock on child
-			 * with parent's lock held, for example: unlink. */
+			 * with parent's lock held, for example: unlink.
+			 */
 			entry->se_handle = handle;
 			wakeup = list_empty(&sai->sai_entries_received);
 			list_add_tail(&entry->se_list,
@@ -756,7 +761,7 @@
 		iput(dir);
 		kfree(minfo);
 	}
-	if (sai != NULL)
+	if (sai)
 		ll_sai_put(sai);
 	return rc;
 }
@@ -853,7 +858,7 @@
 	struct ldlm_enqueue_info *einfo;
 	int rc;
 
-	if (unlikely(inode == NULL))
+	if (unlikely(!inode))
 		return 1;
 
 	if (d_mountpoint(dentry))
@@ -908,10 +913,9 @@
 		rc = do_sa_revalidate(dir, entry, dentry);
 		if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
 			ll_agl_add(sai, d_inode(dentry), entry->se_index);
-	}
 
-	if (dentry != NULL)
 		dput(dentry);
+	}
 
 	if (rc) {
 		rc1 = ll_sa_entry_to_stated(sai, entry,
@@ -948,7 +952,8 @@
 	if (thread_is_init(thread))
 		/* If someone else has changed the thread state
 		 * (e.g. already changed to SVC_STOPPING), we can't just
-		 * blindly overwrite that setting. */
+		 * blindly overwrite that setting.
+		 */
 		thread_set_flags(thread, SVC_RUNNING);
 	spin_unlock(&plli->lli_agl_lock);
 	wake_up(&thread->t_ctl_waitq);
@@ -964,7 +969,8 @@
 
 		spin_lock(&plli->lli_agl_lock);
 		/* The statahead thread maybe help to process AGL entries,
-		 * so check whether list empty again. */
+		 * so check whether list empty again.
+		 */
 		if (!list_empty(&sai->sai_entries_agl)) {
 			clli = list_entry(sai->sai_entries_agl.next,
 					  struct ll_inode_info, lli_agl_list);
@@ -1049,7 +1055,8 @@
 	if (thread_is_init(thread))
 		/* If someone else has changed the thread state
 		 * (e.g. already changed to SVC_STOPPING), we can't just
-		 * blindly overwrite that setting. */
+		 * blindly overwrite that setting.
+		 */
 		thread_set_flags(thread, SVC_RUNNING);
 	spin_unlock(&plli->lli_sa_lock);
 	wake_up(&thread->t_ctl_waitq);
@@ -1070,7 +1077,7 @@
 		}
 
 		dp = page_address(page);
-		for (ent = lu_dirent_start(dp); ent != NULL;
+		for (ent = lu_dirent_start(dp); ent;
 		     ent = lu_dirent_next(ent)) {
 			__u64 hash;
 			int namelen;
@@ -1137,7 +1144,8 @@
 
 			/* If no window for metadata statahead, but there are
 			 * some AGL entries to be triggered, then try to help
-			 * to process the AGL entries. */
+			 * to process the AGL entries.
+			 */
 			if (sa_sent_full(sai)) {
 				spin_lock(&plli->lli_agl_lock);
 				while (!list_empty(&sai->sai_entries_agl)) {
@@ -1274,7 +1282,7 @@
 {
 	struct ll_inode_info *lli = ll_i2info(dir);
 
-	if (unlikely(key == NULL))
+	if (unlikely(!key))
 		return;
 
 	spin_lock(&lli->lli_sa_lock);
@@ -1357,7 +1365,7 @@
 		}
 
 		dp = page_address(page);
-		for (ent = lu_dirent_start(dp); ent != NULL;
+		for (ent = lu_dirent_start(dp); ent;
 		     ent = lu_dirent_next(ent)) {
 			__u64 hash;
 			int namelen;
@@ -1365,7 +1373,8 @@
 
 			hash = le64_to_cpu(ent->lde_hash);
 			/* The ll_get_dir_page() can return any page containing
-			 * the given hash which may be not the start hash. */
+			 * the given hash which may not be the start hash.
+			 */
 			if (unlikely(hash < pos))
 				continue;
 
@@ -1448,7 +1457,7 @@
 	struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
 	int		   hit;
 
-	if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
+	if (entry && entry->se_stat == SA_ENTRY_SUCC)
 		hit = 1;
 	else
 		hit = 0;
@@ -1498,6 +1507,7 @@
 	struct ll_sa_entry       *entry;
 	struct ptlrpc_thread     *thread;
 	struct l_wait_info	lwi   = { 0 };
+	struct task_struct *task;
 	int		       rc    = 0;
 	struct ll_inode_info     *plli;
 
@@ -1540,7 +1550,7 @@
 		}
 
 		entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
-		if (entry == NULL || only_unplug) {
+		if (!entry || only_unplug) {
 			ll_sai_unplug(sai, entry);
 			return entry ? 1 : -EAGAIN;
 		}
@@ -1559,8 +1569,7 @@
 			}
 		}
 
-		if (entry->se_stat == SA_ENTRY_SUCC &&
-		    entry->se_inode != NULL) {
+		if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
 			struct inode *inode = entry->se_inode;
 			struct lookup_intent it = { .it_op = IT_GETATTR,
 						    .d.lustre.it_lock_handle =
@@ -1570,7 +1579,7 @@
 			rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
 						ll_inode2fid(inode), &bits);
 			if (rc == 1) {
-				if (d_inode(*dentryp) == NULL) {
+				if (!d_inode(*dentryp)) {
 					struct dentry *alias;
 
 					alias = ll_splice_alias(inode,
@@ -1616,14 +1625,14 @@
 	}
 
 	sai = ll_sai_alloc();
-	if (sai == NULL) {
+	if (!sai) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
 	sai->sai_inode = igrab(dir);
-	if (unlikely(sai->sai_inode == NULL)) {
+	if (unlikely(!sai->sai_inode)) {
 		CWARN("Do not start stat ahead on dying inode "DFID"\n",
 		      PFID(&lli->lli_fid));
 		rc = -ESTALE;
@@ -1651,25 +1660,28 @@
 	 * but as soon as we expose the sai by attaching it to the lli that
 	 * default reference can be dropped by another thread calling
 	 * ll_stop_statahead. We need to take a local reference to protect
-	 * the sai buffer while we intend to access it. */
+	 * the sai buffer while we intend to access it.
+	 */
 	ll_sai_get(sai);
 	lli->lli_sai = sai;
 
 	plli = ll_i2info(d_inode(parent));
-	rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
-				 "ll_sa_%u", plli->lli_opendir_pid));
+	task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
+			   plli->lli_opendir_pid);
 	thread = &sai->sai_thread;
-	if (IS_ERR_VALUE(rc)) {
+	if (IS_ERR(task)) {
+		rc = PTR_ERR(task);
 		CERROR("can't start ll_sa thread, rc: %d\n", rc);
 		dput(parent);
 		lli->lli_opendir_key = NULL;
 		thread_set_flags(thread, SVC_STOPPED);
 		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
 		/* Drop both our own local reference and the default
-		 * reference from allocation time. */
+		 * reference from allocation time.
+		 */
 		ll_sai_put(sai);
 		ll_sai_put(sai);
-		LASSERT(lli->lli_sai == NULL);
+		LASSERT(!lli->lli_sai);
 		return -EAGAIN;
 	}
 
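The kthread_run() hunk above switches from testing an integer with IS_ERR_VALUE() to keeping the returned task_struct pointer, testing it with IS_ERR(), and only then extracting the errno with PTR_ERR(). Below is a minimal userspace sketch of that error-pointer convention; ERR_PTR()/IS_ERR()/PTR_ERR() are re-implemented here purely for illustration (the kernel provides them in <linux/err.h>), and fake_thread_start() is a hypothetical stand-in for kthread_run().

/*
 * Userspace illustration of the error-pointer pattern: a function either
 * returns a valid pointer or an errno encoded as a pointer.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for kthread_run(): a valid pointer on success, ERR_PTR() on error. */
static void *fake_thread_start(int fail)
{
	static int dummy_task;

	return fail ? ERR_PTR(-EAGAIN) : (void *)&dummy_task;
}

int main(void)
{
	void *task = fake_thread_start(1);

	/* Check the pointer itself, then pull the errno out of it. */
	if (IS_ERR(task)) {
		int rc = PTR_ERR(task);

		printf("thread start failed, rc: %d\n", rc);
		return 1;
	}
	printf("thread started\n");
	return 0;
}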
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 86c371e..63f090a 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -54,7 +54,7 @@
 
 	ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
 	lli = kmem_cache_alloc(ll_inode_cachep, GFP_NOFS | __GFP_ZERO);
-	if (lli == NULL)
+	if (!lli)
 		return NULL;
 
 	inode_init_once(&lli->lli_vfs_inode);
@@ -99,7 +99,8 @@
 
 	/* print an address of _any_ initialized kernel symbol from this
 	 * module, to allow debugging with gdb that doesn't support data
-	 * symbols from modules.*/
+	 * symbols from modules.
+	 */
 	CDEBUG(D_INFO, "Lustre client module (%p).\n",
 	       &lustre_super_operations);
 
@@ -108,26 +109,26 @@
 					    sizeof(struct ll_inode_info),
 					    0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
 					    NULL);
-	if (ll_inode_cachep == NULL)
+	if (!ll_inode_cachep)
 		goto out_cache;
 
 	ll_file_data_slab = kmem_cache_create("ll_file_data",
 						 sizeof(struct ll_file_data), 0,
 						 SLAB_HWCACHE_ALIGN, NULL);
-	if (ll_file_data_slab == NULL)
+	if (!ll_file_data_slab)
 		goto out_cache;
 
 	ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
 						  sizeof(struct ll_remote_perm),
 						      0, 0, NULL);
-	if (ll_remote_perm_cachep == NULL)
+	if (!ll_remote_perm_cachep)
 		goto out_cache;
 
 	ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
 						   REMOTE_PERM_HASHSIZE *
 						   sizeof(struct list_head),
 						   0, 0, NULL);
-	if (ll_rmtperm_hash_cachep == NULL)
+	if (!ll_rmtperm_hash_cachep)
 		goto out_cache;
 
 	llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
@@ -146,7 +147,8 @@
 	cfs_get_random_bytes(seed, sizeof(seed));
 
 	/* Nodes with small feet have little entropy. The NID for this
-	 * node gives the most entropy in the low bits */
+	 * node gives the most entropy in the low bits
+	 */
 	for (i = 0;; i++) {
 		if (LNetGetId(i, &lnet_id) == -ENOENT)
 			break;
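The super25.c hunks above chain several kmem_cache_create() calls, check each result with !ptr, and funnel every failure to a single cleanup label. A small userspace sketch of that goto-unwind idiom follows; malloc() stands in for the slab caches and all names here are illustrative, not the driver's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int init_caches(void)
{
	void *inode_cache = NULL;
	void *file_cache = NULL;
	void *perm_cache = NULL;

	inode_cache = malloc(128);
	if (!inode_cache)
		goto out_cache;

	file_cache = malloc(64);
	if (!file_cache)
		goto out_cache;

	perm_cache = malloc(32);
	if (!perm_cache)
		goto out_cache;

	printf("all caches ready\n");
	return 0;

out_cache:
	/* free(NULL) is a no-op, so the unwind can free unconditionally. */
	free(perm_cache);
	free(file_cache);
	free(inode_cache);
	return -ENOMEM;
}

int main(void)
{
	return init_caches() ? 1 : 0;
}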
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 2610348..4435a63 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -59,7 +59,8 @@
 		*symname = lli->lli_symlink_name;
 		/* If the total CDEBUG() size is larger than a page, it
 		 * will print a warning to the console, avoid this by
-		 * printing just the last part of the symlink. */
+		 * printing just the last part of the symlink.
+		 */
 		CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
 		       print_limit < symlen ? "..." : "", print_limit,
 		       (*symname) + symlen - print_limit, symlen);
@@ -81,7 +82,6 @@
 	}
 
 	body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
-	LASSERT(body != NULL);
 	if ((body->valid & OBD_MD_LINKNAME) == 0) {
 		CERROR("OBD_MD_LINKNAME not set on reply\n");
 		rc = -EPROTO;
@@ -97,7 +97,7 @@
 	}
 
 	*symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD);
-	if (*symname == NULL ||
+	if (!*symname ||
 	    strnlen(*symname, symlen) != symlen - 1) {
 		/* not full/NULL terminated */
 		CERROR("inode %lu: symlink not NULL terminated string of length %d\n",
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index fdca4ec..6075ccf 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -80,7 +80,7 @@
 	struct vvp_thread_info *info;
 
 	info = kmem_cache_alloc(vvp_thread_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info == NULL)
+	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;
 }
@@ -99,7 +99,7 @@
 	struct vvp_session *session;
 
 	session = kmem_cache_alloc(vvp_session_kmem, GFP_NOFS | __GFP_ZERO);
-	if (session == NULL)
+	if (!session)
 		session = ERR_PTR(-ENOMEM);
 	return session;
 }
@@ -228,7 +228,7 @@
 	if (!IS_ERR(env)) {
 		cld = sbi->ll_cl;
 
-		if (cld != NULL) {
+		if (cld) {
 			cl_stack_fini(env, cld);
 			sbi->ll_cl = NULL;
 			sbi->ll_site = NULL;
@@ -325,11 +325,11 @@
 
 	cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
 				vvp_pgcache_obj_get, id);
-	if (id->vpi_obj != NULL) {
+	if (id->vpi_obj) {
 		struct lu_object *lu_obj;
 
 		lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
-		if (lu_obj != NULL) {
+		if (lu_obj) {
 			lu_object_ref_add(lu_obj, "dump", current);
 			return lu2cl(lu_obj);
 		}
@@ -355,7 +355,7 @@
 		if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
 			return ~0ULL;
 		clob = vvp_pgcache_obj(env, dev, &id);
-		if (clob != NULL) {
+		if (clob) {
 			struct cl_object_header *hdr;
 			int		      nr;
 			struct cl_page	  *pg;
@@ -443,7 +443,7 @@
 		vvp_pgcache_id_unpack(pos, &id);
 		sbi = f->private;
 		clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
-		if (clob != NULL) {
+		if (clob) {
 			hdr = cl_object_header(clob);
 
 			spin_lock(&hdr->coh_page_guard);
@@ -452,7 +452,7 @@
 
 			seq_printf(f, "%8x@"DFID": ",
 				   id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
-			if (page != NULL) {
+			if (page) {
 				vvp_pgcache_page_show(env, f, page);
 				cl_page_put(env, page);
 			} else
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 0920ac6..cca4b6c 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -78,7 +78,8 @@
 	case CIT_READ:
 	case CIT_WRITE:
 		/* don't need lock here to check lli_layout_gen as we have held
-		 * extent lock and GROUP lock has to hold to swap layout */
+		 * extent lock and GROUP lock has to be held to swap layout
+		 */
 		if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
 			io->ci_need_restart = 1;
 			/* this will return application a short read/write */
@@ -134,7 +135,8 @@
 		 */
 		rc = ll_layout_restore(ccc_object_inode(obj));
 		/* if restore registration failed, no restart,
-		 * we will return -ENODATA */
+		 * we will return -ENODATA
+		 */
 		/* The layout will change after restore, so we need to
 		 * block on layout lock hold by the MDT
 		 * as MDT will not send new layout in lvb (see LU-3124)
@@ -164,8 +166,7 @@
 			       DFID" layout changed from %d to %d.\n",
 			       PFID(lu_object_fid(&obj->co_lu)),
 			       cio->cui_layout_gen, gen);
-			/* today successful restore is the only possible
-			 * case */
+			/* today successful restore is the only possible case */
 			/* restore was done, clear restoring state */
 			ll_i2info(ccc_object_inode(obj))->lli_flags &=
 				~LLIF_FILE_RESTORING;
@@ -181,7 +182,7 @@
 
 	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
 
-	if (page != NULL) {
+	if (page) {
 		lu_ref_del(&page->cp_reference, "fault", io);
 		cl_page_put(env, page);
 		io->u.ci_fault.ft_page = NULL;
@@ -220,11 +221,11 @@
 	if (!cl_is_normalio(env, io))
 		return 0;
 
-	if (vio->cui_iter == NULL) /* nfs or loop back device write */
+	if (!vio->cui_iter) /* nfs or loop back device write */
 		return 0;
 
 	/* No MM (e.g. NFS)? No vmas too. */
-	if (mm == NULL)
+	if (!mm)
 		return 0;
 
 	iov_for_each(iov, i, *(vio->cui_iter)) {
@@ -456,7 +457,8 @@
 
 	if (cl_io_is_trunc(io))
 		/* Truncate in memory pages - they must be clean pages
-		 * because osc has already notified to destroy osc_extents. */
+		 * because osc has already notified to destroy osc_extents.
+		 */
 		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
 
 	inode_unlock(inode);
@@ -529,7 +531,8 @@
 				vio->u.splice.cui_flags);
 		/* LU-1109: do splice read stripe by stripe otherwise if it
 		 * may make nfsd stuck if this read occupied all internal pipe
-		 * buffers. */
+		 * buffers.
+		 */
 		io->ci_continue = 0;
 		break;
 	default:
@@ -587,7 +590,7 @@
 
 	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
 
-	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
+	if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */
 		result = 0;
 	else
 		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
@@ -673,7 +676,7 @@
 
 	/* must return locked page */
 	if (fio->ft_mkwrite) {
-		LASSERT(cfio->ft_vmpage != NULL);
+		LASSERT(cfio->ft_vmpage);
 		lock_page(cfio->ft_vmpage);
 	} else {
 		result = vvp_io_kernel_fault(cfio);
@@ -689,13 +692,15 @@
 
 	size = i_size_read(inode);
 	/* Though we have already held a cl_lock upon this page, but
-	 * it still can be truncated locally. */
+	 * it still can be truncated locally.
+	 */
 	if (unlikely((vmpage->mapping != inode->i_mapping) ||
 		     (page_offset(vmpage) > size))) {
 		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
 
 		/* return +1 to stop cl_io_loop() and ll_fault() will catch
-		 * and retry. */
+		 * and retry.
+		 */
 		result = 1;
 		goto out;
 	}
@@ -736,7 +741,8 @@
 	}
 
 	/* if page is going to be written, we should add this page into cache
-	 * earlier. */
+	 * earlier.
+	 */
 	if (fio->ft_mkwrite) {
 		wait_on_page_writeback(vmpage);
 		if (set_page_dirty(vmpage)) {
@@ -750,7 +756,8 @@
 
 			/* Do not set Dirty bit here so that in case IO is
 			 * started before the page is really made dirty, we
-			 * still have chance to detect it. */
+			 * still have a chance to detect it.
+			 */
 			result = cl_page_cache_add(env, io, page, CRT_WRITE);
 			LASSERT(cl_page_is_owned(page, io));
 
@@ -792,7 +799,7 @@
 
 out:
 	/* return unlocked vmpage to avoid deadlocking */
-	if (vmpage != NULL)
+	if (vmpage)
 		unlock_page(vmpage);
 	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
 	return result;
@@ -803,7 +810,8 @@
 {
 	/* we should mark TOWRITE bit to each dirty page in radix tree to
 	 * verify pages have been written, but this is difficult because of
-	 * race. */
+	 * race.
+	 */
 	return 0;
 }
 
@@ -1003,7 +1011,7 @@
 	 *
 	 *     (3) IO is batched up to the RPC size and is async until the
 	 *     client max cache is hit
-	 *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
+	 *     (/sys/fs/lustre/osc/OSC.../max_dirty_mb)
 	 *
 	 */
 	if (!PageDirty(vmpage)) {
@@ -1153,7 +1161,8 @@
 
 		count = io->u.ci_rw.crw_count;
 		/* "If nbyte is 0, read() will return 0 and have no other
-		 *  results."  -- Single Unix Spec */
+		 *  results."  -- Single Unix Spec
+		 */
 		if (count == 0)
 			result = 1;
 		else
@@ -1173,20 +1182,23 @@
 
 	/* ignore layout change for generic CIT_MISC but not for glimpse.
 	 * io context for glimpse must set ci_verify_layout to true,
-	 * see cl_glimpse_size0() for details. */
+	 * see cl_glimpse_size0() for details.
+	 */
 	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
 		io->ci_ignore_layout = 1;
 
 	/* Enqueue layout lock and get layout version. We need to do this
 	 * even for operations requiring to open file, such as read and write,
-	 * because it might not grant layout lock in IT_OPEN. */
+	 * because it might not grant layout lock in IT_OPEN.
+	 */
 	if (result == 0 && !io->ci_ignore_layout) {
 		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
 		if (result == -ENOENT)
 			/* If the inode on MDS has been removed, but the objects
 			 * on OSTs haven't been destroyed (async unlink), layout
 			 * fetch will return -ENOENT, we'd ignore this error
-			 * and continue with dirty flush. LU-3230. */
+			 * and continue with dirty flush. LU-3230.
+			 */
 			result = 0;
 		if (result < 0)
 			CERROR("%s: refresh file layout " DFID " error %d.\n",
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index c82714e..03c887d 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -137,7 +137,8 @@
 		 * page may be stale due to layout change, and the process
 		 * will never be notified.
 		 * This operation is expensive but mmap processes have to pay
-		 * a price themselves. */
+		 * a price themselves.
+		 */
 		unmap_mapping_range(conf->coc_inode->i_mapping,
 				    0, OBD_OBJECT_EOF, 0);
 
@@ -147,7 +148,7 @@
 	if (conf->coc_opc != OBJECT_CONF_SET)
 		return 0;
 
-	if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) {
+	if (conf->u.coc_md && conf->u.coc_md->lsm) {
 		CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
 		       PFID(&lli->lli_fid), lli->lli_layout_gen,
 		       conf->u.coc_md->lsm->lsm_layout_gen);
@@ -186,9 +187,8 @@
 	struct cl_object     *obj = lli->lli_clob;
 	struct lu_object     *lu;
 
-	LASSERT(obj != NULL);
 	lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
-	LASSERT(lu != NULL);
+	LASSERT(lu);
 	return lu2ccc(lu);
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index a133475..f0b26e3 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -56,7 +56,7 @@
 {
 	struct page *vmpage = cp->cpg_page;
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	page_cache_release(vmpage);
 }
 
@@ -81,7 +81,7 @@
 	struct ccc_page *vpg    = cl2ccc_page(slice);
 	struct page      *vmpage = vpg->cpg_page;
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	if (nonblock) {
 		if (!trylock_page(vmpage))
 			return -EAGAIN;
@@ -105,7 +105,7 @@
 {
 	struct page *vmpage = cl2vm_page(slice);
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 	wait_on_page_writeback(vmpage);
 }
@@ -116,7 +116,7 @@
 {
 	struct page *vmpage = cl2vm_page(slice);
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 }
 
@@ -125,7 +125,7 @@
 {
 	struct page *vmpage = cl2vm_page(slice);
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
 	unlock_page(cl2vm_page(slice));
@@ -139,7 +139,7 @@
 	struct address_space *mapping;
 	struct ccc_page      *cpg     = cl2ccc_page(slice);
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
 	mapping = vmpage->mapping;
@@ -161,7 +161,7 @@
 	struct page *vmpage = cl2vm_page(slice);
 	__u64       offset;
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
 	offset = vmpage->index << PAGE_CACHE_SHIFT;
@@ -199,7 +199,7 @@
 {
 	struct page *vmpage = cl2vm_page(slice);
 
-	LASSERT(vmpage != NULL);
+	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 	if (uptodate)
 		SetPageUptodate(vmpage);
@@ -232,7 +232,8 @@
 	LASSERT(!PageDirty(vmpage));
 
 	/* ll_writepage path is not a sync write, so need to set page writeback
-	 * flag */
+	 * flag
+	 */
 	if (!pg->cp_sync_io)
 		set_page_writeback(vmpage);
 
@@ -290,7 +291,7 @@
 	} else
 		cp->cpg_defer_uptodate = 0;
 
-	if (page->cp_sync_io == NULL)
+	if (!page->cp_sync_io)
 		unlock_page(vmpage);
 }
 
@@ -317,7 +318,7 @@
 	cp->cpg_write_queued = 0;
 	vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
 
-	if (pg->cp_sync_io != NULL) {
+	if (pg->cp_sync_io) {
 		LASSERT(PageLocked(vmpage));
 		LASSERT(!PageWriteback(vmpage));
 	} else {
@@ -356,15 +357,15 @@
 	lock_page(vmpage);
 	if (clear_page_dirty_for_io(vmpage)) {
 		LASSERT(pg->cp_state == CPS_CACHED);
-		/* This actually clears the dirty bit in the radix
-		 * tree. */
+		/* This actually clears the dirty bit in the radix tree. */
 		set_page_writeback(vmpage);
 		vvp_write_pending(cl2ccc(slice->cpl_obj),
 				cl2ccc_page(slice));
 		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
 	} else if (pg->cp_state == CPS_PAGEOUT) {
 		/* is it possible for osc_flush_async_page() to already
-		 * make it ready? */
+		 * make it ready?
+		 */
 		result = -EALREADY;
 	} else {
 		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
@@ -385,7 +386,7 @@
 	(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
 		   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
 		   vp->cpg_write_queued, vmpage);
-	if (vmpage != NULL) {
+	if (vmpage) {
 		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
 			   (long)vmpage->flags, page_count(vmpage),
 			   page_mapcount(vmpage), vmpage->private,
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 8eb43f1..c34df37 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -148,7 +148,7 @@
 	    (xattr_type == XATTR_ACL_ACCESS_T ||
 	    xattr_type == XATTR_ACL_DEFAULT_T)) {
 		rce = rct_search(&sbi->ll_rct, current_pid());
-		if (rce == NULL ||
+		if (!rce ||
 		    (rce->rce_ops != RMT_LSETFACL &&
 		    rce->rce_ops != RMT_RSETFACL))
 			return -EOPNOTSUPP;
@@ -158,7 +158,6 @@
 
 			ee = et_search_del(&sbi->ll_et, current_pid(),
 					   ll_inode2fid(inode), xattr_type);
-			LASSERT(ee != NULL);
 			if (valid & OBD_MD_FLXATTR) {
 				acl = lustre_acl_xattr_merge2ext(
 						(posix_acl_xattr_header *)value,
@@ -192,12 +191,11 @@
 			 valid, name, pv, size, 0, flags,
 			 ll_i2suppgid(inode), &req);
 #ifdef CONFIG_FS_POSIX_ACL
-	if (new_value != NULL)
-		/*
-		 * Release the posix ACL space.
-		 */
-		kfree(new_value);
-	if (acl != NULL)
+	/*
+	 * Release the posix ACL space.
+	 */
+	kfree(new_value);
+	if (acl)
 		lustre_ext_acl_xattr_free(acl);
 #endif
 	if (rc) {
@@ -239,11 +237,12 @@
 
 		/* Attributes that are saved via getxattr will always have
 		 * the stripe_offset as 0.  Instead, the MDS should be
-		 * allowed to pick the starting OST index.   b=17846 */
-		if (lump != NULL && lump->lmm_stripe_offset == 0)
+		 * allowed to pick the starting OST index.   b=17846
+		 */
+		if (lump && lump->lmm_stripe_offset == 0)
 			lump->lmm_stripe_offset = -1;
 
-		if (lump != NULL && S_ISREG(inode->i_mode)) {
+		if (lump && S_ISREG(inode->i_mode)) {
 			int flags = FMODE_WRITE;
 			int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
 				sizeof(*lump) : sizeof(struct lov_user_md_v3);
@@ -325,7 +324,7 @@
 	    (xattr_type == XATTR_ACL_ACCESS_T ||
 	    xattr_type == XATTR_ACL_DEFAULT_T)) {
 		rce = rct_search(&sbi->ll_rct, current_pid());
-		if (rce == NULL ||
+		if (!rce ||
 		    (rce->rce_ops != RMT_LSETFACL &&
 		    rce->rce_ops != RMT_LGETFACL &&
 		    rce->rce_ops != RMT_RSETFACL &&
@@ -366,7 +365,7 @@
 			goto out_xattr;
 
 		/* Add "system.posix_acl_access" to the list */
-		if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
+		if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) {
 			if (size == 0) {
 				rc += sizeof(XATTR_NAME_ACL_ACCESS);
 			} else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
@@ -482,13 +481,14 @@
 
 		if (size == 0 && S_ISDIR(inode->i_mode)) {
 			/* XXX directory EA is fix for now, optimize to save
-			 * RPC transfer */
+			 * RPC transfer
+			 */
 			rc = sizeof(struct lov_user_md);
 			goto out;
 		}
 
 		lsm = ccc_inode_lsm_get(inode);
-		if (lsm == NULL) {
+		if (!lsm) {
 			if (S_ISDIR(inode->i_mode)) {
 				rc = ll_dir_getstripe(inode, &lmm,
 						      &lmmsize, &request);
@@ -497,7 +497,8 @@
 			}
 		} else {
 			/* LSM is present already after lookup/getattr call.
-			 * we need to grab layout lock once it is implemented */
+			 * we need to grab layout lock once it is implemented
+			 */
 			rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
 			lmmsize = rc;
 		}
@@ -510,7 +511,8 @@
 			/* used to call ll_get_max_mdsize() forward to get
 			 * the maximum buffer size, while some apps (such as
 			 * rsync 3.0.x) care much about the exact xattr value
-			 * size */
+			 * size
+			 */
 			rc = lmmsize;
 			goto out;
 		}
@@ -526,7 +528,8 @@
 		memcpy(lump, lmm, lmmsize);
 		/* do not return layout gen for getxattr otherwise it would
 		 * confuse tar --xattr by recognizing layout gen as stripe
-		 * offset when the file is restored. See LU-2809. */
+		 * offset when the file is restored. See LU-2809.
+		 */
 		lump->lmm_layout_gen = 0;
 
 		rc = lmmsize;
@@ -560,7 +563,7 @@
 	if (rc < 0)
 		goto out;
 
-	if (buffer != NULL) {
+	if (buffer) {
 		struct ll_sb_info *sbi = ll_i2sbi(inode);
 		char *xattr_name = buffer;
 		int xlen, rem = rc;
@@ -598,12 +601,12 @@
 		const size_t name_len   = sizeof("lov") - 1;
 		const size_t total_len  = prefix_len + name_len + 1;
 
-		if (((rc + total_len) > size) && (buffer != NULL)) {
+		if (((rc + total_len) > size) && buffer) {
 			ptlrpc_req_finished(request);
 			return -ERANGE;
 		}
 
-		if (buffer != NULL) {
+		if (buffer) {
 			buffer += rc;
 			memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
 			memcpy(buffer + prefix_len, "lov", name_len);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index d140276..460b7c5 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -23,7 +23,8 @@
  */
 struct ll_xattr_entry {
 	struct list_head	xe_list;    /* protected with
-					     * lli_xattrs_list_rwsem */
+					     * lli_xattrs_list_rwsem
+					     */
 	char			*xe_name;   /* xattr name, \0-terminated */
 	char			*xe_value;  /* xattr value */
 	unsigned		xe_namelen; /* strlen(xe_name) + 1 */
@@ -59,9 +60,6 @@
  */
 static void ll_xattr_cache_init(struct ll_inode_info *lli)
 {
-
-	LASSERT(lli != NULL);
-
 	INIT_LIST_HEAD(&lli->lli_xattrs);
 	lli->lli_flags |= LLIF_XATTR_CACHE;
 }
@@ -83,8 +81,7 @@
 
 	list_for_each_entry(entry, cache, xe_list) {
 		/* xattr_name == NULL means look for any entry */
-		if (xattr_name == NULL ||
-		    strcmp(xattr_name, entry->xe_name) == 0) {
+		if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
 			*xattr = entry;
 			CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
 			       entry->xe_name, entry->xe_vallen,
@@ -118,7 +115,7 @@
 	}
 
 	xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO);
-	if (xattr == NULL) {
+	if (!xattr) {
 		CDEBUG(D_CACHE, "failed to allocate xattr\n");
 		return -ENOMEM;
 	}
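A couple of hunks above, the comment in ll_xattr_cache_find() notes that a NULL xattr_name means "match any cached entry". Below is a small userspace sketch of that lookup convention; the list and entry types are simplified stand-ins, not the Lustre structures.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct xentry {
	const char *name;
	const char *value;
	struct xentry *next;
};

static struct xentry *cache_find(struct xentry *head, const char *name)
{
	struct xentry *e;

	for (e = head; e; e = e->next) {
		/* A NULL name asks for any cached entry at all. */
		if (!name || strcmp(name, e->name) == 0)
			return e;
	}
	return NULL;
}

int main(void)
{
	struct xentry b = { "user.b", "2", NULL };
	struct xentry a = { "user.a", "1", &b };
	struct xentry *hit = cache_find(&a, "user.b");

	printf("by name: %s\n", hit ? hit->value : "miss");
	printf("any: %s\n", cache_find(&a, NULL)->name);
	return 0;
}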
@@ -270,7 +267,7 @@
 				  struct lookup_intent *oit,
 				  struct ptlrpc_request **req)
 {
-	ldlm_mode_t mode;
+	enum ldlm_mode mode;
 	struct lustre_handle lockh = { 0 };
 	struct md_op_data *op_data;
 	struct ll_inode_info *lli = ll_i2info(inode);
@@ -284,7 +281,8 @@
 
 	mutex_lock(&lli->lli_xattrs_enq_lock);
 	/* inode may have been shrunk and recreated, so data is gone, match lock
-	 * only when data exists. */
+	 * only when data exists.
+	 */
 	if (ll_xattr_cache_valid(lli)) {
 		/* Try matching first. */
 		mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
@@ -359,7 +357,7 @@
 	}
 
 	/* Matched but no cache? Cancelled on error by a parallel refill. */
-	if (unlikely(req == NULL)) {
+	if (unlikely(!req)) {
 		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
 		rc = -EIO;
 		goto out_maybe_drop;
@@ -376,7 +374,7 @@
 	}
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL) {
+	if (!body) {
 		CERROR("no MDT BODY in the refill xattr reply\n");
 		rc = -EPROTO;
 		goto out_destroy;
@@ -388,7 +386,7 @@
 						body->aclsize);
 	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
 					      body->max_mdsize * sizeof(__u32));
-	if (xdata == NULL || xval == NULL || xsizes == NULL) {
+	if (!xdata || !xval || !xsizes) {
 		CERROR("wrong setxattr reply\n");
 		rc = -EPROTO;
 		goto out_destroy;
@@ -404,7 +402,7 @@
 	for (i = 0; i < body->max_mdsize; i++) {
 		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
 		/* Perform consistency checks: attr names and vals in pill */
-		if (memchr(xdata, 0, xtail - xdata) == NULL) {
+		if (!memchr(xdata, 0, xtail - xdata)) {
 			CERROR("xattr protocol violation (names are broken)\n");
 			rc = -EPROTO;
 		} else if (xval + *xsizes > xvtail) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index ee23592..378691b 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -58,7 +58,8 @@
 	int rc;
 
 	/* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and
-	 * this fid_is_local check should be removed once LU-2240 is fixed */
+	 * this fid_is_local check should be removed once LU-2240 is fixed
+	 */
 	LASSERTF((fid_seq_in_fldb(fid_seq(fid)) ||
 		  fid_seq_is_local_file(fid_seq(fid))) &&
 		 fid_is_sane(fid), DFID" is insane!\n", PFID(fid));
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 66de27f..259b211 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -69,7 +69,7 @@
 	int			rc = 0;
 
 	body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL)
+	if (!body)
 		return -EPROTO;
 
 	LASSERT((body->valid & OBD_MD_MDS));
@@ -107,14 +107,16 @@
 
 	op_data->op_fid1 = body->fid1;
 	/* Sent the parent FID to the remote MDT */
-	if (parent_fid != NULL) {
+	if (parent_fid) {
 		/* The parent fid is only for remote open to
 		 * check whether the open is from OBF,
-		 * see mdt_cross_open */
+		 * see mdt_cross_open
+		 */
 		LASSERT(it->it_op & IT_OPEN);
 		op_data->op_fid2 = *parent_fid;
 		/* Add object FID to op_fid3, in case it needs to check stale
-		 * (M_CHECK_STALE), see mdc_finish_intent_lock */
+		 * (M_CHECK_STALE), see mdc_finish_intent_lock
+		 */
 		op_data->op_fid3 = body->fid1;
 	}
 
@@ -173,7 +175,8 @@
 		return PTR_ERR(tgt);
 
 	/* If it is ready to open the file by FID, do not need
-	 * allocate FID at all, otherwise it will confuse MDT */
+	 * allocate FID at all, otherwise it will confuse MDT
+	 */
 	if ((it->it_op & IT_CREAT) &&
 	    !(it->it_flags & MDS_OPEN_BY_FID)) {
 		/*
@@ -204,7 +207,7 @@
 		return rc;
 
 	body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL)
+	if (!body)
 		return -EPROTO;
 	/*
 	 * Not cross-ref case, just get out of here.
@@ -270,7 +273,7 @@
 	rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
 			     flags, reqp, cb_blocking, extra_lock_flags);
 
-	if (rc < 0 || *reqp == NULL)
+	if (rc < 0 || !*reqp)
 		return rc;
 
 	/*
@@ -278,7 +281,7 @@
 	 * remote inode. Let's check this.
 	 */
 	body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL)
+	if (!body)
 		return -EPROTO;
 	/* Not cross-ref case, just get out of here. */
 	if (likely(!(body->valid & OBD_MD_MDS)))
@@ -299,7 +302,6 @@
 	struct obd_device *obd = exp->exp_obd;
 	int		rc;
 
-	LASSERT(it != NULL);
 	LASSERT(fid_is_sane(&op_data->op_fid1));
 
 	CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n",
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index eb8e673..041d30f33 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -66,7 +66,7 @@
 	struct mdt_body	 *body;
 	struct lmv_stripe_md    *mea;
 
-	LASSERT(req != NULL);
+	LASSERT(req);
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
 
@@ -75,8 +75,6 @@
 
 	mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD,
 					   body->eadatasize);
-	LASSERT(mea != NULL);
-
 	if (mea->mea_count == 0)
 		return NULL;
 	if (mea->mea_magic != MEA_MAGIC_LAST_CHAR &&
@@ -101,7 +99,7 @@
 	int i;
 
 	for (i = 0; i < count; i++) {
-		if (lmv->tgts[i] == NULL)
+		if (!lmv->tgts[i])
 			continue;
 
 		if (lmv->tgts[i]->ltd_idx == mds)
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index bbafe0a..67746c9 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -53,6 +53,7 @@
 #include "../include/lprocfs_status.h"
 #include "../include/lustre_lite.h"
 #include "../include/lustre_fid.h"
+#include "../include/lustre_kernelcomm.h"
 #include "lmv_internal.h"
 
 static void lmv_activate_target(struct lmv_obd *lmv,
@@ -87,7 +88,7 @@
 	spin_lock(&lmv->lmv_lock);
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 		tgt = lmv->tgts[i];
-		if (tgt == NULL || tgt->ltd_exp == NULL)
+		if (!tgt || !tgt->ltd_exp)
 			continue;
 
 		CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i,
@@ -103,7 +104,7 @@
 	}
 
 	obd = class_exp2obd(tgt->ltd_exp);
-	if (obd == NULL) {
+	if (!obd) {
 		rc = -ENOTCONN;
 		goto out_lmv_lock;
 	}
@@ -261,7 +262,7 @@
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 		tgt = lmv->tgts[i];
-		if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
+		if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
 			continue;
 
 		obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
@@ -301,8 +302,7 @@
 		return 0;
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL ||
-		    lmv->tgts[i]->ltd_exp == NULL ||
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
 		    lmv->tgts[i]->ltd_active == 0) {
 			CWARN("%s: NULL export for %d\n", obd->obd_name, i);
 			continue;
@@ -409,7 +409,7 @@
 
 static void lmv_del_target(struct lmv_obd *lmv, int index)
 {
-	if (lmv->tgts[index] == NULL)
+	if (!lmv->tgts[index])
 		return;
 
 	kfree(lmv->tgts[index]);
@@ -441,7 +441,7 @@
 		}
 	}
 
-	if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
+	if ((index < lmv->tgts_size) && lmv->tgts[index]) {
 		tgt = lmv->tgts[index];
 		CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
 		       obd->obd_name,
@@ -459,7 +459,7 @@
 		while (newsize < index + 1)
 			newsize <<= 1;
 		newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
-		if (newtgts == NULL) {
+		if (!newtgts) {
 			lmv_init_unlock(lmv);
 			return -ENOMEM;
 		}
@@ -538,11 +538,9 @@
 	CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
 	       lmv->cluuid.uuid, obd->obd_name);
 
-	LASSERT(lmv->tgts != NULL);
-
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 		tgt = lmv->tgts[i];
-		if (tgt == NULL)
+		if (!tgt)
 			continue;
 		rc = lmv_connect_mdc(obd, tgt);
 		if (rc)
@@ -562,7 +560,7 @@
 		int rc2;
 
 		tgt = lmv->tgts[i];
-		if (tgt == NULL)
+		if (!tgt)
 			continue;
 		tgt->ltd_active = 0;
 		if (tgt->ltd_exp) {
@@ -585,9 +583,6 @@
 	struct obd_device      *mdc_obd;
 	int		     rc;
 
-	LASSERT(tgt != NULL);
-	LASSERT(obd != NULL);
-
 	mdc_obd = class_exp2obd(tgt->ltd_exp);
 
 	if (mdc_obd) {
@@ -640,7 +635,7 @@
 		goto out_local;
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
 			continue;
 
 		lmv_disconnect_mdc(obd, lmv->tgts[i]);
@@ -662,7 +657,8 @@
 	return rc;
 }
 
-static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
+static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
+			void __user *uarg)
 {
 	struct obd_device	*obddev = class_exp2obd(exp);
 	struct lmv_obd		*lmv = &obddev->u.lmv;
@@ -683,8 +679,9 @@
 		goto out_fid2path;
 
 	/* If remote_gf != NULL, it means just building the
-	 * path on the remote MDT, copy this path segment to gf */
-	if (remote_gf != NULL) {
+	 * path on the remote MDT, copy this path segment to gf
+	 */
+	if (remote_gf) {
 		struct getinfo_fid2path *ori_gf;
 		char *ptr;
 
@@ -714,7 +711,7 @@
 		goto out_fid2path;
 
 	/* sigh, has to go to another MDT to do path building further */
-	if (remote_gf == NULL) {
+	if (!remote_gf) {
 		remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
 		remote_gf = kzalloc(remote_gf_size, GFP_NOFS);
 		if (!remote_gf) {
@@ -792,14 +789,17 @@
 }
 
 static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
-				 struct lustre_kernelcomm *lk, void *uarg)
+				 struct lustre_kernelcomm *lk,
+				 void __user *uarg)
 {
-	int	i, rc = 0;
+	int rc = 0;
+	__u32 i;
 
 	/* unregister request (call from llapi_hsm_copytool_fini) */
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 		/* best effort: try to clean as much as possible
-		 * (continue on error) */
+		 * (continue on error)
+		 */
 		obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
 	}
 
@@ -808,20 +808,23 @@
 	 * and will unregister automatically.
 	 */
 	rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
+
 	return rc;
 }
 
 static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
-			       struct lustre_kernelcomm *lk, void *uarg)
+			       struct lustre_kernelcomm *lk, void __user *uarg)
 {
-	struct file	*filp;
-	int		 i, j, err;
-	int		 rc = 0;
-	bool		 any_set = false;
+	struct file *filp;
+	__u32 i, j;
+	int err, rc = 0;
+	bool any_set = false;
+	struct kkuc_ct_data kcd = { 0 };
 
 	/* All or nothing: try to register to all MDS.
 	 * In case of failure, unregister from previous MDS,
-	 * except if it because of inactive target. */
+	 * except if it is because of an inactive target.
+	 */
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 		err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
 				   len, lk, uarg);
@@ -841,8 +844,8 @@
 				return rc;
 			}
 			/* else: transient error.
-			 * kuc will register to the missing MDT
-			 * when it is back */
+			 * kuc will register to the missing MDT when it is back
+			 */
 		} else {
 			any_set = true;
 		}
@@ -854,17 +857,25 @@
 
 	/* at least one registration done, with no failure */
 	filp = fget(lk->lk_wfd);
-	if (filp == NULL) {
+	if (!filp)
 		return -EBADF;
+
+	kcd.kcd_magic = KKUC_CT_DATA_MAGIC;
+	kcd.kcd_uuid = lmv->cluuid;
+	kcd.kcd_archive = lk->lk_data;
+
+	rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group,
+				   &kcd, sizeof(kcd));
+	if (rc) {
+		if (filp)
+			fput(filp);
 	}
-	rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data);
-	if (rc != 0 && filp != NULL)
-		fput(filp);
+
 	return rc;
 }
 
 static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
-			 int len, void *karg, void *uarg)
+			 int len, void *karg, void __user *uarg)
 {
 	struct obd_device    *obddev = class_exp2obd(exp);
 	struct lmv_obd       *lmv = &obddev->u.lmv;
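The lmv_hsm_ct_register() changes above keep the existing all-or-nothing policy: try to register with every MDS target, and on a hard failure undo the registrations already made, while tolerating transient errors. The userspace sketch below shows only that loop shape; register_one()/unregister_one() are hypothetical stand-ins and the real classification of hard versus transient errors is simplified.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_TARGETS 4

static int register_one(int idx)
{
	return idx == 2 ? -EPERM : 0;	/* simulate a hard failure on target 2 */
}

static void unregister_one(int idx)
{
	printf("rolled back target %d\n", idx);
}

static int register_all(void)
{
	bool any_set = false;
	int i, j, err;

	for (i = 0; i < NR_TARGETS; i++) {
		err = register_one(i);
		if (err) {
			if (err != -EAGAIN) {	/* treat -EAGAIN as transient */
				/* hard error: undo everything done so far */
				for (j = 0; j < i; j++)
					unregister_one(j);
				return err;
			}
		} else {
			any_set = true;
		}
	}

	return any_set ? 0 : -ENODEV;	/* nothing registered at all */
}

int main(void)
{
	return register_all() ? 1 : 0;
}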
@@ -887,8 +898,7 @@
 		if (index >= count)
 			return -ENODEV;
 
-		if (lmv->tgts[index] == NULL ||
-		    lmv->tgts[index]->ltd_active == 0)
+		if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0)
 			return -ENODATA;
 
 		mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
@@ -922,18 +932,18 @@
 				return -EINVAL;
 
 			tgt = lmv->tgts[qctl->qc_idx];
-			if (tgt == NULL || tgt->ltd_exp == NULL)
+			if (!tgt || !tgt->ltd_exp)
 				return -EINVAL;
 		} else if (qctl->qc_valid == QC_UUID) {
 			for (i = 0; i < count; i++) {
 				tgt = lmv->tgts[i];
-				if (tgt == NULL)
+				if (!tgt)
 					continue;
 				if (!obd_uuid_equals(&tgt->ltd_uuid,
 						     &qctl->obd_uuid))
 					continue;
 
-				if (tgt->ltd_exp == NULL)
+				if (!tgt->ltd_exp)
 					return -EINVAL;
 
 				break;
@@ -967,8 +977,8 @@
 		if (icc->icc_mdtindex >= count)
 			return -ENODEV;
 
-		if (lmv->tgts[icc->icc_mdtindex] == NULL ||
-		    lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL ||
+		if (!lmv->tgts[icc->icc_mdtindex] ||
+		    !lmv->tgts[icc->icc_mdtindex]->ltd_exp ||
 		    lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
 			return -ENODEV;
 		rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
@@ -976,7 +986,7 @@
 		break;
 	}
 	case LL_IOC_GET_CONNECT_FLAGS: {
-		if (lmv->tgts[0] == NULL)
+		if (!lmv->tgts[0])
 			return -ENODATA;
 		rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
 		break;
@@ -993,10 +1003,10 @@
 
 		tgt = lmv_find_target(lmv, &op_data->op_fid1);
 		if (IS_ERR(tgt))
-				return PTR_ERR(tgt);
+			return PTR_ERR(tgt);
 
-		if (tgt->ltd_exp == NULL)
-				return -EINVAL;
+		if (!tgt->ltd_exp)
+			return -EINVAL;
 
 		rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
 		break;
@@ -1021,7 +1031,8 @@
 
 		/* if the request is about a single fid
 		 * or if there is a single MDS, no need to split
-		 * the request. */
+		 * the request.
+		 */
 		if (reqcount == 1 || count == 1) {
 			tgt = lmv_find_target(lmv,
 					      &hur->hur_user_item[0].hui_fid);
@@ -1044,7 +1055,7 @@
 						  hur_user_item[nr])
 					 + hur->hur_request.hr_data_len;
 				req = libcfs_kvzalloc(reqlen, GFP_NOFS);
-				if (req == NULL)
+				if (!req)
 					return -ENOMEM;
 
 				lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
@@ -1070,7 +1081,7 @@
 		if (IS_ERR(tgt2))
 			return PTR_ERR(tgt2);
 
-		if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
+		if (!tgt1->ltd_exp || !tgt2->ltd_exp)
 			return -EINVAL;
 
 		/* only files on same MDT can have their layouts swapped */
@@ -1094,11 +1105,11 @@
 			struct obd_device *mdc_obd;
 			int err;
 
-			if (lmv->tgts[i] == NULL ||
-			    lmv->tgts[i]->ltd_exp == NULL)
+			if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
 				continue;
 			/* ll_umount_begin() sets force flag but for lmv, not
-			 * mdc. Let's pass it through */
+			 * mdc. Let's pass it through
+			 */
 			mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
 			mdc_obd->obd_force = obddev->obd_force;
 			err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
@@ -1175,7 +1186,7 @@
 {
 	struct lmv_obd	  *lmv = &obd->u.lmv;
 
-	LASSERT(mds != NULL);
+	LASSERT(mds);
 
 	if (lmv->desc.ld_tgt_count == 1) {
 		*mds = 0;
@@ -1205,7 +1216,8 @@
 	}
 
 	/* Allocate new fid on target according to operation type and parent
-	 * home mds. */
+	 * home mds.
+	 */
 	*mds = op_data->op_mds;
 	return 0;
 }
@@ -1225,7 +1237,7 @@
 	 */
 	mutex_lock(&tgt->ltd_fid_mutex);
 
-	if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) {
+	if (tgt->ltd_active == 0 || !tgt->ltd_exp) {
 		rc = -ENODEV;
 		goto out;
 	}
@@ -1252,8 +1264,8 @@
 	u32		       mds = 0;
 	int		    rc;
 
-	LASSERT(op_data != NULL);
-	LASSERT(fid != NULL);
+	LASSERT(op_data);
+	LASSERT(fid);
 
 	rc = lmv_placement_policy(obd, op_data, &mds);
 	if (rc) {
@@ -1291,7 +1303,7 @@
 	}
 
 	lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS);
-	if (lmv->tgts == NULL)
+	if (!lmv->tgts)
 		return -ENOMEM;
 	lmv->tgts_size = 32;
 
@@ -1332,11 +1344,11 @@
 	struct lmv_obd   *lmv = &obd->u.lmv;
 
 	fld_client_fini(&lmv->lmv_fld);
-	if (lmv->tgts != NULL) {
+	if (lmv->tgts) {
 		int i;
 
 		for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-			if (lmv->tgts[i] == NULL)
+			if (!lmv->tgts[i])
 				continue;
 			lmv_del_target(lmv, i);
 		}
@@ -1357,7 +1369,8 @@
 	switch (lcfg->lcfg_command) {
 	case LCFG_ADD_MDC:
 		/* modify_mdc_tgts add 0:lustre-clilmv  1:lustre-MDT0000_UUID
-		 * 2:0  3:1  4:lustre-MDT0000-mdc_UUID */
+		 * 2:0  3:1  4:lustre-MDT0000-mdc_UUID
+		 */
 		if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) {
 			rc = -EINVAL;
 			goto out;
@@ -1402,7 +1415,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
 			continue;
 
 		rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp,
@@ -1421,7 +1434,8 @@
 			 * i.e. mount does not need the merged osfs
 			 * from all of MDT.
 			 * And also clients can be mounted as long as
-			 * MDT0 is in service*/
+			 * MDT0 is in service
+			 */
 			if (flags & OBD_STATFS_FOR_MDT0)
 				goto out_free_temp;
 		} else {
@@ -1547,7 +1561,7 @@
 	 * space of MDT storing inode.
 	 */
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
 			continue;
 		md_null_inode(lmv->tgts[i]->ltd_exp, fid);
 	}
@@ -1575,7 +1589,7 @@
 	 * space of MDT storing inode.
 	 */
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
 			continue;
 		rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
 		if (rc)
@@ -1655,7 +1669,7 @@
 		       cap_effective, rdev, request);
 
 	if (rc == 0) {
-		if (*request == NULL)
+		if (!*request)
 			return rc;
 		CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
 	}
@@ -1701,7 +1715,6 @@
 	int			 pmode;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	LASSERT(body != NULL);
 
 	if (!(body->valid & OBD_MD_MDS))
 		return 0;
@@ -1808,7 +1821,6 @@
 
 	body = req_capsule_server_get(&(*request)->rq_pill,
 				      &RMF_MDT_BODY);
-	LASSERT(body != NULL);
 
 	if (body->valid & OBD_MD_MDS) {
 		struct lu_fid rid = body->fid1;
@@ -1842,7 +1854,8 @@
 	 NULL)
 
 static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
-			    int op_tgt, ldlm_mode_t mode, int bits, int flag)
+			    int op_tgt, enum ldlm_mode mode, int bits,
+			    int flag)
 {
 	struct lu_fid	  *fid = md_op_data_fid(op_data, flag);
 	struct obd_device      *obd = exp->exp_obd;
@@ -2097,7 +2110,7 @@
 
 		while (--nlupgs > 0) {
 			ent = lu_dirent_start(dp);
-			for (end_dirent = ent; ent != NULL;
+			for (end_dirent = ent; ent;
 			     end_dirent = ent, ent = lu_dirent_next(ent))
 				;
 
@@ -2117,7 +2130,8 @@
 				break;
 
 			/* Enlarge the end entry lde_reclen from 0 to
-			 * first entry of next lu_dirpage. */
+			 * first entry of next lu_dirpage.
+			 */
 			LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
 			end_dirent->lde_reclen =
 				cpu_to_le16((char *)(dp->ldp_entries) -
@@ -2227,7 +2241,7 @@
 		return rc;
 
 	body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL)
+	if (!body)
 		return -EPROTO;
 
 	/* Not cross-ref case, just get out of here. */
@@ -2255,7 +2269,8 @@
 	 * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times).
 	 *
 	 * In theory, it might try unlimited time here, but it should
-	 * be very rare case.  */
+	 * be a very rare case.
+	 */
 	op_data->op_fid2 = body->fid1;
 	ptlrpc_req_finished(*request);
 	*request = NULL;
@@ -2270,7 +2285,8 @@
 	switch (stage) {
 	case OBD_CLEANUP_EARLY:
 		/* XXX: here should be calling obd_precleanup() down to
-		 * stack. */
+		 * stack.
+		 */
 		break;
 	case OBD_CLEANUP_EXPORTS:
 		fld_client_debugfs_fini(&lmv->lmv_fld);
@@ -2291,7 +2307,7 @@
 	int		      rc = 0;
 
 	obd = class_exp2obd(exp);
-	if (obd == NULL) {
+	if (!obd) {
 		CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
 		       exp->exp_handle.h_cookie);
 		return -EINVAL;
@@ -2312,7 +2328,7 @@
 			/*
 			 * All tgts should be connected when this gets called.
 			 */
-			if (tgt == NULL || tgt->ltd_exp == NULL)
+			if (!tgt || !tgt->ltd_exp)
 				continue;
 
 			if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
@@ -2355,7 +2371,7 @@
 	int rc = 0;
 
 	obd = class_exp2obd(exp);
-	if (obd == NULL) {
+	if (!obd) {
 		CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
 		       exp->exp_handle.h_cookie);
 		return -EINVAL;
@@ -2368,7 +2384,7 @@
 		for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 			tgt = lmv->tgts[i];
 
-			if (tgt == NULL || tgt->ltd_exp == NULL)
+			if (!tgt || !tgt->ltd_exp)
 				continue;
 
 			err = obd_set_info_async(env, tgt->ltd_exp,
@@ -2403,9 +2419,9 @@
 		return 0;
 	}
 
-	if (*lmmp == NULL) {
+	if (!*lmmp) {
 		*lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS);
-		if (*lmmp == NULL)
+		if (!*lmmp)
 			return -ENOMEM;
 	}
 
@@ -2443,10 +2459,10 @@
 	__u32		       magic;
 
 	mea_size = lmv_get_easize(lmv);
-	if (lsmp == NULL)
+	if (!lsmp)
 		return mea_size;
 
-	if (*lsmp != NULL && lmm == NULL) {
+	if (*lsmp && !lmm) {
 		kvfree(*tmea);
 		*lsmp = NULL;
 		return 0;
@@ -2455,7 +2471,7 @@
 	LASSERT(mea_size == lmm_size);
 
 	*tmea = libcfs_kvzalloc(mea_size, GFP_NOFS);
-	if (*tmea == NULL)
+	if (!*tmea)
 		return -ENOMEM;
 
 	if (!lmm)
@@ -2485,8 +2501,8 @@
 }
 
 static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
-			     ldlm_policy_data_t *policy, ldlm_mode_t mode,
-			     ldlm_cancel_flags_t flags, void *opaque)
+			     ldlm_policy_data_t *policy, enum ldlm_mode mode,
+			     enum ldlm_cancel_flags flags, void *opaque)
 {
 	struct obd_device       *obd = exp->exp_obd;
 	struct lmv_obd	  *lmv = &obd->u.lmv;
@@ -2494,10 +2510,10 @@
 	int		      err;
 	int		      i;
 
-	LASSERT(fid != NULL);
+	LASSERT(fid);
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL ||
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
 		    lmv->tgts[i]->ltd_active == 0)
 			continue;
 
@@ -2519,14 +2535,16 @@
 	return rc;
 }
 
-static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
-				  const struct lu_fid *fid, ldlm_type_t type,
-				  ldlm_policy_data_t *policy, ldlm_mode_t mode,
-				  struct lustre_handle *lockh)
+static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
+				     const struct lu_fid *fid,
+				     enum ldlm_type type,
+				     ldlm_policy_data_t *policy,
+				     enum ldlm_mode mode,
+				     struct lustre_handle *lockh)
 {
 	struct obd_device       *obd = exp->exp_obd;
 	struct lmv_obd	  *lmv = &obd->u.lmv;
-	ldlm_mode_t	      rc;
+	enum ldlm_mode	      rc;
 	int		      i;
 
 	CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
@@ -2538,8 +2556,7 @@
 	 * one fid was created in.
 	 */
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		if (lmv->tgts[i] == NULL ||
-		    lmv->tgts[i]->ltd_exp == NULL ||
+		if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
 		    lmv->tgts[i]->ltd_active == 0)
 			continue;
 
@@ -2695,7 +2712,7 @@
 
 		tgt = lmv->tgts[i];
 
-		if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
+		if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
 			continue;
 		if (!tgt->ltd_active) {
 			CDEBUG(D_HA, "mdt %d is inactive.\n", i);
@@ -2730,7 +2747,7 @@
 		int err;
 
 		tgt = lmv->tgts[i];
-		if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
+		if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
 			CERROR("lmv idx %d inactive\n", i);
 			return -EIO;
 		}
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index 40cf4d9..b39e364 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -138,7 +138,7 @@
 	struct obd_device *dev = (struct obd_device *)m->private;
 	struct lmv_obd	  *lmv;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	lmv = &dev->u.lmv;
 	seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid);
 	return 0;
@@ -171,7 +171,7 @@
 {
 	struct lmv_tgt_desc     *tgt = v;
 
-	if (tgt == NULL)
+	if (!tgt)
 		return 0;
 	seq_printf(p, "%d: %s %sACTIVE\n",
 		   tgt->ltd_idx, tgt->ltd_uuid.uuid,
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 66a2492..b5fc159 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -651,7 +651,7 @@
 	struct lov_session *ses;
 
 	ses = lu_context_key_get(env->le_ses, &lov_session_key);
-	LASSERT(ses != NULL);
+	LASSERT(ses);
 	return ses;
 }
 
@@ -759,7 +759,7 @@
 	const struct cl_lock_slice *slice;
 
 	slice = cl_lock_at(lock, &lovsub_device_type);
-	LASSERT(slice != NULL);
+	LASSERT(slice);
 	return cl2lovsub_lock(slice);
 }
 
@@ -817,7 +817,7 @@
 	struct lov_thread_info *info;
 
 	info = lu_context_key_get(&env->le_ctx, &lov_key);
-	LASSERT(info != NULL);
+	LASSERT(info);
 	return info;
 }
 
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 3733fdc..ee093e0 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -143,7 +143,7 @@
 	struct lov_thread_info *info;
 
 	info = kmem_cache_alloc(lov_thread_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info != NULL)
+	if (info)
 		INIT_LIST_HEAD(&info->lti_closure.clc_list);
 	else
 		info = ERR_PTR(-ENOMEM);
@@ -171,7 +171,7 @@
 	struct lov_session *info;
 
 	info = kmem_cache_alloc(lov_session_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info == NULL)
+	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;
 }
@@ -199,15 +199,15 @@
 	int i;
 	struct lov_device *ld = lu2lov_dev(d);
 
-	LASSERT(ld->ld_lov != NULL);
-	if (ld->ld_target == NULL)
+	LASSERT(ld->ld_lov);
+	if (!ld->ld_target)
 		return NULL;
 
 	lov_foreach_target(ld, i) {
 		struct lovsub_device *lsd;
 
 		lsd = ld->ld_target[i];
-		if (lsd != NULL) {
+		if (lsd) {
 			cl_stack_fini(env, lovsub2cl_dev(lsd));
 			ld->ld_target[i] = NULL;
 		}
@@ -222,8 +222,8 @@
 	int i;
 	int rc = 0;
 
-	LASSERT(d->ld_site != NULL);
-	if (ld->ld_target == NULL)
+	LASSERT(d->ld_site);
+	if (!ld->ld_target)
 		return rc;
 
 	lov_foreach_target(ld, i) {
@@ -232,7 +232,7 @@
 		struct lov_tgt_desc  *desc;
 
 		desc = ld->ld_lov->lov_tgts[i];
-		if (desc == NULL)
+		if (!desc)
 			continue;
 
 		cl = cl_type_setup(env, d->ld_site, &lovsub_device_type,
@@ -262,7 +262,7 @@
 	int result;
 
 	lr = kmem_cache_alloc(lov_req_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lr != NULL) {
+	if (lr) {
 		cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
 		result = 0;
 	} else
@@ -282,9 +282,9 @@
 		struct lov_device_emerg *em;
 
 		em = emrg[i];
-		if (em != NULL) {
+		if (em) {
 			LASSERT(em->emrg_page_list.pl_nr == 0);
-			if (em->emrg_env != NULL)
+			if (em->emrg_env)
 				cl_env_put(em->emrg_env, &em->emrg_refcheck);
 			kfree(em);
 		}
@@ -300,7 +300,7 @@
 
 	cl_device_fini(lu2cl_dev(d));
 	kfree(ld->ld_target);
-	if (ld->ld_emrg != NULL)
+	if (ld->ld_emrg)
 		lov_emerg_free(ld->ld_emrg, nr);
 	kfree(ld);
 	return NULL;
@@ -311,7 +311,7 @@
 {
 	struct lov_device *ld = lu2lov_dev(dev);
 
-	if (ld->ld_target[index] != NULL) {
+	if (ld->ld_target[index]) {
 		cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
 		ld->ld_target[index] = NULL;
 	}
@@ -324,17 +324,17 @@
 	int result;
 
 	emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS);
-	if (emerg == NULL)
+	if (!emerg)
 		return ERR_PTR(-ENOMEM);
 	for (result = i = 0; i < nr && result == 0; i++) {
 		struct lov_device_emerg *em;
 
 		em = kzalloc(sizeof(*em), GFP_NOFS);
-		if (em != NULL) {
+		if (em) {
 			emerg[i] = em;
 			cl_page_list_init(&em->emrg_page_list);
 			em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
-						    LCT_REMEMBER|LCT_NOREF);
+						    LCT_REMEMBER | LCT_NOREF);
 			if (!IS_ERR(em->emrg_env))
 				em->emrg_env->le_ctx.lc_cookie = 0x2;
 			else {
@@ -370,7 +370,7 @@
 			return PTR_ERR(emerg);
 
 		newd = kcalloc(tgt_size, sz, GFP_NOFS);
-		if (newd != NULL) {
+		if (newd) {
 			mutex_lock(&dev->ld_mutex);
 			if (sub_size > 0) {
 				memcpy(newd, dev->ld_target, sub_size * sz);
@@ -379,7 +379,7 @@
 			dev->ld_target    = newd;
 			dev->ld_target_nr = tgt_size;
 
-			if (dev->ld_emrg != NULL)
+			if (dev->ld_emrg)
 				lov_emerg_free(dev->ld_emrg, sub_size);
 			dev->ld_emrg = emerg;
 			mutex_unlock(&dev->ld_mutex);
@@ -404,8 +404,6 @@
 	obd_getref(obd);
 
 	tgt = obd->u.lov.lov_tgts[index];
-	LASSERT(tgt != NULL);
-	LASSERT(tgt->ltd_obd != NULL);
 
 	if (!tgt->ltd_obd->obd_set_up) {
 		CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid));
@@ -414,7 +412,7 @@
 
 	rc = lov_expand_targets(env, ld);
 	if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) {
-		LASSERT(dev->ld_site != NULL);
+		LASSERT(dev->ld_site);
 
 		cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type,
 				   tgt->ltd_obd->obd_lu_dev);
@@ -492,7 +490,7 @@
 
 	/* setup the LOV OBD */
 	obd = class_name2obd(lustre_cfg_string(cfg, 0));
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	rc = lov_setup(obd, cfg);
 	if (rc) {
 		lov_device_free(env, d);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index b3c9c85..c27b884 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -101,7 +101,7 @@
 
 	for (i = 0; i < stripe_count; i++) {
 		loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO);
-		if (loi == NULL)
+		if (!loi)
 			goto err;
 		lsm->lsm_oinfo[i] = loi;
 	}
@@ -162,12 +162,13 @@
 }
 
 /* Find minimum stripe maxbytes value.  For inactive or
- * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */
+ * reconnecting targets use LUSTRE_STRIPE_MAXBYTES.
+ */
 static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
 {
 	struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
 
-	if (imp == NULL || !tgt->ltd_active) {
+	if (!imp || !tgt->ltd_active) {
 		*stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
 		return;
 	}
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 2d00bad..725ebf9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -43,7 +43,8 @@
 /* lov_do_div64(a, b) returns a % b, and a = a / b.
  * The 32-bit code is LOV-specific due to knowing about stripe limits in
  * order to reduce the divisor to a 32-bit number.  If the divisor is
- * already a 32-bit value the compiler handles this directly. */
+ * already a 32-bit value, the compiler handles this directly.
+ */
 #if BITS_PER_LONG == 64
 # define lov_do_div64(n, base) ({					\
 	uint64_t __base = (base);					\
@@ -92,7 +93,8 @@
 	atomic_t			set_refcount;
 	struct obd_export		*set_exp;
 	/* XXX: There is @set_exp already, however obd_statfs gets obd_device
-	   only. */
+	 * only.
+	 */
 	struct obd_device		*set_obd;
 	int				set_count;
 	atomic_t			set_completes;
@@ -114,7 +116,6 @@
 
 static inline void lov_get_reqset(struct lov_request_set *set)
 {
-	LASSERT(set != NULL);
 	LASSERT(atomic_read(&set->set_refcount) > 0);
 	atomic_inc(&set->set_refcount);
 }
@@ -197,7 +198,7 @@
 int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
 		 struct lov_mds_md *lmm, int lmm_bytes);
 int lov_getstripe(struct obd_export *exp,
-		  struct lov_stripe_md *lsm, struct lov_user_md *lump);
+		  struct lov_stripe_md *lsm, struct lov_user_md __user *lump);
 int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
 		    int pattern, int magic);
 int lov_free_memmd(struct lov_stripe_md **lsmp);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 93fe69e..50954ce 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -60,7 +60,7 @@
 static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
 			    struct lov_io_sub *sub)
 {
-	if (sub->sub_io != NULL) {
+	if (sub->sub_io) {
 		if (sub->sub_io_initialized) {
 			lov_sub_enter(sub);
 			cl_io_fini(sub->sub_env, sub->sub_io);
@@ -74,7 +74,7 @@
 			kfree(sub->sub_io);
 		sub->sub_io = NULL;
 	}
-	if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
+	if (!IS_ERR_OR_NULL(sub->sub_env)) {
 		if (!sub->sub_borrowed)
 			cl_env_put(sub->sub_env, &sub->sub_refcheck);
 		sub->sub_env = NULL;
@@ -143,11 +143,11 @@
 	int stripe = sub->sub_stripe;
 	int result;
 
-	LASSERT(sub->sub_io == NULL);
-	LASSERT(sub->sub_env == NULL);
+	LASSERT(!sub->sub_io);
+	LASSERT(!sub->sub_env);
 	LASSERT(sub->sub_stripe < lio->lis_stripe_count);
 
-	if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL))
+	if (unlikely(!lov_r0(lov)->lo_sub[stripe]))
 		return -EIO;
 
 	result = 0;
@@ -252,7 +252,6 @@
 	subobj = lu2lovsub(
 		lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
 				 &lovsub_device_type));
-	LASSERT(subobj != NULL);
 	return subobj->lso_index;
 }
 
@@ -263,9 +262,9 @@
 	struct cl_page       *page = slice->cpl_page;
 	int stripe;
 
-	LASSERT(lio->lis_cl.cis_io != NULL);
+	LASSERT(lio->lis_cl.cis_io);
 	LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
-	LASSERT(lsm != NULL);
+	LASSERT(lsm);
 	LASSERT(lio->lis_nr_subios > 0);
 
 	stripe = lov_page_stripe(page);
@@ -278,7 +277,7 @@
 	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
 	int result;
 
-	LASSERT(lio->lis_object != NULL);
+	LASSERT(lio->lis_object);
 
 	/*
 	 * Need to be optimized, we can't afford to allocate a piece of memory
@@ -288,7 +287,7 @@
 		libcfs_kvzalloc(lsm->lsm_stripe_count *
 				sizeof(lio->lis_subs[0]),
 				GFP_NOFS);
-	if (lio->lis_subs != NULL) {
+	if (lio->lis_subs) {
 		lio->lis_nr_subios = lio->lis_stripe_count;
 		lio->lis_single_subio_index = -1;
 		lio->lis_active_subios = 0;
@@ -304,7 +303,6 @@
 	io->ci_result = 0;
 	lio->lis_object = obj;
 
-	LASSERT(obj->lo_lsm != NULL);
 	lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;
 
 	switch (io->ci_type) {
@@ -358,7 +356,7 @@
 	struct lov_object *lov = cl2lov(ios->cis_obj);
 	int i;
 
-	if (lio->lis_subs != NULL) {
+	if (lio->lis_subs) {
 		for (i = 0; i < lio->lis_nr_subios; i++)
 			lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
 		kvfree(lio->lis_subs);
@@ -395,7 +393,7 @@
 					   endpos, &start, &end))
 			continue;
 
-		if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) {
+		if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) {
 			if (ios->cis_io->ci_type == CIT_READ ||
 			    ios->cis_io->ci_type == CIT_WRITE ||
 			    ios->cis_io->ci_type == CIT_FAULT)
@@ -601,13 +599,13 @@
 		return rc;
 	}
 
-	LASSERT(lio->lis_subs != NULL);
+	LASSERT(lio->lis_subs);
 	if (alloc) {
 		stripes_qin =
 			libcfs_kvzalloc(sizeof(*stripes_qin) *
 					lio->lis_nr_subios,
 					GFP_NOFS);
-		if (stripes_qin == NULL)
+		if (!stripes_qin)
 			return -ENOMEM;
 
 		for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
@@ -955,7 +953,7 @@
 	struct lov_io *lio = lov_env_io(env);
 	int result;
 
-	LASSERT(lov->lo_lsm != NULL);
+	LASSERT(lov->lo_lsm);
 	lio->lis_object = lov;
 
 	switch (io->ci_type) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index d866791..e0a6438 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -115,7 +115,7 @@
 	/*
 	 * check that sub-lock doesn't have lock link to this top-lock.
 	 */
-	LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
+	LASSERT(!lov_lock_link_find(env, lck, lsl));
 	LASSERT(idx < lck->lls_nr);
 
 	lck->lls_sub[idx].sub_lock = lsl;
@@ -145,7 +145,7 @@
 	LASSERT(idx < lck->lls_nr);
 
 	link = kmem_cache_alloc(lov_lock_link_kmem, GFP_NOFS | __GFP_ZERO);
-	if (link != NULL) {
+	if (link) {
 		struct lov_sublock_env *subenv;
 		struct lov_lock_sub  *lls;
 		struct cl_lock_descr *descr;
@@ -160,7 +160,8 @@
 			 * to remember the subio. This is because lock is able
 			 * to be cached, but this is not true for IO. This
 			 * further means a sublock might be referenced in
-			 * different io context. -jay */
+			 * different io context. -jay
+			 */
 
 			sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
 					       descr, "lov-parent", parent);
@@ -220,7 +221,7 @@
 			LASSERT(!(lls->sub_flags & LSF_HELD));
 
 			link = lov_lock_link_find(env, lck, sublock);
-			LASSERT(link != NULL);
+			LASSERT(link);
 			lov_lock_unlink(env, link, sublock);
 			lov_sublock_unlock(env, sublock, closure, NULL);
 			lck->lls_cancel_race = 1;
@@ -309,14 +310,14 @@
 		 * XXX for wide striping smarter algorithm is desirable,
 		 * breaking out of the loop, early.
 		 */
-		if (likely(r0->lo_sub[i] != NULL) &&
+		if (likely(r0->lo_sub[i]) &&
 		    lov_stripe_intersects(loo->lo_lsm, i,
 					  file_start, file_end, &start, &end))
 			nr++;
 	}
 	LASSERT(nr > 0);
 	lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS);
-	if (lck->lls_sub == NULL)
+	if (!lck->lls_sub)
 		return -ENOMEM;
 
 	lck->lls_nr = nr;
@@ -328,14 +329,14 @@
 	 * top-lock.
 	 */
 	for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
-		if (likely(r0->lo_sub[i] != NULL) &&
+		if (likely(r0->lo_sub[i]) &&
 		    lov_stripe_intersects(loo->lo_lsm, i,
 					  file_start, file_end, &start, &end)) {
 			struct cl_lock_descr *descr;
 
 			descr = &lck->lls_sub[nr].sub_descr;
 
-			LASSERT(descr->cld_obj == NULL);
+			LASSERT(!descr->cld_obj);
 			descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
 			descr->cld_start = cl_index(descr->cld_obj, start);
 			descr->cld_end   = cl_index(descr->cld_obj, end);
@@ -369,7 +370,6 @@
 		struct cl_lock    *sublock;
 		int dying;
 
-		LASSERT(lck->lls_sub[i].sub_lock != NULL);
 		sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
 		LASSERT(cl_lock_is_mutexed(sublock));
 
@@ -413,7 +413,6 @@
 	if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
 		struct cl_lock *sublock;
 
-		LASSERT(lck->lls_sub[i].sub_lock != NULL);
 		sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
 		LASSERT(cl_lock_is_mutexed(sublock));
 		LASSERT(sublock->cll_state != CLS_FREEING);
@@ -435,13 +434,13 @@
 
 	lck = cl2lov_lock(slice);
 	LASSERT(lck->lls_nr_filled == 0);
-	if (lck->lls_sub != NULL) {
+	if (lck->lls_sub) {
 		for (i = 0; i < lck->lls_nr; ++i)
 			/*
 			 * No sub-locks exists at this point, as sub-lock has
 			 * a reference on its parent.
 			 */
-			LASSERT(lck->lls_sub[i].sub_lock == NULL);
+			LASSERT(!lck->lls_sub[i].sub_lock);
 		kvfree(lck->lls_sub);
 	}
 	kmem_cache_free(lov_lock_kmem, lck);
@@ -479,7 +478,8 @@
 	result = cl_enqueue_try(env, sublock, io, enqflags);
 	if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
 		/* if it is enqueued, try to `wait' on it---maybe it's already
-		 * granted */
+		 * granted
+		 */
 		result = cl_wait_try(env, sublock);
 		if (result == CLO_REENQUEUED)
 			result = CLO_WAIT;
@@ -515,12 +515,13 @@
 	if (!IS_ERR(sublock)) {
 		cl_lock_get_trust(sublock);
 		if (parent->cll_state == CLS_QUEUING &&
-		    lck->lls_sub[idx].sub_lock == NULL) {
+		    !lck->lls_sub[idx].sub_lock) {
 			lov_sublock_adopt(env, lck, sublock, idx, link);
 		} else {
 			kmem_cache_free(lov_lock_link_kmem, link);
 			/* other thread allocated sub-lock, or enqueue is no
-			 * longer going on */
+			 * longer going on
+			 */
 			cl_lock_mutex_put(env, parent);
 			cl_lock_unhold(env, sublock, "lov-parent", parent);
 			cl_lock_mutex_get(env, parent);
@@ -574,10 +575,11 @@
 		 * Sub-lock might have been canceled, while top-lock was
 		 * cached.
 		 */
-		if (sub == NULL) {
+		if (!sub) {
 			result = lov_sublock_fill(env, lock, io, lck, i);
 			/* lov_sublock_fill() released @lock mutex,
-			 * restart. */
+			 * restart.
+			 */
 			break;
 		}
 		sublock = sub->lss_cl.cls_lock;
@@ -605,7 +607,8 @@
 					/* take recursive mutex of sublock */
 					cl_lock_mutex_get(env, sublock);
 					/* need to release all locks in closure
-					 * otherwise it may deadlock. LU-2683.*/
+					 * otherwise it may deadlock. LU-2683.
+					 */
 					lov_sublock_unlock(env, sub, closure,
 							   subenv);
 					/* sublock and parent are held. */
@@ -620,7 +623,7 @@
 					break;
 				}
 			} else {
-				LASSERT(sublock->cll_conflict == NULL);
+				LASSERT(!sublock->cll_conflict);
 				lov_sublock_unlock(env, sub, closure, subenv);
 			}
 		}
@@ -649,11 +652,12 @@
 
 		/* top-lock state cannot change concurrently, because single
 		 * thread (one that released the last hold) carries unlocking
-		 * to the completion. */
+		 * to the completion.
+		 */
 		LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
 		lls = &lck->lls_sub[i];
 		sub = lls->sub_lock;
-		if (sub == NULL)
+		if (!sub)
 			continue;
 
 		sublock = sub->lss_cl.cls_lock;
@@ -695,10 +699,11 @@
 
 		/* top-lock state cannot change concurrently, because single
 		 * thread (one that released the last hold) carries unlocking
-		 * to the completion. */
+		 * to the completion.
+		 */
 		lls = &lck->lls_sub[i];
 		sub = lls->sub_lock;
-		if (sub == NULL)
+		if (!sub)
 			continue;
 
 		sublock = sub->lss_cl.cls_lock;
@@ -757,7 +762,6 @@
 
 		lls = &lck->lls_sub[i];
 		sub = lls->sub_lock;
-		LASSERT(sub != NULL);
 		sublock = sub->lss_cl.cls_lock;
 		rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
 		if (rc == 0) {
@@ -776,8 +780,9 @@
 		if (result != 0)
 			break;
 	}
-	/* Each sublock only can be reenqueued once, so will not loop for
-	 * ever. */
+	/* Each sublock can only be reenqueued once, so this will not
+	 * loop forever.
+	 */
 	if (result == 0 && reenqueued != 0)
 		goto again;
 	cl_lock_closure_fini(closure);
@@ -805,7 +810,7 @@
 
 		lls = &lck->lls_sub[i];
 		sub = lls->sub_lock;
-		if (sub == NULL) {
+		if (!sub) {
 			/*
 			 * Sub-lock might have been canceled, while top-lock was
 			 * cached.
@@ -826,7 +831,8 @@
 								 i, 1, rc);
 			} else if (sublock->cll_state == CLS_NEW) {
 				/* Sub-lock might have been canceled, while
-				 * top-lock was cached. */
+				 * top-lock was cached.
+				 */
 				result = -ESTALE;
 				lov_sublock_release(env, lck, i, 1, result);
 			}
@@ -852,45 +858,6 @@
 	return result;
 }
 
-#if 0
-static int lock_lock_multi_match()
-{
-	struct cl_lock	  *lock    = slice->cls_lock;
-	struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
-	struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
-	struct lov_layout_raid0 *r0      = lov_r0(loo);
-	struct lov_lock_sub     *sub;
-	struct cl_object	*subobj;
-	u64  fstart;
-	u64  fend;
-	u64  start;
-	u64  end;
-	int i;
-
-	fstart = cl_offset(need->cld_obj, need->cld_start);
-	fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
-	subneed->cld_mode = need->cld_mode;
-	cl_lock_mutex_get(env, lock);
-	for (i = 0; i < lov->lls_nr; ++i) {
-		sub = &lov->lls_sub[i];
-		if (sub->sub_lock == NULL)
-			continue;
-		subobj = sub->sub_descr.cld_obj;
-		if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
-					   fstart, fend, &start, &end))
-			continue;
-		subneed->cld_start = cl_index(subobj, start);
-		subneed->cld_end   = cl_index(subobj, end);
-		subneed->cld_obj   = subobj;
-		if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
-			result = 0;
-			break;
-		}
-	}
-	cl_lock_mutex_put(env, lock);
-}
-#endif
-
 /**
  * Check if the extent region \a descr is covered by \a child against the
  * specific \a stripe.
@@ -922,10 +889,10 @@
 
 		idx = lov_stripe_number(lsm, start);
 		if (idx == stripe ||
-		    unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) {
+		    unlikely(!lov_r0(lov)->lo_sub[idx])) {
 			idx = lov_stripe_number(lsm, end);
 			if (idx == stripe ||
-			    unlikely(lov_r0(lov)->lo_sub[idx] == NULL))
+			    unlikely(!lov_r0(lov)->lo_sub[idx]))
 				result = 1;
 		}
 	}
@@ -970,7 +937,8 @@
 	LASSERT(lov->lls_nr > 0);
 
 	/* for top lock, it's necessary to match enq flags otherwise it will
-	 * run into problem if a sublock is missing and reenqueue. */
+	 * run into problems if a sublock is missing and is reenqueued.
+	 */
 	if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
 		return 0;
 
@@ -1074,7 +1042,7 @@
 		struct lov_lock_sub *lls = &lck->lls_sub[i];
 		struct lovsub_lock  *lsl = lls->sub_lock;
 
-		if (lsl == NULL) /* already removed */
+		if (!lsl) /* already removed */
 			continue;
 
 		rc = lov_sublock_lock(env, lck, lls, closure, NULL);
@@ -1090,9 +1058,9 @@
 			lov_sublock_release(env, lck, i, 1, 0);
 
 		link = lov_lock_link_find(env, lck, lsl);
-		LASSERT(link != NULL);
+		LASSERT(link);
 		lov_lock_unlink(env, link, lsl);
-		LASSERT(lck->lls_sub[i].sub_lock == NULL);
+		LASSERT(!lck->lls_sub[i].sub_lock);
 
 		lov_sublock_unlock(env, lsl, closure, NULL);
 	}
@@ -1112,7 +1080,7 @@
 
 		sub = &lck->lls_sub[i];
 		(*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
-		if (sub->sub_lock != NULL)
+		if (sub->sub_lock)
 			cl_lock_print(env, cookie, p,
 				      sub->sub_lock->lss_cl.cls_lock);
 		else
@@ -1140,7 +1108,7 @@
 	int result;
 
 	lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lck != NULL) {
+	if (lck) {
 		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
 		result = lov_lock_sub_init(env, lck, io);
 	} else
@@ -1176,7 +1144,7 @@
 	int result = -ENOMEM;
 
 	lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lck != NULL) {
+	if (lck) {
 		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
 		lck->lls_orig = lock->cll_descr;
 		result = 0;
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 97115be..029cd4d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -129,7 +129,8 @@
 			       "stripe %d KMS %sing %llu->%llu\n",
 			       stripe, kms > loi->loi_kms ? "increase":"shrink",
 			       loi->loi_kms, kms);
-			loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
+			loi->loi_lvb.lvb_size = kms;
+			loi_kms_set(loi, loi->loi_lvb.lvb_size);
 		}
 		return 0;
 	}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 6c2bdfe..7ba670a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -61,7 +61,8 @@
 #include "lov_internal.h"
 
 /* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
-   Any function that expects lov_tgts to remain stationary must take a ref. */
+ * Any function that expects lov_tgts to remain stationary must take a ref.
+ */
 static void lov_getref(struct obd_device *obd)
 {
 	struct lov_obd *lov = &obd->u.lov;
@@ -96,7 +97,8 @@
 			list_add(&tgt->ltd_kill, &kill);
 			/* XXX - right now there is a dependency on ld_tgt_count
 			 * being the maximum tgt index for computing the
-			 * mds_max_easize. So we can't shrink it. */
+			 * mds_max_easize. So we can't shrink it.
+			 */
 			lov_ost_pool_remove(&lov->lov_packed, i);
 			lov->lov_tgts[i] = NULL;
 			lov->lov_death_row--;
@@ -158,7 +160,8 @@
 	if (activate) {
 		tgt_obd->obd_no_recov = 0;
 		/* FIXME this is probably supposed to be
-		   ptlrpc_set_import_active.  Horrible naming. */
+		 * ptlrpc_set_import_active.  Horrible naming.
+		 */
 		ptlrpc_activate_import(imp);
 	}
 
@@ -315,7 +318,8 @@
 	}
 
 	/* Let's hold another reference so lov_del_obd doesn't spin through
-	   putref every time */
+	 * putref every time
+	 */
 	obd_getref(obd);
 
 	for (i = 0; i < lov->desc.ld_tgt_count; i++) {
@@ -358,7 +362,7 @@
 		 * LU-642, initially inactive OSC could miss the obd_connect,
 		 * we make up for it here.
 		 */
-		if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL &&
+		if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp &&
 		    obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
 			struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"};
 
@@ -399,10 +403,9 @@
 			CDEBUG(D_INFO, "OSC %s already %sactive!\n",
 			       uuid->uuid, active ? "" : "in");
 			goto out;
-		} else {
-			CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
-			       obd_uuid2str(uuid), active ? "" : "in");
 		}
+		CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
+		       obd_uuid2str(uuid), active ? "" : "in");
 
 		lov->lov_tgts[index]->ltd_active = active;
 		if (active) {
@@ -481,7 +484,8 @@
 				continue;
 
 			/* don't send sync event if target not
-			 * connected/activated */
+			 * connected/activated
+			 */
 			if (is_sync &&  !lov->lov_tgts[i]->ltd_active)
 				continue;
 
@@ -521,12 +525,12 @@
 
 	tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME,
 					&obd->obd_uuid);
-	if (tgt_obd == NULL)
+	if (!tgt_obd)
 		return -EINVAL;
 
 	mutex_lock(&lov->lov_lock);
 
-	if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
+	if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) {
 		tgt = lov->lov_tgts[index];
 		CERROR("UUID %s already assigned at LOV target index %d\n",
 		       obd_uuid2str(&tgt->ltd_uuid), index);
@@ -543,7 +547,7 @@
 		while (newsize < index + 1)
 			newsize <<= 1;
 		newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
-		if (newtgts == NULL) {
+		if (!newtgts) {
 			mutex_unlock(&lov->lov_lock);
 			return -ENOMEM;
 		}
@@ -596,8 +600,9 @@
 
 	if (lov->lov_connects == 0) {
 		/* lov_connect hasn't been called yet. We'll do the
-		   lov_connect_obd on this target when that fn first runs,
-		   because we don't know the connect flags yet. */
+		 * lov_connect_obd on this target when that fn first runs,
+		 * because we don't know the connect flags yet.
+		 */
 		return 0;
 	}
 
@@ -613,7 +618,7 @@
 		goto out;
 	}
 
-	if (lov->lov_cache != NULL) {
+	if (lov->lov_cache) {
 		rc = obd_set_info_async(NULL, tgt->ltd_exp,
 				sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
 				sizeof(struct cl_client_cache), lov->lov_cache,
@@ -702,8 +707,9 @@
 	kfree(tgt);
 
 	/* Manual cleanup - no cleanup logs to clean up the osc's.  We must
-	   do it ourselves. And we can't do it from lov_cleanup,
-	   because we just lost our only reference to it. */
+	 * do it ourselves. And we can't do it from lov_cleanup,
+	 * because we just lost our only reference to it.
+	 */
 	if (osc_obd)
 		class_manual_cleanup(osc_obd);
 }
@@ -773,9 +779,9 @@
 
 	if (desc->ld_magic != LOV_DESC_MAGIC) {
 		if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) {
-			    CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
-				   obd->obd_name, desc);
-			    lustre_swab_lov_desc(desc);
+			CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
+			       obd->obd_name, desc);
+			lustre_swab_lov_desc(desc);
 		} else {
 			CERROR("%s: Bad lov desc magic: %#x\n",
 			       obd->obd_name, desc->ld_magic);
@@ -859,7 +865,8 @@
 		/* free pool structs */
 		CDEBUG(D_INFO, "delete pool %p\n", pool);
 		/* In the function below, .hs_keycmp resolves to
-		 * pool_hashkey_keycmp() */
+		 * pool_hashkey_keycmp()
+		 */
 		/* coverity[overrun-buffer-val] */
 		lov_pool_del(obd, pool->pool_name);
 	}
@@ -879,8 +886,9 @@
 			if (lov->lov_tgts[i]->ltd_active ||
 			    atomic_read(&lov->lov_refcount))
 			    /* We should never get here - these
-			       should have been removed in the
-			     disconnect. */
+			     * should have been removed in the
+			     * disconnect.
+			     */
 				CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
 				       i, lov->lov_death_row,
 				       atomic_read(&lov->lov_refcount));
@@ -981,7 +989,7 @@
 
 	ost_idx = src_oa->o_nlink;
 	lsm = *ea;
-	if (lsm == NULL) {
+	if (!lsm) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -1025,8 +1033,8 @@
 	struct lov_obd *lov;
 	int rc = 0;
 
-	LASSERT(ea != NULL);
-	if (exp == NULL)
+	LASSERT(ea);
+	if (!exp)
 		return -EINVAL;
 
 	if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
@@ -1043,7 +1051,7 @@
 	/* Recreate a specific object id at the given OST index */
 	if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
 	    (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) {
-		 rc = lov_recreate(exp, src_oa, ea, oti);
+		rc = lov_recreate(exp, src_oa, ea, oti);
 	}
 
 	obd_putref(exp->exp_obd);
@@ -1052,7 +1060,7 @@
 
 #define ASSERT_LSM_MAGIC(lsmp)						  \
 do {									    \
-	LASSERT((lsmp) != NULL);						\
+	LASSERT((lsmp));						\
 	LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 ||			  \
 		 (lsmp)->lsm_magic == LOV_MAGIC_V3),			    \
 		 "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic);	      \
@@ -1065,7 +1073,6 @@
 	struct lov_request_set *set;
 	struct obd_info oinfo;
 	struct lov_request *req;
-	struct list_head *pos;
 	struct lov_obd *lov;
 	int rc = 0, err = 0;
 
@@ -1085,9 +1092,7 @@
 	if (rc)
 		goto out;
 
-	list_for_each(pos, &set->set_list) {
-		req = list_entry(pos, struct lov_request, rq_link);
-
+	list_for_each_entry(req, &set->set_list, rq_link) {
 		if (oa->o_valid & OBD_MD_FLCOOKIE)
 			oti->oti_logcookies = set->set_cookies + req->rq_stripe;
 
@@ -1105,10 +1110,9 @@
 		}
 	}
 
-	if (rc == 0) {
-		LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+	if (rc == 0)
 		rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp);
-	}
+
 	err = lov_fini_destroy_set(set);
 out:
 	obd_putref(exp->exp_obd);
@@ -1133,7 +1137,6 @@
 {
 	struct lov_request_set *lovset;
 	struct lov_obd *lov;
-	struct list_head *pos;
 	struct lov_request *req;
 	int rc = 0, err;
 
@@ -1153,9 +1156,7 @@
 	       POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
 	       oinfo->oi_md->lsm_stripe_size);
 
-	list_for_each(pos, &lovset->set_list) {
-		req = list_entry(pos, struct lov_request, rq_link);
-
+	list_for_each_entry(req, &lovset->set_list, rq_link) {
 		CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
 		       POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
 		       POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
@@ -1174,7 +1175,7 @@
 
 	if (!list_empty(&rqset->set_requests)) {
 		LASSERT(rc == 0);
-		LASSERT(rqset->set_interpret == NULL);
+		LASSERT(!rqset->set_interpret);
 		rqset->set_interpret = lov_getattr_interpret;
 		rqset->set_arg = (void *)lovset;
 		return rc;
@@ -1199,14 +1200,14 @@
 }
 
 /* If @oti is given, the request goes from MDS and responses from OSTs are not
-   needed. Otherwise, a client is waiting for responses. */
+ * needed. Otherwise, a client is waiting for responses.
+ */
 static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
 			     struct obd_trans_info *oti,
 			     struct ptlrpc_request_set *rqset)
 {
 	struct lov_request_set *set;
 	struct lov_request *req;
-	struct list_head *pos;
 	struct lov_obd *lov;
 	int rc = 0;
 
@@ -1230,9 +1231,7 @@
 	       oinfo->oi_md->lsm_stripe_count,
 	       oinfo->oi_md->lsm_stripe_size);
 
-	list_for_each(pos, &set->set_list) {
-		req = list_entry(pos, struct lov_request, rq_link);
-
+	list_for_each_entry(req, &set->set_list, rq_link) {
 		if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
 			oti->oti_logcookies = set->set_cookies + req->rq_stripe;
 
@@ -1262,7 +1261,7 @@
 		return rc ? rc : err;
 	}
 
-	LASSERT(rqset->set_interpret == NULL);
+	LASSERT(!rqset->set_interpret);
 	rqset->set_interpret = lov_setattr_interpret;
 	rqset->set_arg = (void *)set;
 
@@ -1272,7 +1271,8 @@
 /* find any ldlm lock of the inode in lov
  * return 0    not find
  *	1    find one
- *      < 0    error */
+ *      < 0    error
+ */
 static int lov_find_cbdata(struct obd_export *exp,
 			   struct lov_stripe_md *lsm, ldlm_iterator_t it,
 			   void *data)
@@ -1326,20 +1326,17 @@
 	struct obd_device      *obd = class_exp2obd(exp);
 	struct lov_request_set *set;
 	struct lov_request *req;
-	struct list_head *pos;
 	struct lov_obd *lov;
 	int rc = 0;
 
-	LASSERT(oinfo != NULL);
-	LASSERT(oinfo->oi_osfs != NULL);
+	LASSERT(oinfo->oi_osfs);
 
 	lov = &obd->u.lov;
 	rc = lov_prep_statfs_set(obd, oinfo, &set);
 	if (rc)
 		return rc;
 
-	list_for_each(pos, &set->set_list) {
-		req = list_entry(pos, struct lov_request, rq_link);
+	list_for_each_entry(req, &set->set_list, rq_link) {
 		rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
 				      &req->rq_oi, max_age, rqset);
 		if (rc)
@@ -1355,7 +1352,7 @@
 		return rc ? rc : err;
 	}
 
-	LASSERT(rqset->set_interpret == NULL);
+	LASSERT(!rqset->set_interpret);
 	rqset->set_interpret = lov_statfs_interpret;
 	rqset->set_arg = (void *)set;
 	return 0;
@@ -1369,9 +1366,10 @@
 	int rc = 0;
 
 	/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
-	 * statfs requests */
+	 * statfs requests
+	 */
 	set = ptlrpc_prep_set();
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 
 	oinfo.oi_osfs = osfs;
@@ -1385,7 +1383,7 @@
 }
 
 static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
-			 void *karg, void *uarg)
+			 void *karg, void __user *uarg)
 {
 	struct obd_device *obddev = class_exp2obd(exp);
 	struct lov_obd *lov = &obddev->u.lov;
@@ -1420,7 +1418,9 @@
 					 (int) sizeof(struct obd_uuid))))
 			return -EFAULT;
 
-		flags = uarg ? *(__u32 *)uarg : 0;
+		memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
+		flags = (flags & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
+
 		/* got statfs data */
 		rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
@@ -1501,7 +1501,7 @@
 						     &qctl->obd_uuid))
 					continue;
 
-				if (tgt->ltd_exp == NULL)
+				if (!tgt->ltd_exp)
 					return -EINVAL;
 
 				break;
@@ -1543,14 +1543,15 @@
 				continue;
 
 			/* ll_umount_begin() sets force flag but for lov, not
-			 * osc. Let's pass it through */
+			 * osc. Let's pass it through
+			 */
 			osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
 			osc_obd->obd_force = obddev->obd_force;
 			err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
 					    len, karg, uarg);
-			if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
+			if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
 				return err;
-			} else if (err) {
+			if (err) {
 				if (lov->lov_tgts[i]->ltd_active) {
 					CDEBUG(err == -ENOTTY ?
 					       D_IOCTL : D_WARNING,
@@ -1620,7 +1621,8 @@
 		return -EINVAL;
 
 	/* If we have finished mapping on previous device, shift logical
-	 * offset to start of next device */
+	 * offset to start of next device
+	 */
 	if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
 				   &lun_start, &lun_end)) != 0 &&
 				   local_end < lun_end) {
@@ -1628,7 +1630,8 @@
 		*start_stripe = stripe_no;
 	} else {
 		/* This is a special value to indicate that caller should
-		 * calculate offset in next stripe. */
+		 * calculate offset in next stripe.
+		 */
 		fm_end_offset = 0;
 		*start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
 	}
@@ -1739,7 +1742,7 @@
 		buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
 
 	fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
-	if (fm_local == NULL) {
+	if (!fm_local) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1796,7 +1799,8 @@
 
 		/* If this is a continuation FIEMAP call and we are on
 		 * starting stripe then lun_start needs to be set to
-		 * fm_end_offset */
+		 * fm_end_offset
+		 */
 		if (fm_end_offset != 0 && cur_stripe == start_stripe)
 			lun_start = fm_end_offset;
 
@@ -1818,7 +1822,8 @@
 		len_mapped_single_call = 0;
 
 		/* If the output buffer is very large and the objects have many
-		 * extents we may need to loop on a single OST repeatedly */
+		 * extents we may need to loop on a single OST repeatedly
+		 */
 		ost_eof = 0;
 		ost_done = 0;
 		do {
@@ -1874,7 +1879,8 @@
 			if (ext_count == 0) {
 				ost_done = 1;
 				/* If last stripe has hole at the end,
-				 * then we need to return */
+				 * then we need to return
+				 */
 				if (cur_stripe_wrap == last_stripe) {
 					fiemap->fm_mapped_extents = 0;
 					goto finish;
@@ -1896,7 +1902,8 @@
 				ost_done = 1;
 
 			/* Clear the EXTENT_LAST flag which can be present on
-			 * last extent */
+			 * last extent
+			 */
 			if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
 				lcl_fm_ext[ext_count - 1].fe_flags &=
 							    ~FIEMAP_EXTENT_LAST;
@@ -1925,7 +1932,8 @@
 
 finish:
 	/* Indicate that we are returning device offsets unless file just has
-	 * single stripe */
+	 * single stripe
+	 */
 	if (lsm->lsm_stripe_count > 1)
 		fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
 
@@ -1933,7 +1941,8 @@
 		goto skip_last_device_calc;
 
 	/* Check if we have reached the last stripe and whether mapping for that
-	 * stripe is done. */
+	 * stripe is done.
+	 */
 	if (cur_stripe_wrap == last_stripe) {
 		if (ost_done || ost_eof)
 			fiemap->fm_extents[current_extent - 1].fe_flags |=
@@ -1978,10 +1987,12 @@
 
 		/* XXX This is another one of those bits that will need to
 		 * change if we ever actually support nested LOVs.  It uses
-		 * the lock's export to find out which stripe it is. */
+		 * the lock's export to find out which stripe it is.
+		 */
 		/* XXX - it's assumed all the locks for deleted OSTs have
 		 * been cancelled. Also, the export for deleted OSTs will
-		 * be NULL and won't match the lock's export. */
+		 * be NULL and won't match the lock's export.
+		 */
 		for (i = 0; i < lsm->lsm_stripe_count; i++) {
 			loi = lsm->lsm_oinfo[i];
 			if (lov_oinfo_is_dummy(loi))
@@ -2070,7 +2081,7 @@
 	unsigned next_id = 0,  mds_con = 0;
 
 	incr = check_uuid = do_inactive = no_set = 0;
-	if (set == NULL) {
+	if (!set) {
 		no_set = 1;
 		set = ptlrpc_prep_set();
 		if (!set)
@@ -2093,7 +2104,7 @@
 	} else if (KEY_IS(KEY_MDS_CONN)) {
 		mds_con = 1;
 	} else if (KEY_IS(KEY_CACHE_SET)) {
-		LASSERT(lov->lov_cache == NULL);
+		LASSERT(!lov->lov_cache);
 		lov->lov_cache = val;
 		do_inactive = 1;
 	}
@@ -2317,7 +2328,8 @@
 
 	/* print an address of _any_ initialized kernel symbol from this
 	 * module, to allow debugging with gdb that doesn't support data
-	 * symbols from modules.*/
+	 * symbols from modules.
+	 */
 	CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);
 
 	rc = lu_kmem_init(lov_caches);
@@ -2327,7 +2339,7 @@
 	lov_oinfo_slab = kmem_cache_create("lov_oinfo",
 					      sizeof(struct lov_oinfo),
 					      0, SLAB_HWCACHE_ALIGN, NULL);
-	if (lov_oinfo_slab == NULL) {
+	if (!lov_oinfo_slab) {
 		lu_kmem_fini(lov_caches);
 		return -ENOMEM;
 	}
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 3b79ebc..4101696 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -135,7 +135,8 @@
 		 * Do not leave the object in cache to avoid accessing
 		 * freed memory. This is because osc_object is referring to
 		 * lov_oinfo of lsm_stripe_data which will be freed due to
-		 * this failure. */
+		 * this failure.
+		 */
 		cl_object_kill(env, stripe);
 		cl_object_put(env, stripe);
 		return -EIO;
@@ -154,7 +155,7 @@
 	/* reuse ->coh_attr_guard to protect coh_parent change */
 	spin_lock(&subhdr->coh_attr_guard);
 	parent = subhdr->coh_parent;
-	if (parent == NULL) {
+	if (!parent) {
 		subhdr->coh_parent = hdr;
 		spin_unlock(&subhdr->coh_attr_guard);
 		subhdr->coh_nesting = hdr->coh_nesting + 1;
@@ -170,11 +171,12 @@
 
 		spin_unlock(&subhdr->coh_attr_guard);
 		old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
-		LASSERT(old_obj != NULL);
+		LASSERT(old_obj);
 		old_lov = cl2lov(lu2cl(old_obj));
 		if (old_lov->lo_layout_invalid) {
 			/* the object's layout has already changed but isn't
-			 * refreshed */
+			 * refreshed
+			 */
 			lu_object_unhash(env, &stripe->co_lu);
 			result = -EAGAIN;
 		} else {
@@ -212,14 +214,14 @@
 			 LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
 	}
 
-	LASSERT(lov->lo_lsm == NULL);
+	LASSERT(!lov->lo_lsm);
 	lov->lo_lsm = lsm_addref(lsm);
 	r0->lo_nr  = lsm->lsm_stripe_count;
 	LASSERT(r0->lo_nr <= lov_targets_nr(dev));
 
 	r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
 				     GFP_NOFS);
-	if (r0->lo_sub != NULL) {
+	if (r0->lo_sub) {
 		result = 0;
 		subconf->coc_inode = conf->coc_inode;
 		spin_lock_init(&r0->lo_sub_lock);
@@ -241,9 +243,10 @@
 
 			subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
 			subconf->u.coc_oinfo = oinfo;
-			LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
+			LASSERTF(subdev, "not init ost %d\n", ost_idx);
 			/* In the function below, .hs_keycmp resolves to
-			 * lu_obj_hop_keycmp() */
+			 * lu_obj_hop_keycmp()
+			 */
 			/* coverity[overrun-buffer-val] */
 			stripe = lov_sub_find(env, subdev, ofid, subconf);
 			if (!IS_ERR(stripe)) {
@@ -269,9 +272,9 @@
 {
 	struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
 
-	LASSERT(lsm != NULL);
+	LASSERT(lsm);
 	LASSERT(lsm_is_released(lsm));
-	LASSERT(lov->lo_lsm == NULL);
+	LASSERT(!lov->lo_lsm);
 
 	lov->lo_lsm = lsm_addref(lsm);
 	return 0;
@@ -310,7 +313,8 @@
 	cl_object_put(env, sub);
 
 	/* ... wait until it is actually destroyed---sub-object clears its
-	 * ->lo_sub[] slot in lovsub_object_fini() */
+	 * ->lo_sub[] slot in lovsub_object_fini()
+	 */
 	if (r0->lo_sub[idx] == los) {
 		waiter = &lov_env_info(env)->lti_waiter;
 		init_waitqueue_entry(waiter, current);
@@ -318,7 +322,8 @@
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		while (1) {
 			/* this wait-queue is signaled at the end of
-			 * lu_object_free(). */
+			 * lu_object_free().
+			 */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			spin_lock(&r0->lo_sub_lock);
 			if (r0->lo_sub[idx] == los) {
@@ -332,7 +337,7 @@
 		}
 		remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
 	}
-	LASSERT(r0->lo_sub[idx] == NULL);
+	LASSERT(!r0->lo_sub[idx]);
 }
 
 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
@@ -345,11 +350,11 @@
 	dump_lsm(D_INODE, lsm);
 
 	lov_layout_wait(env, lov);
-	if (r0->lo_sub != NULL) {
+	if (r0->lo_sub) {
 		for (i = 0; i < r0->lo_nr; ++i) {
 			struct lovsub_object *los = r0->lo_sub[i];
 
-			if (los != NULL) {
+			if (los) {
 				cl_locks_prune(env, &los->lso_cl, 1);
 				/*
 				 * If top-level object is to be evicted from
@@ -374,7 +379,7 @@
 {
 	struct lov_layout_raid0 *r0 = &state->raid0;
 
-	if (r0->lo_sub != NULL) {
+	if (r0->lo_sub) {
 		kvfree(r0->lo_sub);
 		r0->lo_sub = NULL;
 	}
@@ -412,7 +417,7 @@
 	for (i = 0; i < r0->lo_nr; ++i) {
 		struct lu_object *sub;
 
-		if (r0->lo_sub[i] != NULL) {
+		if (r0->lo_sub[i]) {
 			sub = lovsub2lu(r0->lo_sub[i]);
 			lu_object_print(env, cookie, p, sub);
 		} else {
@@ -465,7 +470,8 @@
 	 * context, and this function is called in ccc_lock_state(), it will
 	 * hit this assertion.
 	 * Anyway, it's still okay to call attr_get w/o type guard as layout
-	 * can't go if locks exist. */
+	 * can't go if locks exist.
+	 */
 	/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
 
 	if (!r0->lo_attr_valid) {
@@ -475,7 +481,8 @@
 
 		memset(lvb, 0, sizeof(*lvb));
 		/* XXX: timestamps can be negative by sanity:test_39m,
-		 * how can it be? */
+		 * how can it be?
+		 */
 		lvb->lvb_atime = LLONG_MIN;
 		lvb->lvb_ctime = LLONG_MIN;
 		lvb->lvb_mtime = LLONG_MIN;
@@ -569,7 +576,7 @@
  */
 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
 {
-	if (lsm == NULL)
+	if (!lsm)
 		return LLT_EMPTY;
 	if (lsm_is_released(lsm))
 		return LLT_RELEASED;
@@ -624,7 +631,7 @@
 {
 	LASSERT(lov->lo_owner != current);
 	down_write(&lov->lo_type_guard);
-	LASSERT(lov->lo_owner == NULL);
+	LASSERT(!lov->lo_owner);
 	lov->lo_owner = current;
 }
 
@@ -666,7 +673,7 @@
 
 	LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
 
-	if (conf->u.coc_md != NULL)
+	if (conf->u.coc_md)
 		llt = lov_type(conf->u.coc_md->lsm);
 	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
 
@@ -689,7 +696,7 @@
 		old_ops->llo_fini(env, lov, &lov->u);
 
 		LASSERT(atomic_read(&lov->lo_active_ios) == 0);
-		LASSERT(hdr->coh_tree.rnode == NULL);
+		LASSERT(!hdr->coh_tree.rnode);
 		LASSERT(hdr->coh_pages == 0);
 
 		lov->lo_type = LLT_EMPTY;
@@ -767,10 +774,10 @@
 
 	LASSERT(conf->coc_opc == OBJECT_CONF_SET);
 
-	if (conf->u.coc_md != NULL)
+	if (conf->u.coc_md)
 		lsm = conf->u.coc_md->lsm;
-	if ((lsm == NULL && lov->lo_lsm == NULL) ||
-	    ((lsm != NULL && lov->lo_lsm != NULL) &&
+	if ((!lsm && !lov->lo_lsm) ||
+	    ((lsm && lov->lo_lsm) &&
 	     (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
 	     (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
 		/* same version of layout */
@@ -845,7 +852,8 @@
 			struct cl_attr *attr)
 {
 	/* do not take lock, as this function is called under a
-	 * spin-lock. Layout is protected from changing by ongoing IO. */
+	 * spin-lock. Layout is protected from changing by ongoing IO.
+	 */
 	return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
 }
 
@@ -892,7 +900,7 @@
 	struct lu_object  *obj;
 
 	lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lov != NULL) {
+	if (lov) {
 		obj = lov2lu(lov);
 		lu_object_init(obj, NULL, dev);
 		lov->lo_cl.co_ops = &lov_ops;
@@ -913,7 +921,7 @@
 	struct lov_stripe_md *lsm = NULL;
 
 	lov_conf_freeze(lov);
-	if (lov->lo_lsm != NULL) {
+	if (lov->lo_lsm) {
 		lsm = lsm_addref(lov->lo_lsm);
 		CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
 			lsm, atomic_read(&lsm->lsm_refc),
@@ -928,12 +936,12 @@
 	struct lu_object *luobj;
 	struct lov_stripe_md *lsm = NULL;
 
-	if (clobj == NULL)
+	if (!clobj)
 		return NULL;
 
 	luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
 				 &lov_device_type);
-	if (luobj != NULL)
+	if (luobj)
 		lsm = lov_lsm_addref(lu2lov(luobj));
 	return lsm;
 }
@@ -941,7 +949,7 @@
 
 void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
 {
-	if (lsm != NULL)
+	if (lsm)
 		lov_free_memmd(&lsm);
 }
 EXPORT_SYMBOL(lov_lsm_put);
@@ -953,7 +961,7 @@
 
 	luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
 				 &lov_device_type);
-	if (luobj != NULL) {
+	if (luobj) {
 		struct lov_object *lov = lu2lov(luobj);
 
 		lov_conf_freeze(lov);
@@ -963,7 +971,6 @@
 			int i;
 
 			lsm = lov->lo_lsm;
-			LASSERT(lsm != NULL);
 			for (i = 0; i < lsm->lsm_stripe_count; i++) {
 				struct lov_oinfo *loi = lsm->lsm_oinfo[i];
 
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index aa520aa..1ceea3d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -55,7 +55,6 @@
 	if (ost_size == 0)
 		return 0;
 
-	LASSERT(lsm_op_find(magic) != NULL);
 	lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
 
 	/* lov_do_div64(a, b) returns a % b, and a = a / b */
@@ -115,7 +114,8 @@
  * this function returns < 0 when the offset was "before" the stripe and
  * was moved forward to the start of the stripe in question;  0 when it
  * falls in the stripe and no shifting was done; > 0 when the offset
- * was outside the stripe and was pulled back to its final byte. */
+ * was outside the stripe and was pulled back to its final byte.
+ */
 int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
 		      int stripeno, u64 *obdoff)
 {
@@ -129,8 +129,6 @@
 		return 0;
 	}
 
-	LASSERT(lsm_op_find(magic) != NULL);
-
 	lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
 						&swidth);
 
@@ -183,7 +181,6 @@
 	if (file_size == OBD_OBJECT_EOF)
 		return OBD_OBJECT_EOF;
 
-	LASSERT(lsm_op_find(magic) != NULL);
 	lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
 						&swidth);
 
@@ -213,7 +210,8 @@
 
 /* given an extent in an lov and a stripe, calculate the extent of the stripe
  * that is contained within the lov extent.  this returns true if the given
- * stripe does intersect with the lov extent. */
+ * stripe does intersect with the lov extent.
+ */
 int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
 			  u64 start, u64 end, u64 *obd_start, u64 *obd_end)
 {
@@ -227,7 +225,8 @@
 
 	/* this stripe doesn't intersect the file extent when neither
 	 * start or the end intersected the stripe and obd_start and
-	 * obd_end got rounded up to the save value. */
+	 * obd_end got rounded up to the same value.
+	 */
 	if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
 		return 0;
 
@@ -238,7 +237,8 @@
 	 * in the wrong direction and touch it up.
 	 * interestingly, this can't underflow since end must be > start
 	 * if we passed through the previous check.
-	 * (should we assert for that somewhere?) */
+	 * (should we assert for that somewhere?)
+	 */
 	if (end_side != 0)
 		(*obd_end)--;
 
@@ -252,7 +252,6 @@
 	u64 stripe_off, swidth;
 	int magic = lsm->lsm_magic;
 
-	LASSERT(lsm_op_find(magic) != NULL);
 	lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
 
 	stripe_off = lov_do_div64(lov_off, swidth);
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 6b2d100..9235d7d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -141,7 +141,8 @@
 
 	if (lsm) {
 		/* If we are just sizing the EA, limit the stripe count
-		 * to the actual number of OSTs in this filesystem. */
+		 * to the actual number of OSTs in this filesystem.
+		 */
 		if (!lmmp) {
 			stripe_count = lov_get_stripecnt(lov, lmm_magic,
 							lsm->lsm_stripe_count);
@@ -155,7 +156,8 @@
 		/* No need to allocate more than maximum supported stripes.
 		 * Anyway, this is pretty inaccurate since ld_tgt_count now
 		 * represents max index and we should rely on the actual number
-		 * of OSTs instead */
+		 * of OSTs instead
+		 */
 		stripe_count = lov_mds_md_max_stripe_count(
 			lov->lov_ocd.ocd_max_easize, lmm_magic);
 
@@ -183,7 +185,7 @@
 			return -ENOMEM;
 	}
 
-	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
+	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
 	       lmm_magic, lmm_size);
 
 	lmmv1 = *lmmp;
@@ -241,7 +243,8 @@
 		stripe_count = 1;
 
 	/* stripe count is based on whether ldiskfs can handle
-	 * larger EA sizes */
+	 * larger EA sizes
+	 */
 	if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
 	    lov->lov_ocd.ocd_max_easize)
 		max_stripes = lov_mds_md_max_stripe_count(
@@ -257,7 +260,7 @@
 {
 	int rc;
 
-	if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
+	if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) {
 		CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
 		       le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
 		CERROR("%*phN\n", lmm_bytes, lmm);
@@ -306,10 +309,9 @@
 	*lsmp = NULL;
 	LASSERT(atomic_read(&lsm->lsm_refc) > 0);
 	refc = atomic_dec_return(&lsm->lsm_refc);
-	if (refc == 0) {
-		LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+	if (refc == 0)
 		lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
-	}
+
 	return refc;
 }
 
@@ -359,7 +361,6 @@
 	if (!lmm)
 		return lsm_size;
 
-	LASSERT(lsm_op_find(magic) != NULL);
 	rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
 	if (rc) {
 		lov_free_memmd(lsmp);
@@ -376,7 +377,7 @@
  * lmm_magic must be LOV_USER_MAGIC.
  */
 int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
-		  struct lov_user_md *lump)
+		  struct lov_user_md __user *lump)
 {
 	/*
 	 * XXX huge struct allocated on stack.
@@ -399,13 +400,15 @@
 	set_fs(KERNEL_DS);
 
 	/* we only need the header part from user space to get lmm_magic and
-	 * lmm_stripe_count, (the header part is common to v1 and v3) */
+	 * lmm_stripe_count (the header part is common to v1 and v3).
+	 */
 	lum_size = sizeof(struct lov_user_md_v1);
 	if (copy_from_user(&lum, lump, lum_size)) {
 		rc = -EFAULT;
 		goto out_set;
-	} else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
-		 (lum.lmm_magic != LOV_USER_MAGIC_V3)) {
+	}
+	if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+	    (lum.lmm_magic != LOV_USER_MAGIC_V3)) {
 		rc = -EINVAL;
 		goto out_set;
 	}
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 037ae91..17ae450 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -57,7 +57,7 @@
 	const struct cl_page  *page = slice->cpl_page;
 	const struct cl_page  *sub  = lov_sub_page(slice);
 
-	return ergo(sub != NULL,
+	return ergo(sub,
 		    page->cp_child == sub &&
 		    sub->cp_parent == page &&
 		    page->cp_state == sub->cp_state);
@@ -70,7 +70,7 @@
 
 	LINVRNT(lov_page_invariant(slice));
 
-	if (sub != NULL) {
+	if (sub) {
 		LASSERT(sub->cp_state == CPS_FREEING);
 		lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
 		sub->cp_parent = NULL;
@@ -151,7 +151,7 @@
 static void lov_empty_page_fini(const struct lu_env *env,
 				struct cl_page_slice *slice)
 {
-	LASSERT(slice->cpl_page->cp_child == NULL);
+	LASSERT(!slice->cpl_page->cp_child);
 }
 
 int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index b43ce6c..210304f 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -64,7 +64,7 @@
 	if (atomic_dec_and_test(&pool->pool_refcount)) {
 		LASSERT(hlist_unhashed(&pool->pool_hash));
 		LASSERT(list_empty(&pool->pool_list));
-		LASSERT(pool->pool_debugfs_entry == NULL);
+		LASSERT(!pool->pool_debugfs_entry);
 		lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
 		lov_ost_pool_free(&(pool->pool_obds));
 		kfree(pool);
@@ -154,7 +154,7 @@
 
 /* ifdef needed for liblustre support */
 /*
- * pool /proc seq_file methods
+ * pool debugfs seq_file methods
  */
 /*
  * iterator is used to go through the target pool entries
@@ -204,7 +204,8 @@
 	if ((pool_tgt_count(pool) == 0) ||
 	    (*pos >= pool_tgt_count(pool))) {
 		/* iter is not created, so stop() has no way to
-		 * find pool to dec ref */
+		 * find pool to dec ref
+		 */
 		lov_pool_putref(pool);
 		return NULL;
 	}
@@ -217,7 +218,8 @@
 	iter->idx = 0;
 
 	/* we use seq_file private field to memorized iterator so
-	 * we can free it at stop() */
+	 * we can free it at stop()
+	 */
 	/* /!\ do not forget to restore it to pool before freeing it */
 	s->private = iter;
 	if (*pos > 0) {
@@ -226,8 +228,8 @@
 
 		i = 0;
 		do {
-		     ptr = pool_proc_next(s, &iter, &i);
-		} while ((i < *pos) && (ptr != NULL));
+			ptr = pool_proc_next(s, &iter, &i);
+		} while ((i < *pos) && ptr);
 		return ptr;
 	}
 	return iter;
@@ -239,15 +241,16 @@
 
 	/* in some cases stop() method is called 2 times, without
 	 * calling start() method (see seq_read() from fs/seq_file.c)
-	 * we have to free only if s->private is an iterator */
+	 * we have to free only if s->private is an iterator
+	 */
 	if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
 		/* we restore s->private so next call to pool_proc_start()
-		 * will work */
+		 * will work
+		 */
 		s->private = iter->pool;
 		lov_pool_putref(iter->pool);
 		kfree(iter);
 	}
-	return;
 }
 
 static int pool_proc_show(struct seq_file *s, void *v)
@@ -256,7 +259,7 @@
 	struct lov_tgt_desc *tgt;
 
 	LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
-	LASSERT(iter->pool != NULL);
+	LASSERT(iter->pool);
 	LASSERT(iter->idx <= pool_tgt_count(iter->pool));
 
 	down_read(&pool_tgt_rw_sem(iter->pool));
@@ -305,7 +308,7 @@
 	init_rwsem(&op->op_rw_sem);
 	op->op_size = count;
 	op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS);
-	if (op->op_array == NULL) {
+	if (!op->op_array) {
 		op->op_size = 0;
 		return -ENOMEM;
 	}
@@ -325,7 +328,7 @@
 
 	new_size = max(min_count, 2 * op->op_size);
 	new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS);
-	if (new == NULL)
+	if (!new)
 		return -ENOMEM;
 
 	/* copy old array to new one */
@@ -430,7 +433,7 @@
 	INIT_HLIST_NODE(&new_pool->pool_hash);
 
 	/* we need this assert seq_file is not implemented for liblustre */
-	/* get ref for /proc file */
+	/* get ref for debugfs file */
 	lov_pool_getref(new_pool);
 	new_pool->pool_debugfs_entry = ldebugfs_add_simple(
 						lov->lov_pool_debugfs_entry,
@@ -487,7 +490,7 @@
 
 	/* lookup and kill hash reference */
 	pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
-	if (pool == NULL)
+	if (!pool)
 		return -ENOENT;
 
 	if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) {
@@ -518,7 +521,7 @@
 	lov = &(obd->u.lov);
 
 	pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
-	if (pool == NULL)
+	if (!pool)
 		return -ENOENT;
 
 	obd_str2uuid(&ost_uuid, ostname);
@@ -564,7 +567,7 @@
 	lov = &(obd->u.lov);
 
 	pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
-	if (pool == NULL)
+	if (!pool)
 		return -ENOENT;
 
 	obd_str2uuid(&ost_uuid, ostname);
@@ -632,10 +635,10 @@
 	pool = NULL;
 	if (poolname[0] != '\0') {
 		pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
-		if (pool == NULL)
+		if (!pool)
 			CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
 			      poolname);
-		if ((pool != NULL) && (pool_tgt_count(pool) == 0)) {
+		if (pool && (pool_tgt_count(pool) == 0)) {
 			CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
 			       poolname);
 			/* pool is ignored, so we remove ref on it */
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 42deda7..42660f2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -156,7 +156,7 @@
 
 	tgt = lov->lov_tgts[ost_idx];
 
-	if (unlikely(tgt == NULL)) {
+	if (unlikely(!tgt)) {
 		rc = 0;
 		goto out;
 	}
@@ -178,7 +178,7 @@
 				   cfs_time_seconds(1), NULL, NULL);
 
 	rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi);
-	if (tgt != NULL && tgt->ltd_active)
+	if (tgt && tgt->ltd_active)
 		return 1;
 
 	return 0;
@@ -190,28 +190,23 @@
 
 static int common_attr_done(struct lov_request_set *set)
 {
-	struct list_head *pos;
 	struct lov_request *req;
 	struct obdo *tmp_oa;
 	int rc = 0, attrset = 0;
 
-	LASSERT(set->set_oi != NULL);
-
-	if (set->set_oi->oi_oa == NULL)
+	if (!set->set_oi->oi_oa)
 		return 0;
 
 	if (!atomic_read(&set->set_success))
 		return -EIO;
 
 	tmp_oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
-	if (tmp_oa == NULL) {
+	if (!tmp_oa) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
-	list_for_each(pos, &set->set_list) {
-		req = list_entry(pos, struct lov_request, rq_link);
-
+	list_for_each_entry(req, &set->set_list, rq_link) {
 		if (!req->rq_complete || req->rq_rc)
 			continue;
 		if (req->rq_oi.oi_oa->o_valid == 0)   /* inactive stripe */
@@ -227,7 +222,8 @@
 	if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
 	    (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
 		/* When we take attributes of some epoch, we require all the
-		 * ost to be active. */
+		 * ost to be active.
+		 */
 		CERROR("Not all the stripes had valid attrs\n");
 		rc = -EIO;
 		goto out;
@@ -246,7 +242,7 @@
 {
 	int rc = 0;
 
-	if (set == NULL)
+	if (!set)
 		return 0;
 	LASSERT(set->set_exp);
 	if (atomic_read(&set->set_completes))
@@ -258,7 +254,8 @@
 }
 
 /* The callback for osc_getattr_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
 static int cb_getattr_update(void *cookie, int rc)
 {
 	struct obd_info *oinfo = cookie;
@@ -312,7 +309,7 @@
 
 		req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
 						    GFP_NOFS | __GFP_ZERO);
-		if (req->rq_oi.oi_oa == NULL) {
+		if (!req->rq_oi.oi_oa) {
 			kfree(req);
 			rc = -ENOMEM;
 			goto out_set;
@@ -337,7 +334,7 @@
 
 int lov_fini_destroy_set(struct lov_request_set *set)
 {
-	if (set == NULL)
+	if (!set)
 		return 0;
 	LASSERT(set->set_exp);
 	if (atomic_read(&set->set_completes)) {
@@ -368,7 +365,7 @@
 	set->set_oi->oi_md = lsm;
 	set->set_oi->oi_oa = src_oa;
 	set->set_oti = oti;
-	if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE)
+	if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
 		set->set_cookies = oti->oti_logcookies;
 
 	for (i = 0; i < lsm->lsm_stripe_count; i++) {
@@ -395,7 +392,7 @@
 
 		req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
 						    GFP_NOFS | __GFP_ZERO);
-		if (req->rq_oi.oi_oa == NULL) {
+		if (!req->rq_oi.oi_oa) {
 			kfree(req);
 			rc = -ENOMEM;
 			goto out_set;
@@ -419,7 +416,7 @@
 {
 	int rc = 0;
 
-	if (set == NULL)
+	if (!set)
 		return 0;
 	LASSERT(set->set_exp);
 	if (atomic_read(&set->set_completes)) {
@@ -460,7 +457,8 @@
 }
 
 /* The callback for osc_setattr_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
 static int cb_setattr_update(void *cookie, int rc)
 {
 	struct obd_info *oinfo = cookie;
@@ -486,7 +484,7 @@
 	set->set_exp = exp;
 	set->set_oti = oti;
 	set->set_oi = oinfo;
-	if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+	if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
 		set->set_cookies = oti->oti_logcookies;
 
 	for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
@@ -511,7 +509,7 @@
 
 		req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
 						    GFP_NOFS | __GFP_ZERO);
-		if (req->rq_oi.oi_oa == NULL) {
+		if (!req->rq_oi.oi_oa) {
 			kfree(req);
 			rc = -ENOMEM;
 			goto out_set;
@@ -581,7 +579,7 @@
 {
 	int rc = 0;
 
-	if (set == NULL)
+	if (!set)
 		return 0;
 
 	if (atomic_read(&set->set_completes)) {
@@ -648,7 +646,8 @@
 }
 
 /* The callback for osc_statfs_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
 static int cb_statfs_update(void *cookie, int rc)
 {
 	struct obd_info *oinfo = cookie;
@@ -668,7 +667,8 @@
 	lov_sfs = oinfo->oi_osfs;
 	success = atomic_read(&set->set_success);
 	/* XXX: the same is done in lov_update_common_set, however
-	   lovset->set_exp is not initialized. */
+	 * lovset->set_exp is not initialized.
+	 */
 	lov_update_set(set, lovreq, rc);
 	if (rc)
 		goto out;
@@ -718,7 +718,7 @@
 	for (i = 0; i < lov->desc.ld_tgt_count; i++) {
 		struct lov_request *req;
 
-		if (lov->lov_tgts[i] == NULL ||
+		if (!lov->lov_tgts[i] ||
 		    (!lov_check_and_wait_active(lov, i) &&
 		     (oinfo->oi_flags & OBD_STATFS_NODELAY))) {
 			CDEBUG(D_HA, "lov idx %d inactive\n", i);
@@ -726,7 +726,8 @@
 		}
 
 		/* skip targets that have been explicitly disabled by the
-		 * administrator */
+		 * administrator
+		 */
 		if (!lov->lov_tgts[i]->ltd_exp) {
 			CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
 			continue;
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index f1795c3..233740c 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -101,7 +101,6 @@
 
 	next->ld_site = d->ld_site;
 	ldt = next->ld_type;
-	LASSERT(ldt != NULL);
 	rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
 	if (rc) {
 		next->ld_site = NULL;
@@ -149,7 +148,7 @@
 	int result;
 
 	lsr = kmem_cache_alloc(lovsub_req_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lsr != NULL) {
+	if (lsr) {
 		cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
 		result = 0;
 	} else
@@ -175,7 +174,7 @@
 	struct lovsub_device *lsd;
 
 	lsd = kzalloc(sizeof(*lsd), GFP_NOFS);
-	if (lsd != NULL) {
+	if (lsd) {
 		int result;
 
 		result = cl_device_init(&lsd->acid_cl, t);
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 1a3e30a..0046812 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -148,7 +148,8 @@
 {
 	pgoff_t size; /* stripe size in pages */
 	pgoff_t skip; /* how many pages in every stripe are occupied by
-		       * "other" stripes */
+		       * "other" stripes
+		       */
 	pgoff_t start;
 	pgoff_t end;
 
@@ -284,7 +285,8 @@
 	switch (parent->cll_state) {
 	case CLS_ENQUEUED:
 		/* See LU-1355 for the case that a glimpse lock is
-		 * interrupted by signal */
+		 * interrupted by signal
+		 */
 		LASSERT(parent->cll_flags & CLF_CANCELLED);
 		break;
 	case CLS_QUEUING:
@@ -429,7 +431,7 @@
 	list_for_each_entry(scan, &sub->lss_parents, lll_list) {
 		lov = scan->lll_super;
 		(*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
-		if (lov != NULL)
+		if (lov)
 			cl_lock_descr_print(env, cookie, p,
 					    &lov->lls_cl.cls_lock->cll_descr);
 		(*p)(env, cookie, "] ");
@@ -454,7 +456,7 @@
 	int result;
 
 	lsk = kmem_cache_alloc(lovsub_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lsk != NULL) {
+	if (lsk) {
 		INIT_LIST_HEAD(&lsk->lss_parents);
 		cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
 		result = 0;
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 5ba5ee1..0a9bf51 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -63,7 +63,7 @@
 
 	under = &dev->acid_next->cd_lu_dev;
 	below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
-	if (below != NULL) {
+	if (below) {
 		lu_object_add(obj, below);
 		cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
 		result = 0;
@@ -144,7 +144,7 @@
 	struct lu_object     *obj;
 
 	los = kmem_cache_alloc(lovsub_object_kmem, GFP_NOFS | __GFP_ZERO);
-	if (los != NULL) {
+	if (los) {
 		struct cl_object_header *hdr;
 
 		obj = lovsub2lu(los);
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 337241d..bb81d04 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -46,7 +46,7 @@
 	struct obd_device *dev = (struct obd_device *)m->private;
 	struct lov_desc *desc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	seq_printf(m, "%llu\n", desc->ld_default_stripe_size);
 	return 0;
@@ -61,7 +61,7 @@
 	__u64 val;
 	int rc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	rc = lprocfs_write_u64_helper(buffer, count, &val);
 	if (rc)
@@ -79,7 +79,7 @@
 	struct obd_device *dev = (struct obd_device *)m->private;
 	struct lov_desc *desc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	seq_printf(m, "%llu\n", desc->ld_default_stripe_offset);
 	return 0;
@@ -94,7 +94,7 @@
 	__u64 val;
 	int rc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	rc = lprocfs_write_u64_helper(buffer, count, &val);
 	if (rc)
@@ -111,7 +111,7 @@
 	struct obd_device *dev = (struct obd_device *)m->private;
 	struct lov_desc *desc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	seq_printf(m, "%u\n", desc->ld_pattern);
 	return 0;
@@ -125,7 +125,7 @@
 	struct lov_desc *desc;
 	int val, rc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	rc = lprocfs_write_helper(buffer, count, &val);
 	if (rc)
@@ -143,7 +143,7 @@
 	struct obd_device *dev = (struct obd_device *)m->private;
 	struct lov_desc *desc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1);
 	return 0;
@@ -157,7 +157,7 @@
 	struct lov_desc *desc;
 	int val, rc;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	desc = &dev->u.lov.desc;
 	rc = lprocfs_write_helper(buffer, count, &val);
 	if (rc)
@@ -199,7 +199,7 @@
 	struct obd_device *dev = (struct obd_device *)m->private;
 	struct lov_obd *lov;
 
-	LASSERT(dev != NULL);
+	LASSERT(dev);
 	lov = &dev->u.lov;
 	seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid);
 	return 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 3d2997a..54f4ca6 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -90,7 +90,7 @@
 		struct ptlrpc_request **req, __u64 extra_lock_flags);
 
 int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
-			    struct list_head *cancels, ldlm_mode_t mode,
+			    struct list_head *cancels, enum ldlm_mode mode,
 			    __u64 bits);
 /* mdc/mdc_request.c */
 int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
@@ -119,8 +119,8 @@
 int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
 	       struct ptlrpc_request **request);
 int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
-		      ldlm_policy_data_t *policy, ldlm_mode_t mode,
-		      ldlm_cancel_flags_t flags, void *opaque);
+		      ldlm_policy_data_t *policy, enum ldlm_mode mode,
+		      enum ldlm_cancel_flags flags, void *opaque);
 
 int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
 			struct lu_fid *fid, __u64 *bits);
@@ -129,10 +129,10 @@
 			     struct md_enqueue_info *minfo,
 			     struct ldlm_enqueue_info *einfo);
 
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
-			   const struct lu_fid *fid, ldlm_type_t type,
-			   ldlm_policy_data_t *policy, ldlm_mode_t mode,
-			   struct lustre_handle *lockh);
+enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
+			      const struct lu_fid *fid, enum ldlm_type type,
+			      ldlm_policy_data_t *policy, enum ldlm_mode mode,
+			      struct lustre_handle *lockh);
 
 static inline int mdc_prep_elc_req(struct obd_export *exp,
 				   struct ptlrpc_request *req, int opc,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 7218532..b3bfdcb7 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -41,8 +41,6 @@
 
 static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
 {
-	LASSERT(b != NULL);
-
 	b->suppgid = suppgid;
 	b->uid = from_kuid(&init_user_ns, current_uid());
 	b->gid = from_kgid(&init_user_ns, current_gid());
@@ -83,7 +81,6 @@
 {
 	struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
 						    &RMF_MDT_BODY);
-	LASSERT(b != NULL);
 	b->valid = valid;
 	b->eadatasize = ea_size;
 	b->flags = flags;
@@ -323,7 +320,7 @@
 		return;
 
 	lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
-	if (ea == NULL) { /* Remove LOV EA */
+	if (!ea) { /* Remove LOV EA */
 		lum->lmm_magic = LOV_USER_MAGIC_V1;
 		lum->lmm_stripe_size = 0;
 		lum->lmm_stripe_count = 0;
@@ -346,7 +343,6 @@
 
 	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
 	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-	LASSERT(rec != NULL);
 
 	rec->ul_opcode   = op_data->op_cli_flags & CLI_RM_ENTRY ?
 					REINT_RMENTRY : REINT_UNLINK;
@@ -362,7 +358,7 @@
 	rec->ul_bias     = op_data->op_bias;
 
 	tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
-	LASSERT(tmp != NULL);
+	LASSERT(tmp);
 	LOGL0(op_data->op_name, op_data->op_namelen, tmp);
 }
 
@@ -373,7 +369,6 @@
 
 	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link));
 	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-	LASSERT(rec != NULL);
 
 	rec->lk_opcode   = REINT_LINK;
 	rec->lk_fsuid    = op_data->op_fsuid; /* current->fsuid; */
@@ -456,10 +451,9 @@
 		struct ldlm_lock *lock;
 
 		data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
-		LASSERT(data != NULL);
 
 		lock = ldlm_handle2lock(&op_data->op_lease_handle);
-		if (lock != NULL) {
+		if (lock) {
 			data->cd_handle = lock->l_remote_handle;
 			ldlm_lock_put(lock);
 		}
@@ -495,7 +489,8 @@
 
 /* We record requests in flight in cli->cl_r_in_flight here.
  * There is only one write rpc possible in mdc anyway. If this to change
- * in the future - the code may need to be revisited. */
+ * in the future - the code may need to be revisited.
+ */
 int mdc_enter_request(struct client_obd *cli)
 {
 	int rc = 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index ef9a1e1..6dae574 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -129,7 +129,7 @@
 
 	lock = ldlm_handle2lock((struct lustre_handle *)lockh);
 
-	LASSERT(lock != NULL);
+	LASSERT(lock);
 	lock_res_and_lock(lock);
 	if (lock->l_resource->lr_lvb_inode &&
 	    lock->l_resource->lr_lvb_inode != data) {
@@ -151,13 +151,13 @@
 	return 0;
 }
 
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
-			   const struct lu_fid *fid, ldlm_type_t type,
-			   ldlm_policy_data_t *policy, ldlm_mode_t mode,
-			   struct lustre_handle *lockh)
+enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
+			      const struct lu_fid *fid, enum ldlm_type type,
+			      ldlm_policy_data_t *policy, enum ldlm_mode mode,
+			      struct lustre_handle *lockh)
 {
 	struct ldlm_res_id res_id;
-	ldlm_mode_t rc;
+	enum ldlm_mode rc;
 
 	fid_build_reg_res_name(fid, &res_id);
 	/* LU-4405: Clear bits not supported by server */
@@ -170,8 +170,8 @@
 int mdc_cancel_unused(struct obd_export *exp,
 		      const struct lu_fid *fid,
 		      ldlm_policy_data_t *policy,
-		      ldlm_mode_t mode,
-		      ldlm_cancel_flags_t flags,
+		      enum ldlm_mode mode,
+		      enum ldlm_cancel_flags flags,
 		      void *opaque)
 {
 	struct ldlm_res_id res_id;
@@ -191,12 +191,12 @@
 	struct ldlm_resource *res;
 	struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
 
-	LASSERTF(ns != NULL, "no namespace passed\n");
+	LASSERTF(ns, "no namespace passed\n");
 
 	fid_build_reg_res_name(fid, &res_id);
 
 	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
-	if (res == NULL)
+	if (!res)
 		return 0;
 
 	lock_res(res);
@@ -210,7 +210,8 @@
 /* find any ldlm lock of the inode in mdc
  * return 0    not find
  *	1    find one
- *      < 0    error */
+ *      < 0    error
+ */
 int mdc_find_cbdata(struct obd_export *exp,
 		    const struct lu_fid *fid,
 		    ldlm_iterator_t it, void *data)
@@ -252,7 +253,8 @@
  * OOM here may cause recovery failure if lmm is needed (only for the
  * original open if the MDS crashed just when this client also OOM'd)
  * but this is incredibly unlikely, and questionable whether the client
- * could do MDS recovery under OOM anyways... */
+ * could do MDS recovery under OOM anyways...
+ */
 static void mdc_realloc_openmsg(struct ptlrpc_request *req,
 				struct mdt_body *body)
 {
@@ -317,7 +319,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_LDLM_INTENT_OPEN);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -365,7 +367,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					&RQF_LDLM_INTENT_GETXATTR);
-	if (req == NULL)
+	if (!req)
 		return ERR_PTR(-ENOMEM);
 
 	rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
@@ -409,7 +411,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_LDLM_INTENT_UNLINK);
-	if (req == NULL)
+	if (!req)
 		return ERR_PTR(-ENOMEM);
 
 	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -453,7 +455,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_LDLM_INTENT_GETATTR);
-	if (req == NULL)
+	if (!req)
 		return ERR_PTR(-ENOMEM);
 
 	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -497,7 +499,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				&RQF_LDLM_INTENT_LAYOUT);
-	if (req == NULL)
+	if (!req)
 		return ERR_PTR(-ENOMEM);
 
 	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
@@ -514,7 +516,8 @@
 	/* pack the layout intent request */
 	layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT);
 	/* LAYOUT_INTENT_ACCESS is generic, specific operation will be
-	 * set for replication */
+	 * set for replication
+	 */
 	layout->li_opc = LAYOUT_INTENT_ACCESS;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
@@ -530,7 +533,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
-	if (req == NULL)
+	if (!req)
 		return ERR_PTR(-ENOMEM);
 
 	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
@@ -561,7 +564,8 @@
 
 	LASSERT(rc >= 0);
 	/* Similarly, if we're going to replay this request, we don't want to
-	 * actually get a lock, just perform the intent. */
+	 * actually get a lock, just perform the intent.
+	 */
 	if (req->rq_transno || req->rq_replay) {
 		lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
 		lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY);
@@ -573,10 +577,10 @@
 		rc = 0;
 	} else { /* rc = 0 */
 		lock = ldlm_handle2lock(lockh);
-		LASSERT(lock != NULL);
 
 		/* If the server gave us back a different lock mode, we should
-		 * fix up our variables. */
+		 * fix up our variables.
+		 */
 		if (lock->l_req_mode != einfo->ei_mode) {
 			ldlm_lock_addref(lockh, lock->l_req_mode);
 			ldlm_lock_decref(lockh, einfo->ei_mode);
@@ -586,7 +590,6 @@
 	}
 
 	lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
-	LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
 
 	intent->it_disposition = (int)lockrep->lock_policy_res1;
 	intent->it_status = (int)lockrep->lock_policy_res2;
@@ -595,7 +598,8 @@
 	intent->it_data = req;
 
 	/* Technically speaking rq_transno must already be zero if
-	 * it_status is in error, so the check is a bit redundant */
+	 * it_status is in error, so the check is a bit redundant
+	 */
 	if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
 		mdc_clear_replay_flag(req, intent->it_status);
 
@@ -605,7 +609,8 @@
 	 *
 	 * It's important that we do this first!  Otherwise we might exit the
 	 * function without doing so, and try to replay a failed create
-	 * (bug 3440) */
+	 * (bug 3440)
+	 */
 	if (it->it_op & IT_OPEN && req->rq_replay &&
 	    (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
 		mdc_clear_replay_flag(req, intent->it_status);
@@ -618,7 +623,7 @@
 		struct mdt_body *body;
 
 		body = req_capsule_server_get(pill, &RMF_MDT_BODY);
-		if (body == NULL) {
+		if (!body) {
 			CERROR("Can't swab mdt_body\n");
 			return -EPROTO;
 		}
@@ -645,11 +650,12 @@
 			 */
 			eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
 							      body->eadatasize);
-			if (eadata == NULL)
+			if (!eadata)
 				return -EPROTO;
 
 			/* save lvb data and length in case this is for layout
-			 * lock */
+			 * lock
+			 */
 			lvb_data = eadata;
 			lvb_len = body->eadatasize;
 
@@ -690,31 +696,32 @@
 			LASSERT(client_is_remote(exp));
 			perm = req_capsule_server_swab_get(pill, &RMF_ACL,
 						lustre_swab_mdt_remote_perm);
-			if (perm == NULL)
+			if (!perm)
 				return -EPROTO;
 		}
 	} else if (it->it_op & IT_LAYOUT) {
 		/* maybe the lock was granted right away and layout
-		 * is packed into RMF_DLM_LVB of req */
+		 * is packed into RMF_DLM_LVB of req
+		 */
 		lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
 		if (lvb_len > 0) {
 			lvb_data = req_capsule_server_sized_get(pill,
 							&RMF_DLM_LVB, lvb_len);
-			if (lvb_data == NULL)
+			if (!lvb_data)
 				return -EPROTO;
 		}
 	}
 
 	/* fill in stripe data for layout lock */
 	lock = ldlm_handle2lock(lockh);
-	if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) {
+	if (lock && ldlm_has_layout(lock) && lvb_data) {
 		void *lmm;
 
 		LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
 			ldlm_it2str(it->it_op), lvb_len);
 
 		lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS);
-		if (lmm == NULL) {
+		if (!lmm) {
 			LDLM_LOCK_PUT(lock);
 			return -ENOMEM;
 		}
@@ -722,24 +729,25 @@
 
 		/* install lvb_data */
 		lock_res_and_lock(lock);
-		if (lock->l_lvb_data == NULL) {
+		if (!lock->l_lvb_data) {
 			lock->l_lvb_type = LVB_T_LAYOUT;
 			lock->l_lvb_data = lmm;
 			lock->l_lvb_len = lvb_len;
 			lmm = NULL;
 		}
 		unlock_res_and_lock(lock);
-		if (lmm != NULL)
+		if (lmm)
 			kvfree(lmm);
 	}
-	if (lock != NULL)
+	if (lock)
 		LDLM_LOCK_PUT(lock);
 
 	return rc;
 }
 
 /* We always reserve enough space in the reply packet for a stripe MD, because
- * we don't know in advance the file type. */
+ * we don't know in advance the file type.
+ */
 int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 		struct lookup_intent *it, struct md_op_data *op_data,
 		struct lustre_handle *lockh, void *lmm, int lmmsize,
@@ -782,14 +790,15 @@
 			policy = &getxattr_policy;
 	}
 
-	LASSERT(reqp == NULL);
+	LASSERT(!reqp);
 
 	generation = obddev->u.cli.cl_import->imp_generation;
 resend:
 	flags = saved_flags;
 	if (!it) {
 		/* The only way right now is FLOCK, in this case we hide flock
-		   policy as lmm, but lmmsize is 0 */
+		 * policy as lmm, but lmmsize is 0
+		 */
 		LASSERT(lmm && lmmsize == 0);
 		LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
 			 einfo->ei_type);
@@ -823,9 +832,10 @@
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	if (req != NULL && it && it->it_op & IT_CREAT)
+	if (req && it && it->it_op & IT_CREAT)
 		/* ask ptlrpc not to resend on EINPROGRESS since we have our own
-		 * retry logic */
+		 * retry logic
+		 */
 		req->rq_no_retry_einprogress = 1;
 
 	if (resends) {
@@ -836,7 +846,8 @@
 
 	/* It is important to obtain rpc_lock first (if applicable), so that
 	 * threads that are serialised with rpc_lock are not polluting our
-	 * rpcs in flight counter. We do not do flock request limiting, though*/
+	 * rpcs in flight counter. We do not do flock request limiting, though
+	 */
 	if (it) {
 		mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
 		rc = mdc_enter_request(&obddev->u.cli);
@@ -852,13 +863,14 @@
 			      0, lvb_type, lockh, 0);
 	if (!it) {
 		/* For flock requests we immediately return without further
-		   delay and let caller deal with the rest, since rest of
-		   this function metadata processing makes no sense for flock
-		   requests anyway. But in case of problem during comms with
-		   Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we
-		   can not rely on caller and this mainly for F_UNLCKs
-		   (explicits or automatically generated by Kernel to clean
-		   current FLocks upon exit) that can't be trashed */
+		 * delay and let the caller deal with the rest, since the rest
+		 * of this function's metadata processing makes no sense for
+		 * flock requests anyway. But in case of a problem during
+		 * comms with the server (ETIMEDOUT) or any signal/kill attempt
+		 * (EINTR), we cannot rely on the caller; this is mainly for
+		 * F_UNLCKs (explicit, or automatically generated by the kernel
+		 * to clean up current flocks upon exit) that can't be trashed
+		 */
 		if ((rc == -EINTR) || (rc == -ETIMEDOUT))
 			goto resend;
 		return rc;
@@ -878,13 +890,13 @@
 	}
 
 	lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-	LASSERT(lockrep != NULL);
 
 	lockrep->lock_policy_res2 =
 		ptlrpc_status_ntoh(lockrep->lock_policy_res2);
 
 	/* Retry the create infinitely when we get -EINPROGRESS from
-	 * server. This is required by the new quota design. */
+	 * server. This is required by the new quota design.
+	 */
 	if (it->it_op & IT_CREAT &&
 	    (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
 		mdc_clear_replay_flag(req, rc);
@@ -930,13 +942,13 @@
 	struct ldlm_lock *lock;
 	int rc;
 
-	LASSERT(request != NULL);
 	LASSERT(request != LP_POISON);
 	LASSERT(request->rq_repmsg != LP_POISON);
 
 	if (!it_disposition(it, DISP_IT_EXECD)) {
 		/* The server failed before it even started executing the
-		 * intent, i.e. because it couldn't unpack the request. */
+		 * intent, i.e. because it couldn't unpack the request.
+		 */
 		LASSERT(it->d.lustre.it_status != 0);
 		return it->d.lustre.it_status;
 	}
@@ -945,10 +957,11 @@
 		return rc;
 
 	mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
-	LASSERT(mdt_body != NULL);      /* mdc_enqueue checked */
+	LASSERT(mdt_body);      /* mdc_enqueue checked */
 
 	/* If we were revalidating a fid/name pair, mark the intent in
-	 * case we fail and get called again from lookup */
+	 * case we fail and get called again from lookup
+	 */
 	if (fid_is_sane(&op_data->op_fid2) &&
 	    it->it_create_mode & M_CHECK_STALE &&
 	    it->it_op != IT_GETATTR) {
@@ -957,7 +970,8 @@
 		/* sever can return one of two fids:
 		 * op_fid2 - new allocated fid - if file is created.
 		 * op_fid3 - existent fid - if file only open.
-		 * op_fid3 is saved in lmv_intent_open */
+		 * op_fid3 is saved in lmv_intent_open
+		 */
 		if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) &&
 		    (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) {
 			CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
@@ -1001,7 +1015,8 @@
 	 * one.  We have to set the data here instead of in
 	 * mdc_enqueue, because we need to use the child's inode as
 	 * the l_ast_data to match, and that's not available until
-	 * intent_finish has performed the iget().) */
+	 * intent_finish has performed the iget().)
+	 */
 	lock = ldlm_handle2lock(lockh);
 	if (lock) {
 		ldlm_policy_data_t policy = lock->l_policy_data;
@@ -1036,11 +1051,12 @@
 {
 	/* We could just return 1 immediately, but since we should only
 	 * be called in revalidate_it if we already have a lock, let's
-	 * verify that. */
+	 * verify that.
+	 */
 	struct ldlm_res_id res_id;
 	struct lustre_handle lockh;
 	ldlm_policy_data_t policy;
-	ldlm_mode_t mode;
+	enum ldlm_mode mode;
 
 	if (it->d.lustre.it_lock_handle) {
 		lockh.cookie = it->d.lustre.it_lock_handle;
@@ -1059,10 +1075,12 @@
 			 * Unfortunately, if the bits are split across multiple
 			 * locks, there's no easy way to match all of them here,
 			 * so an extra RPC would be performed to fetch all
-			 * of those bits at once for now. */
+			 * of those bits at once for now.
+			 */
 			/* For new MDTs(> 2.4), UPDATE|PERM should be enough,
 			 * but for old MDTs (< 2.4), permission is covered
-			 * by LOOKUP lock, so it needs to match all bits here.*/
+			 * by LOOKUP lock, so it needs to match all bits here.
+			 */
 			policy.l_inodebits.bits = MDS_INODELOCK_UPDATE |
 						  MDS_INODELOCK_LOOKUP |
 						  MDS_INODELOCK_PERM;
@@ -1147,11 +1165,13 @@
 	    (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
 		/* We could just return 1 immediately, but since we should only
 		 * be called in revalidate_it if we already have a lock, let's
-		 * verify that. */
+		 * verify that.
+		 */
 		it->d.lustre.it_lock_handle = 0;
 		rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
 		/* Only return failure if it was not GETATTR by cfid
-		   (from inode_revalidate) */
+		 * (from inode_revalidate)
+		 */
 		if (rc || op_data->op_namelen != 0)
 			return rc;
 	}
@@ -1206,7 +1226,6 @@
 	}
 
 	lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-	LASSERT(lockrep != NULL);
 
 	lockrep->lock_policy_res2 =
 		ptlrpc_status_ntoh(lockrep->lock_policy_res2);
@@ -1235,7 +1254,8 @@
 	struct ldlm_res_id       res_id;
 	/*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed
 	 *     for statahead currently. Consider CMD in future, such two bits
-	 *     maybe managed by different MDS, should be adjusted then. */
+	 *     may be managed by different MDSs, should be adjusted then.
+	 */
 	ldlm_policy_data_t       policy = {
 					.l_inodebits = { MDS_INODELOCK_LOOKUP |
 							 MDS_INODELOCK_UPDATE }
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index ac7695a..019b2373 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -65,9 +65,10 @@
 
 /* Find and cancel locally locks matched by inode @bits & @mode in the resource
  * found by @fid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list. */
+ * locks added to @cancels list.
+ */
 int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
-			    struct list_head *cancels, ldlm_mode_t mode,
+			    struct list_head *cancels, enum ldlm_mode mode,
 			    __u64 bits)
 {
 	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
@@ -81,14 +82,15 @@
 	 *
 	 * This distinguishes from a case when ELC is not supported originally,
 	 * when we still want to cancel locks in advance and just cancel them
-	 * locally, without sending any RPC. */
+	 * locally, without sending any RPC.
+	 */
 	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
 		return 0;
 
 	fid_build_reg_res_name(fid, &res_id);
 	res = ldlm_resource_get(exp->exp_obd->obd_namespace,
 				NULL, &res_id, 0, 0);
-	if (res == NULL)
+	if (!res)
 		return 0;
 	LDLM_RESOURCE_ADDREF(res);
 	/* Initialize ibits lock policy. */
@@ -111,8 +113,6 @@
 	int count = 0, rc;
 	__u64 bits;
 
-	LASSERT(op_data != NULL);
-
 	bits = MDS_INODELOCK_UPDATE;
 	if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
 		bits |= MDS_INODELOCK_LOOKUP;
@@ -123,7 +123,7 @@
 						&cancels, LCK_EX, bits);
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_REINT_SETATTR);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -151,10 +151,10 @@
 	ptlrpc_request_set_replen(req);
 	if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
 	    req->rq_import->imp_replayable) {
-		LASSERT(*mod == NULL);
+		LASSERT(!*mod);
 
 		*mod = obd_mod_alloc();
-		if (*mod == NULL) {
+		if (!*mod) {
 			DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
 		} else {
 			req->rq_replay = 1;
@@ -181,8 +181,6 @@
 
 		epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
 		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-		LASSERT(epoch != NULL);
-		LASSERT(body != NULL);
 		epoch->handle = body->handle;
 		epoch->ioepoch = body->ioepoch;
 		req->rq_replay_cb = mdc_replay_open;
@@ -195,7 +193,7 @@
 	*request = req;
 	if (rc && req->rq_commit_cb) {
 		/* Put an extra reference on \var mod on error case. */
-		if (mod != NULL && *mod != NULL)
+		if (mod && *mod)
 			obd_mod_put(*mod);
 		req->rq_commit_cb(req);
 	}
@@ -237,7 +235,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_REINT_CREATE_RMT_ACL);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -262,7 +260,8 @@
 	ptlrpc_request_set_replen(req);
 
 	/* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
-	 * logic here */
+	 * logic here
+	 */
 	req->rq_no_retry_einprogress = 1;
 
 	if (resends) {
@@ -280,7 +279,8 @@
 		goto resend;
 	} else if (rc == -EINPROGRESS) {
 		/* Retry create infinitely until succeed or get other
-		 * error code. */
+		 * error code.
+		 */
 		ptlrpc_req_finished(req);
 		resends++;
 
@@ -308,7 +308,7 @@
 	struct ptlrpc_request *req = *request;
 	int count = 0, rc;
 
-	LASSERT(req == NULL);
+	LASSERT(!req);
 
 	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
 	    (fid_is_sane(&op_data->op_fid1)) &&
@@ -324,7 +324,7 @@
 						 MDS_INODELOCK_FULL);
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_REINT_UNLINK);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -373,7 +373,7 @@
 						 MDS_INODELOCK_UPDATE);
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -429,7 +429,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_REINT_RENAME);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 57e0fc1..c7a0fa1 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -48,6 +48,7 @@
 #include "../include/lprocfs_status.h"
 #include "../include/lustre_param.h"
 #include "../include/lustre_log.h"
+#include "../include/lustre_kernelcomm.h"
 
 #include "mdc_internal.h"
 
@@ -62,7 +63,8 @@
 
 	/* mdc_enter_request() ensures that this client has no more
 	 * than cl_max_rpcs_in_flight RPCs simultaneously inf light
-	 * against an MDT. */
+	 * against an MDT.
+	 */
 	rc = mdc_enter_request(cli);
 	if (rc != 0)
 		return rc;
@@ -82,7 +84,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_MDS_GETSTATUS,
 					LUSTRE_MDS_VERSION, MDS_GETSTATUS);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	mdc_pack_body(req, NULL, 0, 0, -1, 0);
@@ -95,7 +97,7 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -135,7 +137,7 @@
 
 	/* sanity check for the reply */
 	body = req_capsule_server_get(pill, &RMF_MDT_BODY);
-	if (body == NULL)
+	if (!body)
 		return -EPROTO;
 
 	CDEBUG(D_NET, "mode: %o\n", body->mode);
@@ -145,7 +147,7 @@
 
 		eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
 						      body->eadatasize);
-		if (eadata == NULL)
+		if (!eadata)
 			return -EPROTO;
 	}
 
@@ -155,7 +157,7 @@
 		LASSERT(client_is_remote(exp));
 		perm = req_capsule_server_swab_get(pill, &RMF_ACL,
 						lustre_swab_mdt_remote_perm);
-		if (perm == NULL)
+		if (!perm)
 			return -EPROTO;
 	}
 
@@ -175,7 +177,7 @@
 	}
 	*request = NULL;
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
@@ -213,7 +215,7 @@
 	*request = NULL;
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_GETATTR_NAME);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -260,7 +262,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
 					MDS_IS_SUBDIR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	mdc_is_subdir_pack(req, pfid, cfid, 0);
@@ -289,7 +291,7 @@
 
 	*request = NULL;
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	if (xattr_name) {
@@ -424,7 +426,7 @@
 		return -EPROTO;
 
 	acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
-	if (acl == NULL)
+	if (!acl)
 		return 0;
 
 	if (IS_ERR(acl)) {
@@ -460,7 +462,6 @@
 	memset(md, 0, sizeof(*md));
 
 	md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
-	LASSERT(md->body != NULL);
 
 	if (md->body->valid & OBD_MD_FLEASIZE) {
 		int lmmsize;
@@ -592,17 +593,16 @@
 	struct lustre_handle old;
 	struct mdt_body *body;
 
-	if (mod == NULL) {
+	if (!mod) {
 		DEBUG_REQ(D_ERROR, req,
 			  "Can't properly replay without open data.");
 		return;
 	}
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-	LASSERT(body != NULL);
 
 	och = mod->mod_och;
-	if (och != NULL) {
+	if (och) {
 		struct lustre_handle *file_fh;
 
 		LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
@@ -614,7 +614,7 @@
 		*file_fh = body->handle;
 	}
 	close_req = mod->mod_close_req;
-	if (close_req != NULL) {
+	if (close_req) {
 		__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
 		struct mdt_ioepoch *epoch;
 
@@ -623,7 +623,7 @@
 					       &RMF_MDT_EPOCH);
 		LASSERT(epoch);
 
-		if (och != NULL)
+		if (och)
 			LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
 		DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
 		epoch->handle = body->handle;
@@ -634,7 +634,7 @@
 {
 	struct md_open_data *mod = req->rq_cb_data;
 
-	if (mod == NULL)
+	if (!mod)
 		return;
 
 	/**
@@ -674,15 +674,15 @@
 
 	rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
 	body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
-	LASSERT(rec != NULL);
+	LASSERT(rec);
 	/* Incoming message in my byte order (it's been swabbed). */
 	/* Outgoing messages always in my byte order. */
-	LASSERT(body != NULL);
+	LASSERT(body);
 
 	/* Only if the import is replayable, we set replay_open data */
 	if (och && imp->imp_replayable) {
 		mod = obd_mod_alloc();
-		if (mod == NULL) {
+		if (!mod) {
 			DEBUG_REQ(D_ERROR, open_req,
 				  "Can't allocate md_open_data");
 			return 0;
@@ -748,11 +748,11 @@
 	 * It is possible to not have \var mod in a case of eviction between
 	 * lookup and ll_file_open().
 	 **/
-	if (mod == NULL)
+	if (!mod)
 		return 0;
 
 	LASSERT(mod != LP_POISON);
-	LASSERT(mod->mod_open_req != NULL);
+	LASSERT(mod->mod_open_req);
 	mdc_free_open(mod);
 
 	mod->mod_och = NULL;
@@ -803,7 +803,7 @@
 
 	*request = NULL;
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
@@ -814,13 +814,14 @@
 
 	/* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
 	 * portal whose threads are not taking any DLM locks and are therefore
-	 * always progressing */
+	 * always progressing
+	 */
 	req->rq_request_portal = MDS_READPAGE_PORTAL;
 	ptlrpc_at_set_req_timeout(req);
 
 	/* Ensure that this close's handle is fixed up during replay. */
-	if (likely(mod != NULL)) {
-		LASSERTF(mod->mod_open_req != NULL &&
+	if (likely(mod)) {
+		LASSERTF(mod->mod_open_req &&
 			 mod->mod_open_req->rq_type != LI_POISON,
 			 "POISONED open %p!\n", mod->mod_open_req);
 
@@ -828,7 +829,8 @@
 
 		DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
 		/* We no longer want to preserve this open for replay even
-		 * though the open was committed. b=3632, b=3633 */
+		 * though the open was committed. b=3632, b=3633
+		 */
 		spin_lock(&mod->mod_open_req->rq_lock);
 		mod->mod_open_req->rq_replay = 0;
 		spin_unlock(&mod->mod_open_req->rq_lock);
@@ -850,7 +852,7 @@
 	rc = ptlrpc_queue_wait(req);
 	mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
 
-	if (req->rq_repmsg == NULL) {
+	if (!req->rq_repmsg) {
 		CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
 		       req->rq_status);
 		if (rc == 0)
@@ -866,7 +868,7 @@
 				rc = -rc;
 		}
 		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-		if (body == NULL)
+		if (!body)
 			rc = -EPROTO;
 	} else if (rc == -ESTALE) {
 		/**
@@ -876,7 +878,6 @@
 		 */
 		if (mod) {
 			DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
-			LASSERT(mod->mod_open_req != NULL);
 			if (mod->mod_open_req->rq_committed)
 				rc = 0;
 		}
@@ -886,7 +887,8 @@
 		if (rc != 0)
 			mod->mod_close_req = NULL;
 		/* Since now, mod is accessed through open_req only,
-		 * thus close req does not keep a reference on mod anymore. */
+		 * thus close req does not keep a reference on mod anymore.
+		 */
 		obd_mod_put(mod);
 	}
 	*request = req;
@@ -903,7 +905,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_DONE_WRITING);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
@@ -912,15 +914,16 @@
 		return rc;
 	}
 
-	if (mod != NULL) {
-		LASSERTF(mod->mod_open_req != NULL &&
+	if (mod) {
+		LASSERTF(mod->mod_open_req &&
 			 mod->mod_open_req->rq_type != LI_POISON,
 			 "POISONED setattr %p!\n", mod->mod_open_req);
 
 		mod->mod_close_req = req;
 		DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
 		/* We no longer want to preserve this setattr for replay even
-		 * though the open was committed. b=3632, b=3633 */
+		 * though the open was committed. b=3632, b=3633
+		 */
 		spin_lock(&mod->mod_open_req->rq_lock);
 		mod->mod_open_req->rq_replay = 0;
 		spin_unlock(&mod->mod_open_req->rq_lock);
@@ -940,7 +943,6 @@
 		 * Let's check if mod exists and return no error in that case
 		 */
 		if (mod) {
-			LASSERT(mod->mod_open_req != NULL);
 			if (mod->mod_open_req->rq_committed)
 				rc = 0;
 		}
@@ -949,11 +951,12 @@
 	if (mod) {
 		if (rc != 0)
 			mod->mod_close_req = NULL;
-		LASSERT(mod->mod_open_req != NULL);
+		LASSERT(mod->mod_open_req);
 		mdc_free_open(mod);
 
 		/* Since now, mod is accessed through setattr req only,
-		 * thus DW req does not keep a reference on mod anymore. */
+		 * thus DW req does not keep a reference on mod anymore.
+		 */
 		obd_mod_put(mod);
 	}
 
@@ -978,7 +981,7 @@
 
 restart_bulk:
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
@@ -992,7 +995,7 @@
 
 	desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
 				    MDS_BULK_PORTAL);
-	if (desc == NULL) {
+	if (!desc) {
 		ptlrpc_request_free(req);
 		return -ENOMEM;
 	}
@@ -1066,7 +1069,7 @@
 
 	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
 					LUSTRE_MDS_VERSION, MDS_STATFS);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto output;
 	}
@@ -1088,7 +1091,7 @@
 	}
 
 	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
-	if (msfs == NULL) {
+	if (!msfs) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1161,7 +1164,7 @@
 
 	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
 					LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1170,7 +1173,7 @@
 
 	/* Copy hsm_progress struct */
 	req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
-	if (req_hpk == NULL) {
+	if (!req_hpk) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1195,7 +1198,7 @@
 	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
 					LUSTRE_MDS_VERSION,
 					MDS_HSM_CT_REGISTER);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1205,7 +1208,7 @@
 	/* Copy hsm_progress struct */
 	archive_mask = req_capsule_client_get(&req->rq_pill,
 					      &RMF_MDS_HSM_ARCHIVE);
-	if (archive_mask == NULL) {
+	if (!archive_mask) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1230,7 +1233,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_HSM_ACTION);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
@@ -1250,7 +1253,7 @@
 
 	req_hca = req_capsule_server_get(&req->rq_pill,
 					 &RMF_MDS_HSM_CURRENT_ACTION);
-	if (req_hca == NULL) {
+	if (!req_hca) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1270,7 +1273,7 @@
 	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
 					LUSTRE_MDS_VERSION,
 					MDS_HSM_CT_UNREGISTER);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1295,7 +1298,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_HSM_STATE_GET);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
@@ -1314,7 +1317,7 @@
 		goto out;
 
 	req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
-	if (req_hus == NULL) {
+	if (!req_hus) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1336,7 +1339,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_HSM_STATE_SET);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
@@ -1350,7 +1353,7 @@
 
 	/* Copy states */
 	req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
-	if (req_hss == NULL) {
+	if (!req_hss) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1375,7 +1378,7 @@
 	int			 rc;
 
 	req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1396,7 +1399,7 @@
 
 	/* Copy hsm_request struct */
 	req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
-	if (req_hr == NULL) {
+	if (!req_hr) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1404,7 +1407,7 @@
 
 	/* Copy hsm_user_item structs */
 	req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
-	if (req_hui == NULL) {
+	if (!req_hui) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1413,7 +1416,7 @@
 
 	/* Copy opaque field */
 	req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
-	if (req_opaque == NULL) {
+	if (!req_opaque) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -1512,7 +1515,7 @@
 
 	/* Set up the remote catalog handle */
 	ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
-	if (ctxt == NULL) {
+	if (!ctxt) {
 		rc = -ENOENT;
 		goto out;
 	}
@@ -1553,6 +1556,7 @@
 				  struct ioc_changelog *icc)
 {
 	struct changelog_show *cs;
+	struct task_struct *task;
 	int rc;
 
 	/* Freed in mdc_changelog_send_thread */
@@ -1570,15 +1574,20 @@
 	 * New thread because we should return to user app before
 	 * writing into our pipe
 	 */
-	rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
-				 "mdc_clg_send_thread"));
-	if (!IS_ERR_VALUE(rc)) {
-		CDEBUG(D_CHANGELOG, "start changelog thread\n");
-		return 0;
+	task = kthread_run(mdc_changelog_send_thread, cs,
+			   "mdc_clg_send_thread");
+	if (IS_ERR(task)) {
+		rc = PTR_ERR(task);
+		CERROR("%s: can't start changelog thread: rc = %d\n",
+		       obd->obd_name, rc);
+		kfree(cs);
+	} else {
+		rc = 0;
+		CDEBUG(D_CHANGELOG, "%s: started changelog thread\n",
+		       obd->obd_name);
 	}
 
 	CERROR("Failed to start changelog thread: %d\n", rc);
-	kfree(cs);
 	return rc;
 }
 
@@ -1596,7 +1605,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
 					MDS_QUOTACHECK);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -1605,7 +1614,8 @@
 	ptlrpc_request_set_replen(req);
 
 	/* the next poll will find -ENODATA, that means quotacheck is
-	 * going on */
+	 * going on
+	 */
 	cli->cl_qchk_stat = -ENODATA;
 	rc = ptlrpc_queue_wait(req);
 	if (rc)
@@ -1640,7 +1650,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
 					MDS_QUOTACTL);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -1694,7 +1704,7 @@
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 				   &RQF_MDS_SWAP_LAYOUTS);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -1721,7 +1731,7 @@
 }
 
 static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
-			 void *karg, void *uarg)
+			 void *karg, void __user *uarg)
 {
 	struct obd_device *obd = exp->exp_obd;
 	struct obd_ioctl_data *data = karg;
@@ -1880,7 +1890,7 @@
 	int		     rc = -EINVAL;
 
 	req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
@@ -1905,7 +1915,8 @@
 
 	rc = ptlrpc_queue_wait(req);
 	/* -EREMOTE means the get_info result is partial, and it needs to
-	 * continue on another MDT, see fid2path part in lmv_iocontrol */
+	 * continue on another MDT, see fid2path part in lmv_iocontrol
+	 */
 	if (rc == 0 || rc == -EREMOTE) {
 		tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
 		memcpy(val, tmp, vallen);
@@ -2013,21 +2024,27 @@
 /**
  * callback function passed to kuc for re-registering each HSM copytool
  * running on MDC, after MDT shutdown/recovery.
- * @param data archive id served by the copytool
+ * @param data copytool registration data
  * @param cb_arg callback argument (obd_import)
  */
-static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg)
+static int mdc_hsm_ct_reregister(void *data, void *cb_arg)
 {
+	struct kkuc_ct_data	*kcd = data;
 	struct obd_import	*imp = (struct obd_import *)cb_arg;
-	__u32			 archive = data;
 	int			 rc;
 
-	CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n",
-	       archive);
-	rc = mdc_ioc_hsm_ct_register(imp, archive);
+	if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC)
+		return -EPROTO;
+
+	if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid))
+		return 0;
+
+	CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n",
+	       imp->imp_obd->obd_name, kcd->kcd_archive);
+	rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive);
 
 	/* ignore error if the copytool is already registered */
-	return ((rc != 0) && (rc != -EEXIST)) ? rc : 0;
+	return (rc == -EEXIST) ? 0 : rc;
 }
 
 static int mdc_set_info_async(const struct lu_env *env,
@@ -2133,7 +2150,7 @@
 
 	*request = NULL;
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
@@ -2175,7 +2192,7 @@
 		 * Flush current sequence to make client obtain new one
 		 * from server in case of disconnect/reconnect.
 		 */
-		if (cli->cl_seq != NULL)
+		if (cli->cl_seq)
 			seq_client_flush(cli->cl_seq);
 
 		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
@@ -2238,7 +2255,8 @@
 
 	/* FIXME: if we ever get into a situation where there are too many
 	 * opened files with open locks on a single node, then we really
-	 * should replay these open locks to reget it */
+	 * should replay these open locks to reget it
+	 */
 	if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
 		return 0;
 
@@ -2422,7 +2440,7 @@
 
 	*request = NULL;
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index ab4800c..6fc8225 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -90,7 +90,8 @@
 int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
 {
 	/* fsname is at most 8 chars long, maybe contain "-".
-	 * e.g. "lustre", "SUN-000" */
+	 * e.g. "lustre", "SUN-000"
+	 */
 	return mgc_name2resid(fsname, strlen(fsname), res_id, type);
 }
 EXPORT_SYMBOL(mgc_fsname2resid);
@@ -102,7 +103,8 @@
 
 	/* logname consists of "fsname-nodetype".
 	 * e.g. "lustre-MDT0001", "SUN-000-client"
-	 * there is an exception: llog "params" */
+	 * there is an exception: llog "params"
+	 */
 	name_end = strrchr(logname, '-');
 	if (!name_end)
 		len = strlen(logname);
@@ -125,7 +127,8 @@
 }
 
 /* Drop a reference to a config log.  When no longer referenced,
-   we can free the config log data */
+ * we can free the config log data
+ */
 static void config_log_put(struct config_llog_data *cld)
 {
 	CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
@@ -162,7 +165,7 @@
 	struct config_llog_data *found = NULL;
 	void *instance;
 
-	LASSERT(logname != NULL);
+	LASSERT(logname);
 
 	instance = cfg ? cfg->cfg_instance : NULL;
 	spin_lock(&config_list_lock);
@@ -252,7 +255,8 @@
 	char logname[32];
 
 	/* we have to use different llog for clients and mdts for cmd
-	 * where only clients are notified if one of cmd server restarts */
+	 * where only clients are notified if one of the CMD servers restarts
+	 */
 	LASSERT(strlen(fsname) < sizeof(logname) / 2);
 	strcpy(logname, fsname);
 	LASSERT(lcfg.cfg_instance);
@@ -300,7 +304,7 @@
 	 * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
 	 */
 	ptr = strrchr(logname, '-');
-	if (ptr == NULL || ptr - logname > 8) {
+	if (!ptr || ptr - logname > 8) {
 		CERROR("logname %s is too long\n", logname);
 		return -EINVAL;
 	}
@@ -309,7 +313,7 @@
 	strcpy(seclogname + (ptr - logname), "-sptlrpc");
 
 	sptlrpc_cld = config_log_find(seclogname, NULL);
-	if (sptlrpc_cld == NULL) {
+	if (!sptlrpc_cld) {
 		sptlrpc_cld = do_config_log_add(obd, seclogname,
 						CONFIG_T_SPTLRPC, NULL, NULL);
 		if (IS_ERR(sptlrpc_cld)) {
@@ -376,7 +380,7 @@
 	int rc = 0;
 
 	cld = config_log_find(logname, cfg);
-	if (cld == NULL)
+	if (!cld)
 		return -ENOENT;
 
 	mutex_lock(&cld->cld_lock);
@@ -455,7 +459,7 @@
 
 	spin_lock(&config_list_lock);
 	list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
-		if (cld->cld_recover == NULL)
+		if (!cld->cld_recover)
 			continue;
 		seq_printf(m,  "    - { client: %s, nidtbl_version: %u }\n",
 			       cld->cld_logname,
@@ -483,8 +487,9 @@
 	LASSERT(atomic_read(&cld->cld_refcount) > 0);
 
 	/* Do not run mgc_process_log on a disconnected export or an
-	   export which is being disconnected. Take the client
-	   semaphore to make the check non-racy. */
+	 * export which is being disconnected. Take the client
+	 * semaphore to make the check non-racy.
+	 */
 	down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
 	if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
 		CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
@@ -529,8 +534,9 @@
 		}
 
 		/* Always wait a few seconds to allow the server who
-		   caused the lock revocation to finish its setup, plus some
-		   random so everyone doesn't try to reconnect at once. */
+		 * caused the lock revocation to finish its setup, plus some
+		 * random so everyone doesn't try to reconnect at once.
+		 */
 		to = MGC_TIMEOUT_MIN_SECONDS * HZ;
 		to += rand * HZ / 100; /* rand is centi-seconds */
 		lwi = LWI_TIMEOUT(to, NULL, NULL);
@@ -559,7 +565,8 @@
 			LASSERT(atomic_read(&cld->cld_refcount) > 0);
 
 			/* Whether we enqueued again or not in mgc_process_log,
-			 * we're done with the ref from the old enqueue */
+			 * we're done with the ref from the old enqueue
+			 */
 			if (cld_prev)
 				config_log_put(cld_prev);
 			cld_prev = cld;
@@ -575,7 +582,8 @@
 			config_log_put(cld_prev);
 
 		/* break after scanning the list so that we can drop
-		 * refcount to losing lock clds */
+		 * refcount to losing lock clds
+		 */
 		if (unlikely(stopped)) {
 			spin_lock(&config_list_lock);
 			break;
@@ -598,7 +606,8 @@
 }
 
 /* Add a cld to the list to requeue.  Start the requeue thread if needed.
-   We are responsible for dropping the config log reference from here on out. */
+ * We are responsible for dropping the config log reference from here on out.
+ */
 static void mgc_requeue_add(struct config_llog_data *cld)
 {
 	CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
@@ -635,7 +644,8 @@
 	int			 rc;
 
 	/* setup only remote ctxt, the local disk context is switched per each
-	 * filesystem during mgc_fs_setup() */
+	 * filesystem during mgc_fs_setup()
+	 */
 	rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
 			&llog_client_ops);
 	if (rc)
@@ -697,7 +707,8 @@
 static int mgc_cleanup(struct obd_device *obd)
 {
 	/* COMPAT_146 - old config logs may have added profiles we don't
-	   know about */
+	 * know about
+	 */
 	if (obd->obd_type->typ_refcnt <= 1)
 		/* Only for the last mgc */
 		class_del_profiles();
@@ -711,6 +722,7 @@
 static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
 	struct lprocfs_static_vars lvars = { NULL };
+	struct task_struct *task;
 	int rc;
 
 	ptlrpcd_addref();
@@ -734,10 +746,10 @@
 		init_waitqueue_head(&rq_waitq);
 
 		/* start requeue thread */
-		rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
-					     "ll_cfg_requeue"));
-		if (IS_ERR_VALUE(rc)) {
-			CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n",
+		task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue");
+		if (IS_ERR(task)) {
+			rc = PTR_ERR(task);
+			CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n",
 			       obd->obd_name, rc);
 			goto err_cleanup;
 		}
@@ -793,7 +805,8 @@
 			break;
 		}
 		/* Make sure not to re-enqueue when the mgc is stopping
-		   (we get called from client_disconnect_export) */
+		 * (we get called from client_disconnect_export)
+		 */
 		if (!lock->l_conn_export ||
 		    !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
 			CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
@@ -815,7 +828,8 @@
 
 /* Not sure where this should go... */
 /* This is the timeout value for MGS_CONNECT request plus a ping interval, such
- * that we can have a chance to try the secondary MGS if any. */
+ * that we can have a chance to try the secondary MGS if any.
+ */
 #define  MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
 				+ PING_INTERVAL)
 #define  MGC_TARGET_REG_LIMIT 10
@@ -879,11 +893,12 @@
 	       cld->cld_resid.name[0]);
 
 	/* We need a callback for every lockholder, so don't try to
-	   ldlm_lock_match (see rev 1.1.2.11.2.47) */
+	 * ldlm_lock_match (see rev 1.1.2.11.2.47)
+	 */
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
 					LDLM_ENQUEUE);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
@@ -894,7 +909,8 @@
 	rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
 			      NULL, 0, LVB_T_NONE, lockh, 0);
 	/* A failed enqueue should still call the mgc_blocking_ast,
-	   where it will be requeued if needed ("grant failed"). */
+	 * where it will be requeued if needed ("grant failed").
+	 */
 	ptlrpc_req_finished(req);
 	return rc;
 }
@@ -921,7 +937,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
 					MGS_TARGET_REG);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
@@ -1109,7 +1125,7 @@
 	int   rc  = 0;
 	int   off = 0;
 
-	LASSERT(cfg->cfg_instance != NULL);
+	LASSERT(cfg->cfg_instance);
 	LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
 	inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
@@ -1195,7 +1211,7 @@
 		/* lustre-OST0001-osc-<instance #> */
 		strcpy(obdname, cld->cld_logname);
 		cname = strrchr(obdname, '-');
-		if (cname == NULL) {
+		if (!cname) {
 			CERROR("mgc %s: invalid logname %s\n",
 			       mgc->obd_name, obdname);
 			break;
@@ -1212,7 +1228,7 @@
 
 		/* find the obd by obdname */
 		obd = class_name2obd(obdname);
-		if (obd == NULL) {
+		if (!obd) {
 			CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
 			       mgc->obd_name, obdname);
 			rc = 0;
@@ -1227,7 +1243,7 @@
 		uuid = buf + pos;
 
 		down_read(&obd->u.cli.cl_sem);
-		if (obd->u.cli.cl_import == NULL) {
+		if (!obd->u.cli.cl_import) {
 			/* client does not connect to the OST yet */
 			up_read(&obd->u.cli.cl_sem);
 			rc = 0;
@@ -1257,7 +1273,7 @@
 
 		rc = -ENOMEM;
 		lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
-		if (lcfg == NULL) {
+		if (!lcfg) {
 			CERROR("mgc: cannot allocate memory\n");
 			break;
 		}
@@ -1309,14 +1325,14 @@
 		nrpages = CONFIG_READ_NRPAGES_INIT;
 
 	pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL);
-	if (pages == NULL) {
+	if (!pages) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	for (i = 0; i < nrpages; i++) {
 		pages[i] = alloc_page(GFP_KERNEL);
-		if (pages[i] == NULL) {
+		if (!pages[i]) {
 			rc = -ENOMEM;
 			goto out;
 		}
@@ -1327,7 +1343,7 @@
 	LASSERT(mutex_is_locked(&cld->cld_lock));
 	req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
 				   &RQF_MGS_CONFIG_READ);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1338,7 +1354,6 @@
 
 	/* pack request */
 	body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
-	LASSERT(body != NULL);
 	LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
 	if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
 	    >= sizeof(body->mcb_name)) {
@@ -1353,7 +1368,7 @@
 	/* allocate bulk transfer descriptor */
 	desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
 				    MGS_BULK_PORTAL);
-	if (desc == NULL) {
+	if (!desc) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1373,7 +1388,8 @@
 	}
 
 	/* always update the index even though it might have errors with
-	 * handling the recover logs */
+	 * handling the recover logs
+	 */
 	cfg->cfg_last_idx = res->mcr_offset;
 	eof = res->mcr_offset == res->mcr_size;
 
@@ -1400,7 +1416,8 @@
 	mne_swab = !!ptlrpc_rep_need_swab(req);
 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
 	/* This import flag means the server did an extra swab of IR MNE
-	 * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
+	 * records (fixed in LU-1252), reverse it here if needed. LU-1644
+	 */
 	if (unlikely(req->rq_import->imp_need_mne_swab))
 		mne_swab = !mne_swab;
 #else
@@ -1434,7 +1451,7 @@
 
 	if (pages) {
 		for (i = 0; i < nrpages; i++) {
-			if (pages[i] == NULL)
+			if (!pages[i])
 				break;
 			__free_page(pages[i]);
 		}
@@ -1489,7 +1506,8 @@
 
 	/* logname and instance info should be the same, so use our
 	 * copy of the instance for the update.  The cfg_last_idx will
-	 * be updated here. */
+	 * be updated here.
+	 */
 	rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
 				     &cld->cld_cfg);
 
@@ -1529,9 +1547,10 @@
 	LASSERT(cld);
 
 	/* I don't want multiple processes running process_log at once --
-	   sounds like badness.  It actually might be fine, as long as
-	   we're not trying to update from the same log
-	   simultaneously (in which case we should use a per-log sem.) */
+	 * sounds like badness.  It actually might be fine, as long as
+	 * we're not trying to update from the same log
+	 * simultaneously (in which case we should use a per-log sem.)
+	 */
 	mutex_lock(&cld->cld_lock);
 	if (cld->cld_stopping) {
 		mutex_unlock(&cld->cld_lock);
@@ -1556,7 +1575,8 @@
 		CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
 
 		/* mark cld_lostlock so that it will requeue
-		 * after MGC becomes available. */
+		 * after MGC becomes available.
+		 */
 		cld->cld_lostlock = 1;
 		/* Get extra reference, it will be put in requeue thread */
 		config_log_get(cld);
@@ -1635,18 +1655,19 @@
 		if (rc)
 			break;
 		cld = config_log_find(logname, cfg);
-		if (cld == NULL) {
+		if (!cld) {
 			rc = -ENOENT;
 			break;
 		}
 
 		/* COMPAT_146 */
 		/* FIXME only set this for old logs!  Right now this forces
-		   us to always skip the "inside markers" check */
+		 * us to always skip the "inside markers" check
+		 */
 		cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
 
 		rc = mgc_process_log(obd, cld);
-		if (rc == 0 && cld->cld_recover != NULL) {
+		if (rc == 0 && cld->cld_recover) {
 			if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
 					 imp_connect_data, IMP_RECOV)) {
 				rc = mgc_process_log(obd, cld->cld_recover);
@@ -1660,7 +1681,7 @@
 				CERROR("Cannot process recover llog %d\n", rc);
 		}
 
-		if (rc == 0 && cld->cld_params != NULL) {
+		if (rc == 0 && cld->cld_params) {
 			rc = mgc_process_log(obd, cld->cld_params);
 			if (rc == -ENOENT) {
 				CDEBUG(D_MGC,
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index acc6857..c404eb3 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -2,8 +2,8 @@
 
 obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
 	      llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
-	      genops.o uuid.o lprocfs_status.o \
-	      lustre_handles.o lustre_peer.o \
-	      statfs_pack.o obdo.o obd_config.o obd_mount.o \
-	      lu_object.o cl_object.o   \
-	      cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o lprocfs_counters.o
+	      genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
+	      lustre_handles.o lustre_peer.o statfs_pack.o \
+	      obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
+	      cl_object.o cl_page.o cl_lock.o cl_io.o \
+	      acl.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index 49ba885..0e02ae9 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -104,7 +104,7 @@
 		return old_size;
 
 	new = kmemdup(*header, new_size, GFP_NOFS);
-	if (unlikely(new == NULL))
+	if (unlikely(!new))
 		return -ENOMEM;
 
 	kfree(*header);
@@ -124,7 +124,7 @@
 		return 0;
 
 	new = kmemdup(*header, ext_size, GFP_NOFS);
-	if (unlikely(new == NULL))
+	if (unlikely(!new))
 		return -ENOMEM;
 
 	kfree(*header);
@@ -149,7 +149,7 @@
 		count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
 	esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
 	new = kzalloc(esize, GFP_NOFS);
-	if (unlikely(new == NULL))
+	if (unlikely(!new))
 		return ERR_PTR(-ENOMEM);
 
 	new->a_count = cpu_to_le32(count);
@@ -180,7 +180,7 @@
 		return -EINVAL;
 
 	new = kzalloc(size, GFP_NOFS);
-	if (unlikely(new == NULL))
+	if (unlikely(!new))
 		return -ENOMEM;
 
 	new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
@@ -300,7 +300,7 @@
 	ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
 
 	new = kzalloc(ext_size, GFP_NOFS);
-	if (unlikely(new == NULL))
+	if (unlikely(!new))
 		return ERR_PTR(-ENOMEM);
 
 	for (i = 0, j = 0; i < posix_count; i++) {
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 63246ba..191ec43 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -44,6 +44,7 @@
 #include "../include/obd_support.h"
 #include "../include/lustre_fid.h"
 #include <linux/list.h>
+#include <linux/sched.h>
 #include "../include/cl_object.h"
 #include "cl_internal.h"
 
@@ -93,7 +94,7 @@
 		 * CIS_IO_GOING.
 		 */
 		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
-		     (io->ci_state == CIS_LOCKED && up != NULL));
+		     (io->ci_state == CIS_LOCKED && up));
 }
 
 /**
@@ -111,7 +112,7 @@
 		slice = container_of(io->ci_layers.prev, struct cl_io_slice,
 				     cis_linkage);
 		list_del_init(&slice->cis_linkage);
-		if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
+		if (slice->cis_iop->op[io->ci_type].cio_fini)
 			slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
 		/*
 		 * Invalidate slice to catch use after free. This assumes that
@@ -164,7 +165,7 @@
 
 	result = 0;
 	cl_object_for_each(scan, obj) {
-		if (scan->co_ops->coo_io_init != NULL) {
+		if (scan->co_ops->coo_io_init) {
 			result = scan->co_ops->coo_io_init(env, scan, io);
 			if (result != 0)
 				break;
@@ -186,7 +187,7 @@
 	struct cl_thread_info *info = cl_env_info(env);
 
 	LASSERT(obj != cl_object_top(obj));
-	if (info->clt_current_io == NULL)
+	if (!info->clt_current_io)
 		info->clt_current_io = io;
 	return cl_io_init0(env, io, iot, obj);
 }
@@ -208,7 +209,7 @@
 	struct cl_thread_info *info = cl_env_info(env);
 
 	LASSERT(obj == cl_object_top(obj));
-	LASSERT(info->clt_current_io == NULL);
+	LASSERT(!info->clt_current_io);
 
 	info->clt_current_io = io;
 	return cl_io_init0(env, io, iot, obj);
@@ -224,7 +225,7 @@
 		  enum cl_io_type iot, loff_t pos, size_t count)
 {
 	LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
-	LINVRNT(io->ci_obj != NULL);
+	LINVRNT(io->ci_obj);
 
 	LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
 			 "io range: %u [%llu, %llu) %u %u\n",
@@ -292,7 +293,7 @@
 		list_for_each_entry_safe(curr, temp,
 					     &io->ci_lockset.cls_todo,
 					     cill_linkage) {
-			if (prev != NULL) {
+			if (prev) {
 				switch (cl_lock_descr_sort(&prev->cill_descr,
 							  &curr->cill_descr)) {
 				case 0:
@@ -308,7 +309,8 @@
 							   &prev->cill_linkage);
 					done = 0;
 					continue; /* don't change prev: it's
-						   * still "previous" */
+						   * still "previous"
+						   */
 				case -1: /* already in order */
 					break;
 				}
@@ -327,32 +329,31 @@
 int cl_queue_match(const struct list_head *queue,
 		   const struct cl_lock_descr *need)
 {
-       struct cl_io_lock_link *scan;
+	struct cl_io_lock_link *scan;
 
-       list_for_each_entry(scan, queue, cill_linkage) {
-	       if (cl_lock_descr_match(&scan->cill_descr, need))
-		       return 1;
-       }
-       return 0;
+	list_for_each_entry(scan, queue, cill_linkage) {
+		if (cl_lock_descr_match(&scan->cill_descr, need))
+			return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL(cl_queue_match);
 
 static int cl_queue_merge(const struct list_head *queue,
 			  const struct cl_lock_descr *need)
 {
-       struct cl_io_lock_link *scan;
+	struct cl_io_lock_link *scan;
 
-       list_for_each_entry(scan, queue, cill_linkage) {
-	       if (cl_lock_descr_cmp(&scan->cill_descr, need))
-		       continue;
-	       cl_lock_descr_merge(&scan->cill_descr, need);
-	       CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
-		      scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
-		      scan->cill_descr.cld_end);
-	       return 1;
-       }
-       return 0;
-
+	list_for_each_entry(scan, queue, cill_linkage) {
+		if (cl_lock_descr_cmp(&scan->cill_descr, need))
+			continue;
+		cl_lock_descr_merge(&scan->cill_descr, need);
+		CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+		       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+		       scan->cill_descr.cld_end);
+		return 1;
+	}
+	return 0;
 }
 
 static int cl_lockset_match(const struct cl_lockset *set,
@@ -399,11 +400,11 @@
 	struct cl_lock *lock = link->cill_lock;
 
 	list_del_init(&link->cill_linkage);
-	if (lock != NULL) {
+	if (lock) {
 		cl_lock_release(env, lock, "io", io);
 		link->cill_lock = NULL;
 	}
-	if (link->cill_fini != NULL)
+	if (link->cill_fini)
 		link->cill_fini(env, link);
 }
 
@@ -419,7 +420,8 @@
 	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
 		if (!cl_lockset_match(set, &link->cill_descr)) {
 			/* XXX some locking to guarantee that locks aren't
-			 * expanded in between. */
+			 * expanded in between.
+			 */
 			result = cl_lockset_lock_one(env, io, set, link);
 			if (result != 0)
 				break;
@@ -458,7 +460,7 @@
 	LINVRNT(cl_io_invariant(io));
 
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
+		if (!scan->cis_iop->op[io->ci_type].cio_lock)
 			continue;
 		result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
 		if (result != 0)
@@ -503,7 +505,7 @@
 		cl_lock_link_fini(env, io, link);
 	}
 	cl_io_for_each_reverse(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
+		if (scan->cis_iop->op[io->ci_type].cio_unlock)
 			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
 	}
 	io->ci_state = CIS_UNLOCKED;
@@ -529,7 +531,7 @@
 
 	result = 0;
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
+		if (!scan->cis_iop->op[io->ci_type].cio_iter_init)
 			continue;
 		result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
 								      scan);
@@ -556,7 +558,7 @@
 	LINVRNT(cl_io_invariant(io));
 
 	cl_io_for_each_reverse(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
+		if (scan->cis_iop->op[io->ci_type].cio_iter_fini)
 			scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
 	}
 	io->ci_state = CIS_IT_ENDED;
@@ -581,7 +583,7 @@
 
 	/* layers have to be notified. */
 	cl_io_for_each_reverse(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
+		if (scan->cis_iop->op[io->ci_type].cio_advance)
 			scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
 								   nob);
 	}
@@ -621,7 +623,7 @@
 	int result;
 
 	link = kzalloc(sizeof(*link), GFP_NOFS);
-	if (link != NULL) {
+	if (link) {
 		link->cill_descr     = *descr;
 		link->cill_fini      = cl_free_io_lock_link;
 		result = cl_io_lock_add(env, io, link);
@@ -648,7 +650,7 @@
 
 	io->ci_state = CIS_IO_GOING;
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
+		if (!scan->cis_iop->op[io->ci_type].cio_start)
 			continue;
 		result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
 		if (result != 0)
@@ -673,7 +675,7 @@
 	LINVRNT(cl_io_invariant(io));
 
 	cl_io_for_each_reverse(scan, io) {
-		if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
+		if (scan->cis_iop->op[io->ci_type].cio_end)
 			scan->cis_iop->op[io->ci_type].cio_end(env, scan);
 		/* TODO: error handling. */
 	}
@@ -687,7 +689,7 @@
 	const struct cl_page_slice *slice;
 
 	slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
-	LINVRNT(slice != NULL);
+	LINVRNT(slice);
 	return slice;
 }
 
@@ -759,11 +761,11 @@
 	 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
 	 */
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->cio_read_page != NULL) {
+		if (scan->cis_iop->cio_read_page) {
 			const struct cl_page_slice *slice;
 
 			slice = cl_io_slice_page(scan, page);
-			LINVRNT(slice != NULL);
+			LINVRNT(slice);
 			result = scan->cis_iop->cio_read_page(env, scan, slice);
 			if (result != 0)
 				break;
@@ -798,7 +800,7 @@
 	LASSERT(cl_page_in_io(page, io));
 
 	cl_io_for_each_reverse(scan, io) {
-		if (scan->cis_iop->cio_prepare_write != NULL) {
+		if (scan->cis_iop->cio_prepare_write) {
 			const struct cl_page_slice *slice;
 
 			slice = cl_io_slice_page(scan, page);
@@ -833,11 +835,11 @@
 	 * state. Better (and more general) way of dealing with such situation
 	 * is needed.
 	 */
-	LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
+	LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
 	LASSERT(cl_page_in_io(page, io));
 
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->cio_commit_write != NULL) {
+		if (scan->cis_iop->cio_commit_write) {
 			const struct cl_page_slice *slice;
 
 			slice = cl_io_slice_page(scan, page);
@@ -872,7 +874,7 @@
 	LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
 
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->req_op[crt].cio_submit == NULL)
+		if (!scan->cis_iop->req_op[crt].cio_submit)
 			continue;
 		result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
 							       queue);
@@ -900,7 +902,7 @@
 	int rc;
 
 	cl_page_list_for_each(pg, &queue->c2_qin) {
-		LASSERT(pg->cp_sync_io == NULL);
+		LASSERT(!pg->cp_sync_io);
 		pg->cp_sync_io = anchor;
 	}
 
@@ -913,14 +915,14 @@
 		 * clean pages), count them as completed to avoid infinite
 		 * wait.
 		 */
-		 cl_page_list_for_each(pg, &queue->c2_qin) {
+		cl_page_list_for_each(pg, &queue->c2_qin) {
 			pg->cp_sync_io = NULL;
 			cl_sync_io_note(anchor, 1);
-		 }
+		}
 
-		 /* wait for the IO to be finished. */
-		 rc = cl_sync_io_wait(env, io, &queue->c2_qout,
-				      anchor, timeout);
+		/* wait for the IO to be finished. */
+		rc = cl_sync_io_wait(env, io, &queue->c2_qout,
+				     anchor, timeout);
 	} else {
 		LASSERT(list_empty(&queue->c2_qout.pl_pages));
 		cl_page_list_for_each(pg, &queue->c2_qin)
@@ -1026,7 +1028,7 @@
 {
 	struct list_head *linkage = &slice->cis_linkage;
 
-	LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
+	LASSERT((!linkage->prev && !linkage->next) ||
 		list_empty(linkage));
 
 	list_add_tail(linkage, &io->ci_layers);
@@ -1053,8 +1055,9 @@
 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
 {
 	/* it would be better to check that page is owned by "current" io, but
-	 * it is not passed here. */
-	LASSERT(page->cp_owner != NULL);
+	 * it is not passed here.
+	 */
+	LASSERT(page->cp_owner);
 	LINVRNT(plist->pl_owner == current);
 
 	lockdep_off();
@@ -1263,7 +1266,7 @@
  */
 struct cl_io *cl_io_top(struct cl_io *io)
 {
-	while (io->ci_parent != NULL)
+	while (io->ci_parent)
 		io = io->ci_parent;
 	return io;
 }
@@ -1296,13 +1299,13 @@
 	LASSERT(list_empty(&req->crq_pages));
 	LASSERT(req->crq_nrpages == 0);
 	LINVRNT(list_empty(&req->crq_layers));
-	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
+	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));
 
-	if (req->crq_o != NULL) {
+	if (req->crq_o) {
 		for (i = 0; i < req->crq_nrobjs; ++i) {
 			struct cl_object *obj = req->crq_o[i].ro_obj;
 
-			if (obj != NULL) {
+			if (obj) {
 				lu_object_ref_del_at(&obj->co_lu,
 						     &req->crq_o[i].ro_obj_ref,
 						     "cl_req", req);
@@ -1326,7 +1329,7 @@
 	do {
 		list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
 			dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
-			if (dev->cd_ops->cdo_req_init != NULL) {
+			if (dev->cd_ops->cdo_req_init) {
 				result = dev->cd_ops->cdo_req_init(env,
 								   dev, req);
 				if (result != 0)
@@ -1334,7 +1337,7 @@
 			}
 		}
 		page = page->cp_child;
-	} while (page != NULL && result == 0);
+	} while (page && result == 0);
 	return result;
 }
 
@@ -1353,7 +1356,7 @@
 		slice = list_entry(req->crq_layers.prev,
 				       struct cl_req_slice, crs_linkage);
 		list_del_init(&slice->crs_linkage);
-		if (slice->crs_ops->cro_completion != NULL)
+		if (slice->crs_ops->cro_completion)
 			slice->crs_ops->cro_completion(env, slice, rc);
 	}
 	cl_req_free(env, req);
@@ -1371,7 +1374,7 @@
 	LINVRNT(nr_objects > 0);
 
 	req = kzalloc(sizeof(*req), GFP_NOFS);
-	if (req != NULL) {
+	if (req) {
 		int result;
 
 		req->crq_type = crt;
@@ -1380,7 +1383,7 @@
 
 		req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
 				     GFP_NOFS);
-		if (req->crq_o != NULL) {
+		if (req->crq_o) {
 			req->crq_nrobjs = nr_objects;
 			result = cl_req_init(env, req, page);
 		} else
@@ -1408,7 +1411,7 @@
 	page = cl_page_top(page);
 
 	LASSERT(list_empty(&page->cp_flight));
-	LASSERT(page->cp_req == NULL);
+	LASSERT(!page->cp_req);
 
 	CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
 		      req, req->crq_type, req->crq_nrpages);
@@ -1418,7 +1421,7 @@
 	page->cp_req = req;
 	obj = cl_object_top(page->cp_obj);
 	for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
-		if (rqo->ro_obj == NULL) {
+		if (!rqo->ro_obj) {
 			rqo->ro_obj = obj;
 			cl_object_get(obj);
 			lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
@@ -1463,11 +1466,11 @@
 	 * of objects.
 	 */
 	for (i = 0; i < req->crq_nrobjs; ++i)
-		LASSERT(req->crq_o[i].ro_obj != NULL);
+		LASSERT(req->crq_o[i].ro_obj);
 
 	result = 0;
 	list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
-		if (slice->crs_ops->cro_prep != NULL) {
+		if (slice->crs_ops->cro_prep) {
 			result = slice->crs_ops->cro_prep(env, slice);
 			if (result != 0)
 				break;
@@ -1501,9 +1504,8 @@
 
 			scan = cl_page_at(page,
 					  slice->crs_dev->cd_lu_dev.ld_type);
-			LASSERT(scan != NULL);
 			obj = scan->cpl_obj;
-			if (slice->crs_ops->cro_attr_set != NULL)
+			if (slice->crs_ops->cro_attr_set)
 				slice->crs_ops->cro_attr_set(env, slice, obj,
 							     attr + i, flags);
 		}
@@ -1511,9 +1513,6 @@
 }
 EXPORT_SYMBOL(cl_req_attr_set);
 
-/* XXX complete(), init_completion(), and wait_for_completion(), until they are
- * implemented in libcfs. */
-# include <linux/sched.h>
 
 /**
  * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 1836dc0..7b7f344 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -96,7 +96,7 @@
 
 	result = atomic_read(&lock->cll_ref) > 0 &&
 		cl_lock_invariant_trusted(env, lock);
-	if (!result && env != NULL)
+	if (!result && env)
 		CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
 	return result;
 }
@@ -288,7 +288,7 @@
 
 	LINVRNT(cl_lock_invariant(env, lock));
 	obj = lock->cll_descr.cld_obj;
-	LINVRNT(obj != NULL);
+	LINVRNT(obj);
 
 	CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
 	       atomic_read(&lock->cll_ref), lock, RETIP);
@@ -362,7 +362,7 @@
 	struct lu_object_header *head;
 
 	lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (lock != NULL) {
+	if (lock) {
 		atomic_set(&lock->cll_ref, 1);
 		lock->cll_descr = *descr;
 		lock->cll_state = CLS_NEW;
@@ -461,7 +461,7 @@
 
 	LINVRNT(cl_lock_invariant_trusted(env, lock));
 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
-		if (slice->cls_ops->clo_fits_into != NULL &&
+		if (slice->cls_ops->clo_fits_into &&
 		    !slice->cls_ops->clo_fits_into(env, slice, need, io))
 			return 0;
 	}
@@ -524,14 +524,14 @@
 	lock = cl_lock_lookup(env, obj, io, need);
 	spin_unlock(&head->coh_lock_guard);
 
-	if (lock == NULL) {
+	if (!lock) {
 		lock = cl_lock_alloc(env, obj, io, need);
 		if (!IS_ERR(lock)) {
 			struct cl_lock *ghost;
 
 			spin_lock(&head->coh_lock_guard);
 			ghost = cl_lock_lookup(env, obj, io, need);
-			if (ghost == NULL) {
+			if (!ghost) {
 				cl_lock_get_trust(lock);
 				list_add_tail(&lock->cll_linkage,
 						  &head->coh_locks);
@@ -572,7 +572,7 @@
 		spin_lock(&head->coh_lock_guard);
 		lock = cl_lock_lookup(env, obj, io, need);
 		spin_unlock(&head->coh_lock_guard);
-		if (lock == NULL)
+		if (!lock)
 			return NULL;
 
 		cl_lock_mutex_get(env, lock);
@@ -584,7 +584,7 @@
 			cl_lock_put(env, lock);
 			lock = NULL;
 		}
-	} while (lock == NULL);
+	} while (!lock);
 
 	cl_lock_hold_add(env, lock, scope, source);
 	cl_lock_user_add(env, lock);
@@ -775,7 +775,7 @@
 		lock->cll_flags |= CLF_CANCELLED;
 		list_for_each_entry_reverse(slice, &lock->cll_layers,
 						cls_linkage) {
-			if (slice->cls_ops->clo_cancel != NULL)
+			if (slice->cls_ops->clo_cancel)
 				slice->cls_ops->clo_cancel(env, slice);
 		}
 	}
@@ -812,7 +812,7 @@
 		 */
 		list_for_each_entry_reverse(slice, &lock->cll_layers,
 						cls_linkage) {
-			if (slice->cls_ops->clo_delete != NULL)
+			if (slice->cls_ops->clo_delete)
 				slice->cls_ops->clo_delete(env, slice);
 		}
 		/*
@@ -935,7 +935,8 @@
 	if (result == 0) {
 		/* To avoid being interrupted by the 'non-fatal' signals
 		 * (SIGCHLD, for instance), we'd block them temporarily.
-		 * LU-305 */
+		 * LU-305
+		 */
 		blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
 
 		init_waitqueue_entry(&waiter, current);
@@ -946,7 +947,8 @@
 		LASSERT(cl_lock_nr_mutexed(env) == 0);
 
 		/* Returning ERESTARTSYS instead of EINTR so syscalls
-		 * can be restarted if signals are pending here */
+		 * can be restarted if signals are pending here
+		 */
 		result = -ERESTARTSYS;
 		if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
 			schedule();
@@ -974,7 +976,7 @@
 	LINVRNT(cl_lock_invariant(env, lock));
 
 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
-		if (slice->cls_ops->clo_state != NULL)
+		if (slice->cls_ops->clo_state)
 			slice->cls_ops->clo_state(env, slice, state);
 	wake_up_all(&lock->cll_wq);
 }
@@ -1039,7 +1041,7 @@
 		result = -ENOSYS;
 		list_for_each_entry_reverse(slice, &lock->cll_layers,
 						cls_linkage) {
-			if (slice->cls_ops->clo_unuse != NULL) {
+			if (slice->cls_ops->clo_unuse) {
 				result = slice->cls_ops->clo_unuse(env, slice);
 				if (result != 0)
 					break;
@@ -1072,7 +1074,7 @@
 	result = -ENOSYS;
 	state = cl_lock_intransit(env, lock);
 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
-		if (slice->cls_ops->clo_use != NULL) {
+		if (slice->cls_ops->clo_use) {
 			result = slice->cls_ops->clo_use(env, slice);
 			if (result != 0)
 				break;
@@ -1125,7 +1127,7 @@
 
 	result = -ENOSYS;
 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
-		if (slice->cls_ops->clo_enqueue != NULL) {
+		if (slice->cls_ops->clo_enqueue) {
 			result = slice->cls_ops->clo_enqueue(env,
 							     slice, io, flags);
 			if (result != 0)
@@ -1170,7 +1172,8 @@
 			/* kick layers. */
 			result = cl_enqueue_kick(env, lock, io, flags);
 			/* For AGL case, the cl_lock::cll_state may
-			 * become CLS_HELD already. */
+			 * become CLS_HELD already.
+			 */
 			if (result == 0 && lock->cll_state == CLS_QUEUING)
 				cl_lock_state_set(env, lock, CLS_ENQUEUED);
 			break;
@@ -1215,7 +1218,7 @@
 
 	LASSERT(cl_lock_is_mutexed(lock));
 	LASSERT(lock->cll_state == CLS_QUEUING);
-	LASSERT(lock->cll_conflict != NULL);
+	LASSERT(lock->cll_conflict);
 
 	conflict = lock->cll_conflict;
 	lock->cll_conflict = NULL;
@@ -1258,7 +1261,7 @@
 	do {
 		result = cl_enqueue_try(env, lock, io, enqflags);
 		if (result == CLO_WAIT) {
-			if (lock->cll_conflict != NULL)
+			if (lock->cll_conflict)
 				result = cl_lock_enqueue_wait(env, lock, 1);
 			else
 				result = cl_lock_state_wait(env, lock);
@@ -1300,7 +1303,8 @@
 	}
 
 	/* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
-	 * underlying resources. */
+	 * underlying resources.
+	 */
 	if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
 		cl_lock_user_del(env, lock);
 		return 0;
@@ -1416,7 +1420,7 @@
 
 		result = -ENOSYS;
 		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
-			if (slice->cls_ops->clo_wait != NULL) {
+			if (slice->cls_ops->clo_wait) {
 				result = slice->cls_ops->clo_wait(env, slice);
 				if (result != 0)
 					break;
@@ -1449,7 +1453,7 @@
 
 	LINVRNT(cl_lock_invariant(env, lock));
 	LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
-		 "Wrong state %d \n", lock->cll_state);
+		 "Wrong state %d\n", lock->cll_state);
 	LASSERT(lock->cll_holds > 0);
 
 	do {
@@ -1487,7 +1491,7 @@
 
 	pound = 0;
 	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
-		if (slice->cls_ops->clo_weigh != NULL) {
+		if (slice->cls_ops->clo_weigh) {
 			ounce = slice->cls_ops->clo_weigh(env, slice);
 			pound += ounce;
 			if (pound < ounce) /* over-weight^Wflow */
@@ -1523,7 +1527,7 @@
 	LINVRNT(cl_lock_invariant(env, lock));
 
 	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
-		if (slice->cls_ops->clo_modify != NULL) {
+		if (slice->cls_ops->clo_modify) {
 			result = slice->cls_ops->clo_modify(env, slice, desc);
 			if (result != 0)
 				return result;
@@ -1584,7 +1588,7 @@
 	result = cl_lock_enclosure(env, lock, closure);
 	if (result == 0) {
 		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
-			if (slice->cls_ops->clo_closure != NULL) {
+			if (slice->cls_ops->clo_closure) {
 				result = slice->cls_ops->clo_closure(env, slice,
 								     closure);
 				if (result != 0)
@@ -1777,13 +1781,15 @@
 	lock = NULL;
 
 	need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
-				    * not PHANTOM */
+				    * not PHANTOM
+				    */
 	need->cld_start = need->cld_end = index;
 	need->cld_enq_flags = 0;
 
 	spin_lock(&head->coh_lock_guard);
 	/* It is fine to match any group lock since there could be only one
-	 * with a uniq gid and it conflicts with all other lock modes too */
+	 * with a uniq gid and it conflicts with all other lock modes too
+	 */
 	list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
 		if (scan != except &&
 		    (scan->cll_descr.cld_mode == CLM_GROUP ||
@@ -1798,7 +1804,8 @@
 		    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
 		    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
 			/* Don't increase cs_hit here since this
-			 * is just a helper function. */
+			 * is just a helper function.
+			 */
 			cl_lock_get_trust(scan);
 			lock = scan;
 			break;
@@ -1820,7 +1827,6 @@
 
 	dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
 	slice = cl_page_at(page, dtype);
-	LASSERT(slice != NULL);
 	return slice->cpl_page->cp_index;
 }
 
@@ -1840,11 +1846,12 @@
 		/* refresh non-overlapped index */
 		tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
 					lock, 1, 0);
-		if (tmp != NULL) {
+		if (tmp) {
 			/* Cache the first-non-overlapped index so as to skip
 			 * all pages within [index, clt_fn_index). This
 			 * is safe because if tmp lock is canceled, it will
-			 * discard these pages. */
+			 * discard these pages.
+			 */
 			info->clt_fn_index = tmp->cll_descr.cld_end + 1;
 			if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
 				info->clt_fn_index = CL_PAGE_EOF;
@@ -1950,7 +1957,7 @@
 	 * already destroyed (as otherwise they will be left unprotected).
 	 */
 	LASSERT(ergo(!cancel,
-		     head->coh_tree.rnode == NULL && head->coh_pages == 0));
+		     !head->coh_tree.rnode && head->coh_pages == 0));
 
 	spin_lock(&head->coh_lock_guard);
 	while (!list_empty(&head->coh_locks)) {
@@ -2194,7 +2201,7 @@
 		(*printer)(env, cookie, "    %s@%p: ",
 			   slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
 			   slice);
-		if (slice->cls_ops->clo_print != NULL)
+		if (slice->cls_ops->clo_print)
 			slice->cls_ops->clo_print(env, cookie, printer, slice);
 		(*printer)(env, cookie, "\n");
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 57c8d54..39b4fd0 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -152,7 +152,7 @@
 	struct cl_object_header *hdr = cl_object_header(o);
 	struct cl_object *top;
 
-	while (hdr->coh_parent != NULL)
+	while (hdr->coh_parent)
 		hdr = hdr->coh_parent;
 
 	top = lu2cl(lu_object_top(&hdr->coh_lu));
@@ -217,7 +217,7 @@
 	top = obj->co_lu.lo_header;
 	result = 0;
 	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
-		if (obj->co_ops->coo_attr_get != NULL) {
+		if (obj->co_ops->coo_attr_get) {
 			result = obj->co_ops->coo_attr_get(env, obj, attr);
 			if (result != 0) {
 				if (result > 0)
@@ -249,7 +249,7 @@
 	result = 0;
 	list_for_each_entry_reverse(obj, &top->loh_layers,
 					co_lu.lo_linkage) {
-		if (obj->co_ops->coo_attr_set != NULL) {
+		if (obj->co_ops->coo_attr_set) {
 			result = obj->co_ops->coo_attr_set(env, obj, attr, v);
 			if (result != 0) {
 				if (result > 0)
@@ -280,7 +280,7 @@
 	result = 0;
 	list_for_each_entry_reverse(obj, &top->loh_layers,
 					co_lu.lo_linkage) {
-		if (obj->co_ops->coo_glimpse != NULL) {
+		if (obj->co_ops->coo_glimpse) {
 			result = obj->co_ops->coo_glimpse(env, obj, lvb);
 			if (result != 0)
 				break;
@@ -306,7 +306,7 @@
 	top = obj->co_lu.lo_header;
 	result = 0;
 	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
-		if (obj->co_ops->coo_conf_set != NULL) {
+		if (obj->co_ops->coo_conf_set) {
 			result = obj->co_ops->coo_conf_set(env, obj, conf);
 			if (result != 0)
 				break;
@@ -328,7 +328,7 @@
 	struct cl_object_header *hdr;
 
 	hdr = cl_object_header(obj);
-	LASSERT(hdr->coh_tree.rnode == NULL);
+	LASSERT(!hdr->coh_tree.rnode);
 	LASSERT(hdr->coh_pages == 0);
 
 	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
@@ -362,7 +362,8 @@
 		atomic_set(&cs->cs_stats[i], 0);
 }
 
-int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
+static int cache_stats_print(const struct cache_stats *cs,
+			     struct seq_file *m, int h)
 {
 	int i;
 	/*
@@ -540,7 +541,7 @@
 {
 	LASSERT(cle->ce_ref == 0);
 	LASSERT(cle->ce_magic == &cl_env_init0);
-	LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+	LASSERT(!cle->ce_debug && !cle->ce_owner);
 
 	cle->ce_ref = 1;
 	cle->ce_debug = debug;
@@ -575,7 +576,7 @@
 {
 	struct cl_env *cle = cl_env_hops_obj(hn);
 
-	LASSERT(cle->ce_owner != NULL);
+	LASSERT(cle->ce_owner);
 	return (key == cle->ce_owner);
 }
 
@@ -609,7 +610,7 @@
 	if (cle) {
 		int rc;
 
-		LASSERT(cle->ce_owner == NULL);
+		LASSERT(!cle->ce_owner);
 		cle->ce_owner = (void *) (long) current->pid;
 		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
 					 &cle->ce_node);
@@ -637,7 +638,7 @@
 				      CFS_HASH_MAX_THETA,
 				      &cl_env_hops,
 				      CFS_HASH_RW_BKTLOCK);
-	return cl_env_hash != NULL ? 0 : -ENOMEM;
+	return cl_env_hash ? 0 : -ENOMEM;
 }
 
 static void cl_env_store_fini(void)
@@ -647,7 +648,7 @@
 
 static inline struct cl_env *cl_env_detach(struct cl_env *cle)
 {
-	if (cle == NULL)
+	if (!cle)
 		cle = cl_env_fetch();
 
 	if (cle && cle->ce_owner)
@@ -662,7 +663,7 @@
 	struct cl_env *cle;
 
 	cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO);
-	if (cle != NULL) {
+	if (cle) {
 		int rc;
 
 		INIT_LIST_HEAD(&cle->ce_linkage);
@@ -716,7 +717,7 @@
 
 	env = NULL;
 	cle = cl_env_fetch();
-	if (cle != NULL) {
+	if (cle) {
 		CL_ENV_INC(hit);
 		env = &cle->ce_lu;
 		*refcheck = ++cle->ce_ref;
@@ -741,7 +742,7 @@
 	struct lu_env *env;
 
 	env = cl_env_peek(refcheck);
-	if (env == NULL) {
+	if (!env) {
 		env = cl_env_new(lu_context_tags_default,
 				 lu_session_tags_default,
 				 __builtin_return_address(0));
@@ -768,7 +769,7 @@
 {
 	struct lu_env *env;
 
-	LASSERT(cl_env_peek(refcheck) == NULL);
+	LASSERT(!cl_env_peek(refcheck));
 	env = cl_env_new(tags, tags, __builtin_return_address(0));
 	if (!IS_ERR(env)) {
 		struct cl_env *cle;
@@ -783,7 +784,7 @@
 
 static void cl_env_exit(struct cl_env *cle)
 {
-	LASSERT(cle->ce_owner == NULL);
+	LASSERT(!cle->ce_owner);
 	lu_context_exit(&cle->ce_lu.le_ctx);
 	lu_context_exit(&cle->ce_ses);
 }
@@ -802,7 +803,7 @@
 	cle = cl_env_container(env);
 
 	LASSERT(cle->ce_ref > 0);
-	LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
+	LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));
 
 	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
 	if (--cle->ce_ref == 0) {
@@ -877,7 +878,7 @@
 
 	nest->cen_cookie = NULL;
 	env = cl_env_peek(&nest->cen_refcheck);
-	if (env != NULL) {
+	if (env) {
 		if (!cl_io_is_going(env))
 			return env;
 		cl_env_put(env, &nest->cen_refcheck);
@@ -929,14 +930,12 @@
 	const char       *typename;
 	struct lu_device *d;
 
-	LASSERT(ldt != NULL);
-
 	typename = ldt->ldt_name;
 	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
 	if (!IS_ERR(d)) {
 		int rc;
 
-		if (site != NULL)
+		if (site)
 			d->ld_site = site;
 		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
 		if (rc == 0) {
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 61f28eb..72f9924 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -69,7 +69,7 @@
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
-	while (page->cp_parent != NULL)
+	while (page->cp_parent)
 		page = page->cp_parent;
 	return page;
 }
@@ -110,7 +110,7 @@
 				return slice;
 		}
 		page = page->cp_child;
-	} while (page != NULL);
+	} while (page);
 	return NULL;
 }
 
@@ -127,7 +127,7 @@
 	assert_spin_locked(&hdr->coh_page_guard);
 
 	page = radix_tree_lookup(&hdr->coh_tree, index);
-	if (page != NULL)
+	if (page)
 		cl_page_get_trust(page);
 	return page;
 }
@@ -188,7 +188,7 @@
 			 * Pages for lsm-less file has no underneath sub-page
 			 * for osc, in case of ...
 			 */
-			PASSERT(env, page, slice != NULL);
+			PASSERT(env, page, slice);
 
 			page = slice->cpl_page;
 			/*
@@ -245,9 +245,9 @@
 	struct cl_object *obj  = page->cp_obj;
 
 	PASSERT(env, page, list_empty(&page->cp_batch));
-	PASSERT(env, page, page->cp_owner == NULL);
-	PASSERT(env, page, page->cp_req == NULL);
-	PASSERT(env, page, page->cp_parent == NULL);
+	PASSERT(env, page, !page->cp_owner);
+	PASSERT(env, page, !page->cp_req);
+	PASSERT(env, page, !page->cp_parent);
 	PASSERT(env, page, page->cp_state == CPS_FREEING);
 
 	might_sleep();
@@ -284,7 +284,7 @@
 	struct lu_object_header *head;
 
 	page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
-	if (page != NULL) {
+	if (page) {
 		int result = 0;
 
 		atomic_set(&page->cp_ref, 1);
@@ -305,7 +305,7 @@
 		head = o->co_lu.lo_header;
 		list_for_each_entry(o, &head->loh_layers,
 					co_lu.lo_linkage) {
-			if (o->co_ops->coo_page_init != NULL) {
+			if (o->co_ops->coo_page_init) {
 				result = o->co_ops->coo_page_init(env, o,
 								  page, vmpage);
 				if (result != 0) {
@@ -369,13 +369,13 @@
 		 */
 		page = cl_vmpage_page(vmpage, o);
 		PINVRNT(env, page,
-			ergo(page != NULL,
+			ergo(page,
 			     cl_page_vmpage(env, page) == vmpage &&
 			     (void *)radix_tree_lookup(&hdr->coh_tree,
 						       idx) == page));
 	}
 
-	if (page != NULL)
+	if (page)
 		return page;
 
 	/* allocate and initialize cl_page */
@@ -385,7 +385,7 @@
 
 	if (type == CPT_TRANSIENT) {
 		if (parent) {
-			LASSERT(page->cp_parent == NULL);
+			LASSERT(!page->cp_parent);
 			page->cp_parent = parent;
 			parent->cp_child = page;
 		}
@@ -418,7 +418,7 @@
 			      "fail to insert into radix tree: %d\n", err);
 	} else {
 		if (parent) {
-			LASSERT(page->cp_parent == NULL);
+			LASSERT(!page->cp_parent);
 			page->cp_parent = parent;
 			parent->cp_child = page;
 		}
@@ -426,7 +426,7 @@
 	}
 	spin_unlock(&hdr->coh_page_guard);
 
-	if (unlikely(ghost != NULL)) {
+	if (unlikely(ghost)) {
 		cl_page_delete0(env, ghost, 0);
 		cl_page_free(env, ghost);
 	}
@@ -467,14 +467,13 @@
 	owner  = pg->cp_owner;
 
 	return cl_page_in_use(pg) &&
-		ergo(parent != NULL, parent->cp_child == pg) &&
-		ergo(child != NULL, child->cp_parent == pg) &&
-		ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
-		ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
-		ergo(owner != NULL && parent != NULL,
+		ergo(parent, parent->cp_child == pg) &&
+		ergo(child, child->cp_parent == pg) &&
+		ergo(child, pg->cp_obj != child->cp_obj) &&
+		ergo(parent, pg->cp_obj != parent->cp_obj) &&
+		ergo(owner && parent,
 		     parent->cp_owner == pg->cp_owner->ci_parent) &&
-		ergo(owner != NULL && child != NULL,
-		     child->cp_owner->ci_parent == owner) &&
+		ergo(owner && child, child->cp_owner->ci_parent == owner) &&
 		/*
 		 * Either page is early in initialization (has neither child
 		 * nor parent yet), or it is in the object radix tree.
@@ -482,7 +481,7 @@
 		ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
 		     (void *)radix_tree_lookup(&header->coh_tree,
 					       pg->cp_index) == pg ||
-		     (child == NULL && parent == NULL));
+		     (!child && !parent));
 }
 
 static void cl_page_state_set0(const struct lu_env *env,
@@ -535,10 +534,10 @@
 	old = page->cp_state;
 	PASSERT(env, page, allowed_transitions[old][state]);
 	CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
-	for (; page != NULL; page = page->cp_child) {
+	for (; page; page = page->cp_child) {
 		PASSERT(env, page, page->cp_state == old);
 		PASSERT(env, page,
-			equi(state == CPS_OWNED, page->cp_owner != NULL));
+			equi(state == CPS_OWNED, page->cp_owner));
 
 		cl_page_state_set_trust(page, state);
 	}
@@ -584,7 +583,7 @@
 		LASSERT(page->cp_state == CPS_FREEING);
 
 		LASSERT(atomic_read(&page->cp_ref) == 0);
-		PASSERT(env, page, page->cp_owner == NULL);
+		PASSERT(env, page, !page->cp_owner);
 		PASSERT(env, page, list_empty(&page->cp_batch));
 		/*
 		 * Page is no longer reachable by other threads. Tear
@@ -609,11 +608,11 @@
 	page = cl_page_top(page);
 	do {
 		list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-			if (slice->cpl_ops->cpo_vmpage != NULL)
+			if (slice->cpl_ops->cpo_vmpage)
 				return slice->cpl_ops->cpo_vmpage(env, slice);
 		}
 		page = page->cp_child;
-	} while (page != NULL);
+	} while (page);
 	LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
 }
 EXPORT_SYMBOL(cl_page_vmpage);
@@ -639,10 +638,10 @@
 	 * can be rectified easily.
 	 */
 	top = (struct cl_page *)vmpage->private;
-	if (top == NULL)
+	if (!top)
 		return NULL;
 
-	for (page = top; page != NULL; page = page->cp_child) {
+	for (page = top; page; page = page->cp_child) {
 		if (cl_object_same(page->cp_obj, obj)) {
 			cl_page_get_trust(page);
 			break;
@@ -689,7 +688,7 @@
 					cpl_linkage) {		  \
 			__method = *(void **)((char *)__scan->cpl_ops + \
 					      __op);		    \
-			if (__method != NULL) {			 \
+			if (__method) {					\
 				__result = (*__method)(__env, __scan,   \
 						       ## __VA_ARGS__); \
 				if (__result != 0)		      \
@@ -697,7 +696,7 @@
 			}					       \
 		}						       \
 		__page = __page->cp_child;			      \
-	} while (__page != NULL && __result == 0);		      \
+	} while (__page && __result == 0);			      \
 	if (__result > 0)					       \
 		__result = 0;					   \
 	__result;						       \
@@ -717,12 +716,12 @@
 					cpl_linkage) {		  \
 			__method = *(void **)((char *)__scan->cpl_ops + \
 					      __op);		    \
-			if (__method != NULL)			   \
+			if (__method)				   \
 				(*__method)(__env, __scan,	      \
 					    ## __VA_ARGS__);	    \
 		}						       \
 		__page = __page->cp_child;			      \
-	} while (__page != NULL);				       \
+	} while (__page);					       \
 } while (0)
 
 #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)	       \
@@ -734,19 +733,19 @@
 	void		      (*__method)_proto;			\
 									    \
 	/* get to the bottom page. */				       \
-	while (__page->cp_child != NULL)				    \
+	while (__page->cp_child)					    \
 		__page = __page->cp_child;				  \
 	do {								\
 		list_for_each_entry_reverse(__scan, &__page->cp_layers, \
 						cpl_linkage) {	      \
 			__method = *(void **)((char *)__scan->cpl_ops +     \
 					      __op);			\
-			if (__method != NULL)			       \
+			if (__method)				       \
 				(*__method)(__env, __scan,		  \
 					    ## __VA_ARGS__);		\
 		}							   \
 		__page = __page->cp_parent;				 \
-	} while (__page != NULL);					   \
+	} while (__page);						   \
 } while (0)
 
 static int cl_page_invoke(const struct lu_env *env,
@@ -772,8 +771,8 @@
 
 static void cl_page_owner_clear(struct cl_page *page)
 {
-	for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
-		if (page->cp_owner != NULL) {
+	for (page = cl_page_top(page); page; page = page->cp_child) {
+		if (page->cp_owner) {
 			LASSERT(page->cp_owner->ci_owned_nr > 0);
 			page->cp_owner->ci_owned_nr--;
 			page->cp_owner = NULL;
@@ -784,10 +783,8 @@
 
 static void cl_page_owner_set(struct cl_page *page)
 {
-	for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
-		LASSERT(page->cp_owner != NULL);
+	for (page = cl_page_top(page); page; page = page->cp_child)
 		page->cp_owner->ci_owned_nr++;
-	}
 }
 
 void cl_page_disown0(const struct lu_env *env,
@@ -862,8 +859,8 @@
 					 struct cl_io *, int),
 					io, nonblock);
 		if (result == 0) {
-			PASSERT(env, pg, pg->cp_owner == NULL);
-			PASSERT(env, pg, pg->cp_req == NULL);
+			PASSERT(env, pg, !pg->cp_owner);
+			PASSERT(env, pg, !pg->cp_req);
 			pg->cp_owner = io;
 			pg->cp_task  = current;
 			cl_page_owner_set(pg);
@@ -921,7 +918,7 @@
 	io = cl_io_top(io);
 
 	cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
-	PASSERT(env, pg, pg->cp_owner == NULL);
+	PASSERT(env, pg, !pg->cp_owner);
 	pg->cp_owner = io;
 	pg->cp_task = current;
 	cl_page_owner_set(pg);
@@ -1037,7 +1034,7 @@
 			 * skip removing it.
 			 */
 			tmp = pg->cp_child;
-		for (; tmp != NULL; tmp = tmp->cp_child) {
+		for (; tmp; tmp = tmp->cp_child) {
 			void		    *value;
 			struct cl_object_header *hdr;
 
@@ -1135,7 +1132,7 @@
 	pg = cl_page_top_trusted((struct cl_page *)pg);
 	slice = container_of(pg->cp_layers.next,
 			     const struct cl_page_slice, cpl_linkage);
-	PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
+	PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
 	/*
 	 * Call ->cpo_is_vmlocked() directly instead of going through
 	 * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
@@ -1216,7 +1213,7 @@
 
 	PASSERT(env, pg, crt < CRT_NR);
 	/* cl_page::cp_req already cleared by the caller (osc_completion()) */
-	PASSERT(env, pg, pg->cp_req == NULL);
+	PASSERT(env, pg, !pg->cp_req);
 	PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
 
 	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
@@ -1304,7 +1301,7 @@
 		return -EINVAL;
 
 	list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
-		if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
+		if (!scan->cpl_ops->io[crt].cpo_cache_add)
 			continue;
 
 		result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
@@ -1450,8 +1447,8 @@
 {
 	struct cl_page *scan;
 
-	for (scan = cl_page_top((struct cl_page *)pg);
-	     scan != NULL; scan = scan->cp_child)
+	for (scan = cl_page_top((struct cl_page *)pg); scan;
+	     scan = scan->cp_child)
 		cl_page_header_print(env, cookie, printer, scan);
 	CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
 		       (const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 0975e44..c1310c2 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -42,7 +42,6 @@
 #include "../../include/linux/lnet/lnetctl.h"
 #include "../include/lustre_debug.h"
 #include "../include/lprocfs_status.h"
-#include "../include/lustre/lustre_build_version.h"
 #include <linux/list.h>
 #include "../include/cl_object.h"
 #include "llog_internal.h"
@@ -52,7 +51,7 @@
 struct list_head obd_types;
 DEFINE_RWLOCK(obd_dev_lock);
 
-/* The following are visible and mutable through /proc/sys/lustre/. */
+/* The following are visible and mutable through /sys/fs/lustre. */
 unsigned int obd_debug_peer_on_timeout;
 EXPORT_SYMBOL(obd_debug_peer_on_timeout);
 unsigned int obd_dump_on_timeout;
@@ -67,7 +66,7 @@
 EXPORT_SYMBOL(obd_timeout);
 unsigned int obd_timeout_set;
 EXPORT_SYMBOL(obd_timeout_set);
-/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */
+/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */
 unsigned int at_min;
 EXPORT_SYMBOL(at_min);
 unsigned int at_max = 600;
@@ -180,7 +179,7 @@
 	}
 
 	CDEBUG(D_IOCTL, "cmd = %x\n", cmd);
-	if (obd_ioctl_getdata(&buf, &len, (void *)arg)) {
+	if (obd_ioctl_getdata(&buf, &len, (void __user *)arg)) {
 		CERROR("OBD ioctl: data error\n");
 		return -EINVAL;
 	}
@@ -218,16 +217,16 @@
 			goto out;
 		}
 
-		if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) {
+		if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) {
 			CERROR("ioctl buffer too small to hold version\n");
 			err = -EINVAL;
 			goto out;
 		}
 
-		memcpy(data->ioc_bulk, BUILD_VERSION,
-		       strlen(BUILD_VERSION) + 1);
+		memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING,
+		       strlen(LUSTRE_VERSION_STRING) + 1);
 
-		err = obd_ioctl_popdata((void *)arg, data, len);
+		err = obd_ioctl_popdata((void __user *)arg, data, len);
 		if (err)
 			err = -EFAULT;
 		goto out;
@@ -246,7 +245,8 @@
 			goto out;
 		}
 
-		err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
+		err = obd_ioctl_popdata((void __user *)arg, data,
+					sizeof(*data));
 		if (err)
 			err = -EFAULT;
 		goto out;
@@ -283,7 +283,8 @@
 
 		CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1,
 		       dev);
-		err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
+		err = obd_ioctl_popdata((void __user *)arg, data,
+					sizeof(*data));
 		if (err)
 			err = -EFAULT;
 		goto out;
@@ -330,7 +331,7 @@
 			 (int)index, status, obd->obd_type->typ_name,
 			 obd->obd_name, obd->obd_uuid.uuid,
 			 atomic_read(&obd->obd_refcount));
-		err = obd_ioctl_popdata((void *)arg, data, len);
+		err = obd_ioctl_popdata((void __user *)arg, data, len);
 
 		err = 0;
 		goto out;
@@ -339,7 +340,7 @@
 	}
 
 	if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
-		if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) {
+		if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) {
 			err = -EINVAL;
 			goto out;
 		}
@@ -356,7 +357,7 @@
 		goto out;
 	}
 
-	if (obd == NULL) {
+	if (!obd) {
 		CERROR("OBD ioctl : No Device %d\n", data->ioc_dev);
 		err = -EINVAL;
 		goto out;
@@ -388,7 +389,7 @@
 		if (err)
 			goto out;
 
-		err = obd_ioctl_popdata((void *)arg, data, len);
+		err = obd_ioctl_popdata((void __user *)arg, data, len);
 		if (err)
 			err = -EFAULT;
 		goto out;
@@ -479,7 +480,7 @@
 
 	int lustre_register_fs(void);
 
-	LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
+	LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n");
 
 	spin_lock_init(&obd_types_lock);
 	obd_zombie_impexp_init();
@@ -507,7 +508,8 @@
 
 	/* Default the dirty page cache cap to 1/2 of system memory.
 	 * For clients with less memory, a larger fraction is needed
-	 * for other purposes (mostly for BGL). */
+	 * for other purposes (mostly for BGL).
+	 */
 	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
 		obd_max_dirty_pages = totalram_pages / 4;
 	else
@@ -542,8 +544,6 @@
 	return err;
 }
 
-/* liblustre doesn't call cleanup_obdclass, apparently.  we carry on in this
- * ifdef to the end of the file to cover module and versioning goo.*/
 static void cleanup_obdclass(void)
 {
 	int i;
@@ -577,7 +577,7 @@
 }
 
 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION);
+MODULE_DESCRIPTION("Lustre Class Driver Build Version: " LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(LUSTRE_VERSION_STRING);
 
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 228c44c..9042632 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -42,6 +42,7 @@
 #define DEBUG_SUBSYSTEM S_CLASS
 #include "../include/obd_class.h"
 #include "../include/lprocfs_status.h"
+#include "../include/lustre_kernelcomm.h"
 
 spinlock_t obd_types_lock;
 
@@ -69,17 +70,16 @@
 	struct obd_device *obd;
 
 	obd = kmem_cache_alloc(obd_device_cachep, GFP_NOFS | __GFP_ZERO);
-	if (obd != NULL)
+	if (obd)
 		obd->obd_magic = OBD_DEVICE_MAGIC;
 	return obd;
 }
 
 static void obd_device_free(struct obd_device *obd)
 {
-	LASSERT(obd != NULL);
 	LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n",
 		 obd, obd->obd_magic, OBD_DEVICE_MAGIC);
-	if (obd->obd_namespace != NULL) {
+	if (obd->obd_namespace) {
 		CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n",
 		       obd, obd->obd_namespace, obd->obd_force);
 		LBUG();
@@ -112,15 +112,6 @@
 	if (!type) {
 		const char *modname = name;
 
-		if (strcmp(modname, "obdfilter") == 0)
-			modname = "ofd";
-
-		if (strcmp(modname, LUSTRE_LWP_NAME) == 0)
-			modname = LUSTRE_OSP_NAME;
-
-		if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME)))
-			modname = LUSTRE_MDT_NAME;
-
 		if (!request_module("%s", modname)) {
 			CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
 			type = class_search_type(name);
@@ -202,7 +193,7 @@
 		goto failed;
 	}
 
-	if (ldt != NULL) {
+	if (ldt) {
 		type->typ_lu = ldt;
 		rc = lu_device_type_init(ldt);
 		if (rc != 0)
@@ -364,7 +355,7 @@
 		 obd, obd->obd_magic, OBD_DEVICE_MAGIC);
 	LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n",
 		 obd, obd->obd_minor, obd_devs[obd->obd_minor]);
-	LASSERT(obd_type != NULL);
+	LASSERT(obd_type);
 
 	CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
 	       obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
@@ -390,7 +381,8 @@
 
 		if (obd && strcmp(name, obd->obd_name) == 0) {
 			/* Make sure we finished attaching before we give
-			   out any references */
+			 * out any references
+			 */
 			LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
 			if (obd->obd_attached) {
 				read_unlock(&obd_dev_lock);
@@ -465,8 +457,9 @@
 EXPORT_SYMBOL(class_num2obd);
 
 /* Search for a client OBD connected to tgt_uuid.  If grp_uuid is
-   specified, then only the client with that uuid is returned,
-   otherwise any client connected to the tgt is returned. */
+ * specified, then only the client with that uuid is returned,
+ * otherwise any client connected to the tgt is returned.
+ */
 struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
 					  const char *typ_name,
 					  struct obd_uuid *grp_uuid)
@@ -497,9 +490,10 @@
 EXPORT_SYMBOL(class_find_client_obd);
 
 /* Iterate the obd_device list looking devices have grp_uuid. Start
-   searching at *next, and if a device is found, the next index to look
-   at is saved in *next. If next is NULL, then the first matching device
-   will always be returned. */
+ * searching at *next, and if a device is found, the next index to look
+ * at is saved in *next. If next is NULL, then the first matching device
+ * will always be returned.
+ */
 struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
 {
 	int i;
@@ -658,7 +652,7 @@
 	struct obd_device *obd = exp->exp_obd;
 
 	LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 
 	CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
 	       exp->exp_client_uuid.uuid, obd->obd_name);
@@ -698,7 +692,6 @@
 
 void class_export_put(struct obd_export *exp)
 {
-	LASSERT(exp != NULL);
 	LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
 	CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
 	       atomic_read(&exp->exp_refcount) - 1);
@@ -718,7 +711,8 @@
 
 /* Creates a new export, adds it to the hash table, and returns a
  * pointer to it. The refcount is 2: one for the hash reference, and
- * one for the pointer returned by this function. */
+ * one for the pointer returned by this function.
+ */
 struct obd_export *class_new_export(struct obd_device *obd,
 				    struct obd_uuid *cluuid)
 {
@@ -901,8 +895,9 @@
 	at_init(&at->iat_net_latency, 0, 0);
 	for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
 		/* max service estimates are tracked on the server side, so
-		   don't use the AT history here, just use the last reported
-		   val. (But keep hist for proc histogram, worst_ever) */
+		 * don't use the AT history here, just use the last reported
+		 * val. (But keep hist for proc histogram, worst_ever)
+		 */
 		at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT,
 			AT_FLG_NOHIST);
 	}
@@ -941,7 +936,8 @@
 	init_imp_at(&imp->imp_at);
 
 	/* the default magic is V2, will be used in connect RPC, and
-	 * then adjusted according to the flags in request/reply. */
+	 * then adjusted according to the flags in request/reply.
+	 */
 	imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
 
 	return imp;
@@ -950,7 +946,7 @@
 
 void class_destroy_import(struct obd_import *import)
 {
-	LASSERT(import != NULL);
+	LASSERT(import);
 	LASSERT(import != LP_POISON);
 
 	class_handle_unhash(&import->imp_handle);
@@ -970,8 +966,7 @@
 
 	LASSERT(lock->l_exp_refs_nr >= 0);
 
-	if (lock->l_exp_refs_target != NULL &&
-	    lock->l_exp_refs_target != exp) {
+	if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) {
 		LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n",
 			      exp, lock, lock->l_exp_refs_target);
 	}
@@ -1005,17 +1000,18 @@
 #endif
 
 /* A connection defines an export context in which preallocation can
-   be managed. This releases the export pointer reference, and returns
-   the export handle, so the export refcount is 1 when this function
-   returns. */
+ * be managed. This releases the export pointer reference, and returns
+ * the export handle, so the export refcount is 1 when this function
+ * returns.
+ */
 int class_connect(struct lustre_handle *conn, struct obd_device *obd,
 		  struct obd_uuid *cluuid)
 {
 	struct obd_export *export;
 
-	LASSERT(conn != NULL);
-	LASSERT(obd != NULL);
-	LASSERT(cluuid != NULL);
+	LASSERT(conn);
+	LASSERT(obd);
+	LASSERT(cluuid);
 
 	export = class_new_export(obd, cluuid);
 	if (IS_ERR(export))
@@ -1035,7 +1031,8 @@
  * and if disconnect really need
  * 2 - removing from hash
  * 3 - in client_unlink_export
- * The export pointer passed to this function can destroyed */
+ * The export pointer passed to this function can be destroyed
+ */
 int class_disconnect(struct obd_export *export)
 {
 	int already_disconnected;
@@ -1052,7 +1049,8 @@
 
 	/* class_cleanup(), abort_recovery(), and class_fail_export()
 	 * all end up in here, and if any of them race we shouldn't
-	 * call extra class_export_puts(). */
+	 * call extra class_export_puts().
+	 */
 	if (already_disconnected)
 		goto no_disconn;
 
@@ -1092,7 +1090,8 @@
 
 	/* Most callers into obd_disconnect are removing their own reference
 	 * (request, for example) in addition to the one from the hash table.
-	 * We don't have such a reference here, so make one. */
+	 * We don't have such a reference here, so make one.
+	 */
 	class_export_get(exp);
 	rc = obd_disconnect(exp);
 	if (rc)
@@ -1141,14 +1140,14 @@
 
 		spin_unlock(&obd_zombie_impexp_lock);
 
-		if (import != NULL) {
+		if (import) {
 			class_import_destroy(import);
 			spin_lock(&obd_zombie_impexp_lock);
 			zombies_count--;
 			spin_unlock(&obd_zombie_impexp_lock);
 		}
 
-		if (export != NULL) {
+		if (export) {
 			class_export_destroy(export);
 			spin_lock(&obd_zombie_impexp_lock);
 			zombies_count--;
@@ -1156,7 +1155,7 @@
 		}
 
 		cond_resched();
-	} while (import != NULL || export != NULL);
+	} while (import || export);
 }
 
 static struct completion	obd_zombie_start;
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
similarity index 80%
rename from drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
rename to drivers/staging/lustre/lustre/obdclass/kernelcomm.c
index d8230ae..8405ecc 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
@@ -42,9 +42,8 @@
 #define DEBUG_SUBSYSTEM S_CLASS
 #define D_KUC D_OTHER
 
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* This is the kernel side (liblustre as well). */
+#include "../include/obd_support.h"
+#include "../include/lustre_kernelcomm.h"
 
 /**
  * libcfs_kkuc_msg_put - send an message from kernel to userspace
@@ -58,14 +57,14 @@
 	ssize_t count = kuch->kuc_msglen;
 	loff_t offset = 0;
 	mm_segment_t fs;
-	int rc = -ENOSYS;
+	int rc = -ENXIO;
 
-	if (filp == NULL || IS_ERR(filp))
+	if (IS_ERR_OR_NULL(filp))
 		return -EBADF;
 
 	if (kuch->kuc_magic != KUC_MAGIC) {
 		CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic);
-		return -ENOSYS;
+		return rc;
 	}
 
 	fs = get_fs();
@@ -90,18 +89,20 @@
 }
 EXPORT_SYMBOL(libcfs_kkuc_msg_put);
 
-/* Broadcast groups are global across all mounted filesystems;
+/*
+ * Broadcast groups are global across all mounted filesystems;
  * i.e. registering for a group on 1 fs will get messages for that
- * group from any fs */
+ * group from any fs
+ */
 /** A single group registration has a uid and a file pointer */
 struct kkuc_reg {
-	struct list_head	kr_chain;
-	int		kr_uid;
+	struct list_head kr_chain;
+	int		 kr_uid;
 	struct file	*kr_fp;
-	__u32		kr_data;
+	char		 kr_data[0];
 };
 
-static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {};
+static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {};
 /* Protect message sending against remove and adds */
 static DECLARE_RWSEM(kg_sem);
 
@@ -109,9 +110,10 @@
  * @param filp pipe to write into
  * @param uid identifier for this receiver
  * @param group group number
+ * @param data user data of data_len bytes, copied into the registration
  */
 int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group,
-			  __u32 data)
+			  void *data, size_t data_len)
 {
 	struct kkuc_reg *reg;
 
@@ -121,20 +123,20 @@
 	}
 
 	/* fput in group_rem */
-	if (filp == NULL)
+	if (!filp)
 		return -EBADF;
 
 	/* freed in group_rem */
-	reg = kmalloc(sizeof(*reg), 0);
-	if (reg == NULL)
+	reg = kmalloc(sizeof(*reg) + data_len, 0);
+	if (!reg)
 		return -ENOMEM;
 
 	reg->kr_fp = filp;
 	reg->kr_uid = uid;
-	reg->kr_data = data;
+	memcpy(reg->kr_data, data, data_len);
 
 	down_write(&kg_sem);
-	if (kkuc_groups[group].next == NULL)
+	if (!kkuc_groups[group].next)
 		INIT_LIST_HEAD(&kkuc_groups[group]);
 	list_add(&reg->kr_chain, &kkuc_groups[group]);
 	up_write(&kg_sem);
@@ -145,14 +147,14 @@
 }
 EXPORT_SYMBOL(libcfs_kkuc_group_add);
 
-int libcfs_kkuc_group_rem(int uid, int group)
+int libcfs_kkuc_group_rem(int uid, unsigned int group)
 {
 	struct kkuc_reg *reg, *next;
 
-	if (kkuc_groups[group].next == NULL)
+	if (!kkuc_groups[group].next)
 		return 0;
 
-	if (uid == 0) {
+	if (!uid) {
 		/* Broadcast a shutdown message */
 		struct kuc_hdr lh;
 
@@ -165,11 +167,11 @@
 
 	down_write(&kg_sem);
 	list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
-		if ((uid == 0) || (uid == reg->kr_uid)) {
+		if (!uid || (uid == reg->kr_uid)) {
 			list_del(&reg->kr_chain);
 			CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
 			       reg->kr_uid, reg->kr_fp, group);
-			if (reg->kr_fp != NULL)
+			if (reg->kr_fp)
 				fput(reg->kr_fp);
 			kfree(reg);
 		}
@@ -180,28 +182,30 @@
 }
 EXPORT_SYMBOL(libcfs_kkuc_group_rem);
 
-int libcfs_kkuc_group_put(int group, void *payload)
+int libcfs_kkuc_group_put(unsigned int group, void *payload)
 {
 	struct kkuc_reg	*reg;
-	int		 rc = 0;
+	int rc = 0;
 	int one_success = 0;
 
-	down_read(&kg_sem);
+	down_write(&kg_sem);
 	list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
-		if (reg->kr_fp != NULL) {
+		if (reg->kr_fp) {
 			rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
-			if (rc == 0)
+			if (!rc) {
 				one_success = 1;
-			else if (rc == -EPIPE) {
+			} else if (rc == -EPIPE) {
 				fput(reg->kr_fp);
 				reg->kr_fp = NULL;
 			}
 		}
 	}
-	up_read(&kg_sem);
+	up_write(&kg_sem);
 
-	/* don't return an error if the message has been delivered
-	 * at least to one agent */
+	/*
+	 * don't return an error if the message has been delivered
+	 * at least to one agent
+	 */
 	if (one_success)
 		rc = 0;
 
@@ -213,9 +217,9 @@
  * Calls a callback function for each link of the given kuc group.
  * @param group the group to call the function on.
  * @param cb_func the function to be called.
- * @param cb_arg iextra argument to be passed to the callback function.
+ * @param cb_arg extra argument to be passed to the callback function.
  */
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
+int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func,
 			      void *cb_arg)
 {
 	struct kkuc_reg *reg;
@@ -227,15 +231,15 @@
 	}
 
 	/* no link for this group */
-	if (kkuc_groups[group].next == NULL)
+	if (!kkuc_groups[group].next)
 		return 0;
 
-	down_write(&kg_sem);
+	down_read(&kg_sem);
 	list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
-		if (reg->kr_fp != NULL)
+		if (reg->kr_fp)
 			rc = cb_func(reg->kr_data, cb_arg);
 	}
-	up_write(&kg_sem);
+	up_read(&kg_sem);
 
 	return rc;
 }
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index a055cbb..8eddf20 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -59,7 +59,6 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <asm/ioctls.h>
-#include <linux/poll.h>
 #include <linux/uaccess.h>
 #include <linux/miscdevice.h>
 #include <linux/seq_file.h>
@@ -71,17 +70,16 @@
 #include "../../include/obd_class.h"
 #include "../../include/lprocfs_status.h"
 #include "../../include/lustre_ver.h"
-#include "../../include/lustre/lustre_build_version.h"
 
 /* buffer MUST be at least the size of obd_ioctl_hdr */
-int obd_ioctl_getdata(char **buf, int *len, void *arg)
+int obd_ioctl_getdata(char **buf, int *len, void __user *arg)
 {
 	struct obd_ioctl_hdr hdr;
 	struct obd_ioctl_data *data;
 	int err;
 	int offset = 0;
 
-	if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
+	if (copy_from_user(&hdr, arg, sizeof(hdr)))
 		return -EFAULT;
 
 	if (hdr.ioc_version != OBD_IOCTL_VERSION) {
@@ -104,9 +102,10 @@
 	/* When there are lots of processes calling vmalloc on multi-core
 	 * system, the high lock contention will hurt performance badly,
 	 * obdfilter-survey is an example, which relies on ioctl. So we'd
-	 * better avoid vmalloc on ioctl path. LU-66 */
+	 * better avoid vmalloc on ioctl path. LU-66
+	 */
 	*buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS);
-	if (*buf == NULL) {
+	if (!*buf) {
 		CERROR("Cannot allocate control buffer of len %d\n",
 		       hdr.ioc_len);
 		return -EINVAL;
@@ -114,7 +113,7 @@
 	*len = hdr.ioc_len;
 	data = (struct obd_ioctl_data *)*buf;
 
-	if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) {
+	if (copy_from_user(*buf, arg, hdr.ioc_len)) {
 		err = -EFAULT;
 		goto free_buf;
 	}
@@ -144,9 +143,8 @@
 		offset += cfs_size_round(data->ioc_inllen3);
 	}
 
-	if (data->ioc_inllen4) {
+	if (data->ioc_inllen4)
 		data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
-	}
 
 	return 0;
 
@@ -156,7 +154,7 @@
 }
 EXPORT_SYMBOL(obd_ioctl_getdata);
 
-int obd_ioctl_popdata(void *arg, void *data, int len)
+int obd_ioctl_popdata(void __user *arg, void *data, int len)
 {
 	int err;
 
@@ -240,7 +238,7 @@
 		struct obd_device *obd;
 
 		obd = class_num2obd(i);
-		if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
+		if (!obd || !obd->obd_attached || !obd->obd_set_up)
 			continue;
 
 		LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
@@ -250,9 +248,8 @@
 		class_incref(obd, __func__, current);
 		read_unlock(&obd_dev_lock);
 
-		if (obd_health_check(NULL, obd)) {
+		if (obd_health_check(NULL, obd))
 			healthy = false;
-		}
 		class_decref(obd, __func__, current);
 		read_lock(&obd_dev_lock);
 	}
@@ -360,7 +357,7 @@
 	struct obd_device *obd = class_num2obd((int)index);
 	char *status;
 
-	if (obd == NULL)
+	if (!obd)
 		return 0;
 
 	LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
@@ -424,7 +421,7 @@
 	struct dentry *file;
 
 	lustre_kobj = kobject_create_and_add("lustre", fs_kobj);
-	if (lustre_kobj == NULL)
+	if (!lustre_kobj)
 		goto out;
 
 	/* Create the files associated with this kobject */
@@ -456,8 +453,7 @@
 
 int class_procfs_clean(void)
 {
-	if (debugfs_lustre_root != NULL)
-		debugfs_remove_recursive(debugfs_lustre_root);
+	debugfs_remove_recursive(debugfs_lustre_root);
 
 	debugfs_lustre_root = NULL;
 
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index f956d7e..f7ee605 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -76,8 +76,6 @@
  */
 static void llog_free_handle(struct llog_handle *loghandle)
 {
-	LASSERT(loghandle != NULL);
-
 	/* failed llog_init_handle */
 	if (!loghandle->lgh_hdr)
 		goto out;
@@ -115,7 +113,7 @@
 	if (rc)
 		return rc;
 
-	if (lop->lop_read_header == NULL)
+	if (!lop->lop_read_header)
 		return -EOPNOTSUPP;
 
 	rc = lop->lop_read_header(env, handle);
@@ -144,7 +142,7 @@
 	struct llog_log_hdr	*llh;
 	int			 rc;
 
-	LASSERT(handle->lgh_hdr == NULL);
+	LASSERT(!handle->lgh_hdr);
 
 	llh = kzalloc(sizeof(*llh), GFP_NOFS);
 	if (!llh)
@@ -228,11 +226,11 @@
 		return 0;
 	}
 
-	if (cd != NULL) {
+	if (cd) {
 		last_called_index = cd->lpcd_first_idx;
 		index = cd->lpcd_first_idx + 1;
 	}
-	if (cd != NULL && cd->lpcd_last_idx)
+	if (cd && cd->lpcd_last_idx)
 		last_index = cd->lpcd_last_idx;
 	else
 		last_index = LLOG_BITMAP_BYTES * 8 - 1;
@@ -262,7 +260,8 @@
 
 		/* NB: when rec->lrh_len is accessed it is already swabbed
 		 * since it is used at the "end" of the loop and the rec
-		 * swabbing is done at the beginning of the loop. */
+		 * swabbing is done at the beginning of the loop.
+		 */
 		for (rec = (struct llog_rec_hdr *)buf;
 		     (char *)rec < buf + LLOG_CHUNK_SIZE;
 		     rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
@@ -328,7 +327,7 @@
 	}
 
 out:
-	if (cd != NULL)
+	if (cd)
 		cd->lpcd_last_idx = last_called_index;
 
 	kfree(buf);
@@ -376,17 +375,20 @@
 	lpi->lpi_catdata   = catdata;
 
 	if (fork) {
+		struct task_struct *task;
+
 		/* The new thread can't use parent env,
-		 * init the new one in llog_process_thread_daemonize. */
+		 * init the new one in llog_process_thread_daemonize.
+		 */
 		lpi->lpi_env = NULL;
 		init_completion(&lpi->lpi_completion);
-		rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi,
-					     "llog_process_thread"));
-		if (IS_ERR_VALUE(rc)) {
+		task = kthread_run(llog_process_thread_daemonize, lpi,
+				   "llog_process_thread");
+		if (IS_ERR(task)) {
+			rc = PTR_ERR(task);
 			CERROR("%s: cannot start thread: rc = %d\n",
 			       loghandle->lgh_ctxt->loc_obd->obd_name, rc);
-			kfree(lpi);
-			return rc;
+			goto out_lpi;
 		}
 		wait_for_completion(&lpi->lpi_completion);
 	} else {
@@ -394,6 +396,7 @@
 		llog_process_thread(lpi);
 	}
 	rc = lpi->lpi_rc;
+out_lpi:
 	kfree(lpi);
 	return rc;
 }
@@ -416,13 +419,13 @@
 	LASSERT(ctxt);
 	LASSERT(ctxt->loc_logops);
 
-	if (ctxt->loc_logops->lop_open == NULL) {
+	if (!ctxt->loc_logops->lop_open) {
 		*lgh = NULL;
 		return -EOPNOTSUPP;
 	}
 
 	*lgh = llog_alloc_handle();
-	if (*lgh == NULL)
+	if (!*lgh)
 		return -ENOMEM;
 	(*lgh)->lgh_ctxt = ctxt;
 	(*lgh)->lgh_logops = ctxt->loc_logops;
@@ -449,7 +452,7 @@
 	rc = llog_handle2ops(loghandle, &lop);
 	if (rc)
 		goto out;
-	if (lop->lop_close == NULL) {
+	if (!lop->lop_close) {
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index 0f05e9c..b88ccba 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -69,7 +69,7 @@
 	struct llog_handle	*loghandle;
 	int			 rc = 0;
 
-	if (cathandle == NULL)
+	if (!cathandle)
 		return -EBADF;
 
 	down_write(&cathandle->lgh_lock);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 9bc5199..826623f 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -88,7 +88,8 @@
 	spin_unlock(&obd->obd_dev_lock);
 
 	/* obd->obd_starting is needed for the case of cleanup
-	 * in error case while obd is starting up. */
+	 * in error case while obd is starting up.
+	 */
 	LASSERTF(obd->obd_starting == 1 ||
 		 obd->obd_stopping == 1 || obd->obd_set_up == 0,
 		 "wrong obd state: %d/%d/%d\n", !!obd->obd_starting,
@@ -110,11 +111,8 @@
 	struct obd_llog_group *olg;
 	int rc, idx;
 
-	LASSERT(ctxt != NULL);
-	LASSERT(ctxt != LP_POISON);
-
 	olg = ctxt->loc_olg;
-	LASSERT(olg != NULL);
+	LASSERT(olg);
 	LASSERT(olg != LP_POISON);
 
 	idx = ctxt->loc_idx;
@@ -151,7 +149,7 @@
 	if (index < 0 || index >= LLOG_MAX_CTXTS)
 		return -EINVAL;
 
-	LASSERT(olg != NULL);
+	LASSERT(olg);
 
 	ctxt = llog_new_ctxt(obd);
 	if (!ctxt)
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 3aa7393..967ba2e 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -346,7 +346,6 @@
 		__swab32s(&lcfg->lcfg_buflens[i]);
 
 	print_lustre_cfg(lcfg);
-	return;
 }
 EXPORT_SYMBOL(lustre_swab_lustre_cfg);
 
@@ -387,7 +386,8 @@
 		 *
 		 * Overwrite fields from the end first, so they are not
 		 * clobbered, and use memmove() instead of memcpy() because
-		 * the source and target buffers overlap.  bug 16771 */
+		 * the source and target buffers overlap.  bug 16771
+		 */
 		createtime = cm32->cm_createtime;
 		canceltime = cm32->cm_canceltime;
 		memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32);
@@ -406,7 +406,5 @@
 		__swab64s(&marker->cm_createtime);
 		__swab64s(&marker->cm_canceltime);
 	}
-
-	return;
 }
 EXPORT_SYMBOL(lustre_swab_cfg_marker);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
index 6acc4a1..13aca5b 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
@@ -48,14 +48,15 @@
 	int				smp_id;
 	unsigned long			flags = 0;
 
-	if (stats == NULL)
+	if (!stats)
 		return;
 
 	LASSERTF(0 <= idx && idx < stats->ls_num,
 		 "idx %d, ls_num %hu\n", idx, stats->ls_num);
 
 	/* With per-client stats, statistics are allocated only for
-	 * single CPU area, so the smp_id should be 0 always. */
+	 * single CPU area, so the smp_id should be 0 always.
+	 */
 	smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
 	if (smp_id < 0)
 		return;
@@ -96,14 +97,15 @@
 	int				smp_id;
 	unsigned long			flags = 0;
 
-	if (stats == NULL)
+	if (!stats)
 		return;
 
 	LASSERTF(0 <= idx && idx < stats->ls_num,
 		 "idx %d, ls_num %hu\n", idx, stats->ls_num);
 
 	/* With per-client stats, statistics are allocated only for
-	 * single CPU area, so the smp_id should be 0 always. */
+	 * single CPU area, so the smp_id should be 0 always.
+	 */
 	smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
 	if (smp_id < 0)
 		return;
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 51fe15f..28bb4e5 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -109,7 +109,7 @@
 	__u64 mask = 1;
 	int i, ret = 0;
 
-	for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+	for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
 		if (flags & mask)
 			ret += snprintf(page + ret, count - ret, "%s%s",
 					ret ? sep : "", obd_connect_names[i]);
@@ -149,10 +149,10 @@
 		}
 		/*
 		 * Need to think these cases :
-		 *      1. #echo x.00 > /proc/xxx       output result : x
-		 *      2. #echo x.0x > /proc/xxx       output result : x.0x
-		 *      3. #echo x.x0 > /proc/xxx       output result : x.x
-		 *      4. #echo x.xx > /proc/xxx       output result : x.xx
+		 *      1. #echo x.00 > /sys/xxx       output result : x
+		 *      2. #echo x.0x > /sys/xxx       output result : x.0x
+		 *      3. #echo x.x0 > /sys/xxx       output result : x.x
+		 *      4. #echo x.xx > /sys/xxx       output result : x.xx
 		 *      Only reserved 2 bits fraction.
 		 */
 		for (i = 0; i < (5 - prtn); i++)
@@ -199,7 +199,7 @@
 	if (pbuf == end)
 		return -EINVAL;
 
-	if (end != NULL && *end == '.') {
+	if (end && *end == '.') {
 		int temp_val, pow = 1;
 		int i;
 
@@ -247,7 +247,7 @@
 	struct dentry *entry;
 	umode_t mode = 0;
 
-	if (root == NULL || name == NULL || fops == NULL)
+	if (!root || !name || !fops)
 		return ERR_PTR(-EINVAL);
 
 	if (fops->read)
@@ -261,7 +261,7 @@
 	}
 	return entry;
 }
-EXPORT_SYMBOL(ldebugfs_add_simple);
+EXPORT_SYMBOL_GPL(ldebugfs_add_simple);
 
 static struct file_operations lprocfs_generic_fops = { };
 
@@ -272,7 +272,7 @@
 	if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list))
 		return -EINVAL;
 
-	while (list->name != NULL) {
+	while (list->name) {
 		struct dentry *entry;
 		umode_t mode = 0;
 
@@ -294,14 +294,14 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(ldebugfs_add_vars);
+EXPORT_SYMBOL_GPL(ldebugfs_add_vars);
 
 void ldebugfs_remove(struct dentry **entryp)
 {
 	debugfs_remove_recursive(*entryp);
 	*entryp = NULL;
 }
-EXPORT_SYMBOL(ldebugfs_remove);
+EXPORT_SYMBOL_GPL(ldebugfs_remove);
 
 struct dentry *ldebugfs_register(const char *name,
 				 struct dentry *parent,
@@ -327,7 +327,7 @@
 out:
 	return entry;
 }
-EXPORT_SYMBOL(ldebugfs_register);
+EXPORT_SYMBOL_GPL(ldebugfs_register);
 
 /* Generic callbacks */
 int lprocfs_rd_uint(struct seq_file *m, void *data)
@@ -491,7 +491,7 @@
 	char *imp_state_name = NULL;
 	int rc;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	rc = lprocfs_climp_check(obd);
 	if (rc)
 		return rc;
@@ -514,7 +514,7 @@
 	struct ptlrpc_connection *conn;
 	int rc;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 
 	rc = lprocfs_climp_check(obd);
 	if (rc)
@@ -543,7 +543,7 @@
 
 	memset(cnt, 0, sizeof(*cnt));
 
-	if (stats == NULL) {
+	if (!stats) {
 		/* set count to 1 to avoid divide-by-zero errs in callers */
 		cnt->lc_count = 1;
 		return;
@@ -554,7 +554,7 @@
 	num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
 
 	for (i = 0; i < num_entry; i++) {
-		if (stats->ls_percpu[i] == NULL)
+		if (!stats->ls_percpu[i])
 			continue;
 		percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
 
@@ -577,7 +577,7 @@
 #define flag2str(flag, first)						\
 	do {								\
 		if (imp->imp_##flag)					\
-		     seq_printf(m, "%s" #flag, first ? "" : ", ");	\
+			seq_printf(m, "%s" #flag, first ? "" : ", ");	\
 	} while (0)
 static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
 {
@@ -604,7 +604,7 @@
 	int i;
 	bool first = true;
 
-	for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+	for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
 		if (flags & mask) {
 			seq_printf(m, "%s%s",
 					first ? sep : "", obd_connect_names[i]);
@@ -629,7 +629,7 @@
 	int				rw	= 0;
 	int				rc;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	rc = lprocfs_climp_check(obd);
 	if (rc)
 		return rc;
@@ -642,21 +642,21 @@
 		     "    target: %s\n"
 		     "    state: %s\n"
 		     "    instance: %u\n"
-		     "    connect_flags: [",
+		     "    connect_flags: [ ",
 		     obd->obd_name,
 		     obd2cli_tgt(obd),
 		     ptlrpc_import_state_name(imp->imp_state),
 		     imp->imp_connect_data.ocd_instance);
 	obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", ");
 	seq_printf(m,
-		      "]\n"
-		      "    import_flags: [");
+		      " ]\n"
+		      "    import_flags: [ ");
 	obd_import_flags2str(imp, m);
 
 	seq_printf(m,
-		      "]\n"
+		      " ]\n"
 		      "    connection:\n"
-		      "       failover_nids: [");
+		      "       failover_nids: [ ");
 	spin_lock(&imp->imp_lock);
 	j = 0;
 	list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
@@ -665,13 +665,13 @@
 		seq_printf(m, "%s%s", j ? ", " : "", nidstr);
 		j++;
 	}
-	if (imp->imp_connection != NULL)
+	if (imp->imp_connection)
 		libcfs_nid2str_r(imp->imp_connection->c_peer.nid,
 				 nidstr, sizeof(nidstr));
 	else
 		strncpy(nidstr, "<none>", sizeof(nidstr));
 	seq_printf(m,
-		      "]\n"
+		      " ]\n"
 		      "       current_connection: %s\n"
 		      "       connection_attempts: %u\n"
 		      "       generation: %u\n"
@@ -682,7 +682,7 @@
 		      atomic_read(&imp->imp_inval_count));
 	spin_unlock(&imp->imp_lock);
 
-	if (obd->obd_svc_stats == NULL)
+	if (!obd->obd_svc_stats)
 		goto out_climp;
 
 	header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
@@ -779,7 +779,7 @@
 	struct obd_import *imp;
 	int j, k, rc;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	rc = lprocfs_climp_check(obd);
 	if (rc)
 		return rc;
@@ -795,7 +795,7 @@
 			&imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN];
 		if (ish->ish_state == 0)
 			continue;
-		seq_printf(m, " - [%lld, %s]\n", (s64)ish->ish_time,
+		seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time,
 			   ptlrpc_import_state_name(ish->ish_state));
 	}
 
@@ -825,7 +825,7 @@
 	struct dhms ts;
 	int i, rc;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	rc = lprocfs_climp_check(obd);
 	if (rc)
 		return rc;
@@ -942,7 +942,7 @@
 
 	return rc;
 }
-EXPORT_SYMBOL(lprocfs_obd_setup);
+EXPORT_SYMBOL_GPL(lprocfs_obd_setup);
 
 int lprocfs_obd_cleanup(struct obd_device *obd)
 {
@@ -957,7 +957,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(lprocfs_obd_cleanup);
+EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup);
 
 int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
 {
@@ -967,12 +967,12 @@
 	unsigned long           flags = 0;
 	int                     i;
 
-	LASSERT(stats->ls_percpu[cpuid] == NULL);
+	LASSERT(!stats->ls_percpu[cpuid]);
 	LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
 
 	percpusize = lprocfs_stats_counter_size(stats);
 	LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
-	if (stats->ls_percpu[cpuid] != NULL) {
+	if (stats->ls_percpu[cpuid]) {
 		rc = 0;
 		if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
 			if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
@@ -1017,7 +1017,7 @@
 
 	/* alloc percpu pointers for all possible cpu slots */
 	LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
-	if (stats == NULL)
+	if (!stats)
 		return NULL;
 
 	stats->ls_num = num;
@@ -1027,14 +1027,14 @@
 	/* alloc num of counter headers */
 	LIBCFS_ALLOC(stats->ls_cnt_header,
 		     stats->ls_num * sizeof(struct lprocfs_counter_header));
-	if (stats->ls_cnt_header == NULL)
+	if (!stats->ls_cnt_header)
 		goto fail;
 
 	if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
 		/* contains only one set counters */
 		percpusize = lprocfs_stats_counter_size(stats);
 		LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
-		if (stats->ls_percpu[0] == NULL)
+		if (!stats->ls_percpu[0])
 			goto fail;
 		stats->ls_biggest_alloc_num = 1;
 	} else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
@@ -1059,7 +1059,7 @@
 	unsigned int percpusize;
 	unsigned int i;
 
-	if (stats == NULL || stats->ls_num == 0)
+	if (!stats || stats->ls_num == 0)
 		return;
 	*statsh = NULL;
 
@@ -1070,9 +1070,9 @@
 
 	percpusize = lprocfs_stats_counter_size(stats);
 	for (i = 0; i < num_entry; i++)
-		if (stats->ls_percpu[i] != NULL)
+		if (stats->ls_percpu[i])
 			LIBCFS_FREE(stats->ls_percpu[i], percpusize);
-	if (stats->ls_cnt_header != NULL)
+	if (stats->ls_cnt_header)
 		LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
 					sizeof(struct lprocfs_counter_header));
 	LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
@@ -1090,7 +1090,7 @@
 	num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
 
 	for (i = 0; i < num_entry; i++) {
-		if (stats->ls_percpu[i] == NULL)
+		if (!stats->ls_percpu[i])
 			continue;
 		for (j = 0; j < stats->ls_num; j++) {
 			percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
@@ -1219,7 +1219,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(ldebugfs_register_stats);
+EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
 
 void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
 			  unsigned conf, const char *name, const char *units)
@@ -1230,10 +1230,8 @@
 	unsigned int			i;
 	unsigned int			num_cpu;
 
-	LASSERT(stats != NULL);
-
 	header = &stats->ls_cnt_header[index];
-	LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n",
+	LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n",
 		 index, name, units);
 
 	header->lc_config = conf;
@@ -1242,7 +1240,7 @@
 
 	num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
 	for (i = 0; i < num_cpu; ++i) {
-		if (stats->ls_percpu[i] == NULL)
+		if (!stats->ls_percpu[i])
 			continue;
 		percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
 		percpu_cntr->lc_count		= 0;
@@ -1270,7 +1268,7 @@
 {
 	__s64 ret = 0;
 
-	if (lc == NULL || header == NULL)
+	if (!lc || !header)
 		return 0;
 
 	switch (field) {
@@ -1319,8 +1317,8 @@
 }
 EXPORT_SYMBOL(lprocfs_write_u64_helper);
 
-int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
-			      __u64 *val, int mult)
+int lprocfs_write_frac_u64_helper(const char __user *buffer,
+				  unsigned long count, __u64 *val, int mult)
 {
 	char kernbuf[22], *end, *pbuf;
 	__u64 whole, frac = 0, units;
@@ -1412,7 +1410,7 @@
 
 	/* there is no strnstr() in rhel5 and ubuntu kernels */
 	val = lprocfs_strnstr(buffer, name, buflen);
-	if (val == NULL)
+	if (!val)
 		return (char *)buffer;
 
 	val += strlen(name);			     /* skip prefix */
@@ -1446,7 +1444,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(ldebugfs_seq_create);
+EXPORT_SYMBOL_GPL(ldebugfs_seq_create);
 
 int ldebugfs_obd_seq_create(struct obd_device *dev,
 			    const char *name,
@@ -1457,7 +1455,7 @@
 	return ldebugfs_seq_create(dev->obd_debugfs_entry, name,
 				   mode, seq_fops, data);
 }
-EXPORT_SYMBOL(ldebugfs_obd_seq_create);
+EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create);
 
 void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
 {
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index ce248f4..0fa4bac 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -86,13 +86,12 @@
 	 */
 	fid = lu_object_fid(o);
 	if (fid_is_zero(fid)) {
-		LASSERT(top->loh_hash.next == NULL
-			&& top->loh_hash.pprev == NULL);
+		LASSERT(!top->loh_hash.next && !top->loh_hash.pprev);
 		LASSERT(list_empty(&top->loh_lru));
 		if (!atomic_dec_and_test(&top->loh_ref))
 			return;
 		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
-			if (o->lo_ops->loo_object_release != NULL)
+			if (o->lo_ops->loo_object_release)
 				o->lo_ops->loo_object_release(env, o);
 		}
 		lu_object_free(env, orig);
@@ -119,7 +118,7 @@
 	 * layers, and notify them that object is no longer busy.
 	 */
 	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
-		if (o->lo_ops->loo_object_release != NULL)
+		if (o->lo_ops->loo_object_release)
 			o->lo_ops->loo_object_release(env, o);
 	}
 
@@ -210,7 +209,7 @@
 	 * lu_object_header.
 	 */
 	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
-	if (top == NULL)
+	if (!top)
 		return ERR_PTR(-ENOMEM);
 	if (IS_ERR(top))
 		return top;
@@ -245,7 +244,7 @@
 	} while (!clean);
 
 	list_for_each_entry_reverse(scan, layers, lo_linkage) {
-		if (scan->lo_ops->loo_object_start != NULL) {
+		if (scan->lo_ops->loo_object_start) {
 			result = scan->lo_ops->loo_object_start(env, scan);
 			if (result != 0) {
 				lu_object_free(env, top);
@@ -276,7 +275,7 @@
 	 * First call ->loo_object_delete() method to release all resources.
 	 */
 	list_for_each_entry_reverse(scan, layers, lo_linkage) {
-		if (scan->lo_ops->loo_object_delete != NULL)
+		if (scan->lo_ops->loo_object_delete)
 			scan->lo_ops->loo_object_delete(env, scan);
 	}
 
@@ -296,7 +295,6 @@
 		 */
 		o = container_of0(splice.prev, struct lu_object, lo_linkage);
 		list_del_init(&o->lo_linkage);
-		LASSERT(o->lo_ops->loo_object_free != NULL);
 		o->lo_ops->loo_object_free(env, o);
 	}
 
@@ -451,7 +449,6 @@
 	va_start(args, format);
 
 	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
-	LASSERT(key != NULL);
 
 	used = strlen(key->lck_area);
 	complete = format[strlen(format) - 1] == '\n';
@@ -508,7 +505,7 @@
 		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
 			   o->lo_dev->ld_type->ldt_name, o);
 
-		if (o->lo_ops->loo_object_print != NULL)
+		if (o->lo_ops->loo_object_print)
 			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);
 
 		(*printer)(env, cookie, "\n");
@@ -535,9 +532,10 @@
 	*version = ver;
 	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
 	/* cfs_hash_bd_peek_locked is a somehow "internal" function
-	 * of cfs_hash, it doesn't add refcount on object. */
+	 * of cfs_hash, it doesn't add refcount on object.
+	 */
 	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
-	if (hnode == NULL) {
+	if (!hnode) {
 		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
 		return ERR_PTR(-ENOENT);
 	}
@@ -636,7 +634,7 @@
 	 * If dying object is found during index search, add @waiter to the
 	 * site wait-queue and return ERR_PTR(-EAGAIN).
 	 */
-	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+	if (conf && conf->loc_flags & LOC_F_NEW)
 		return lu_object_new(env, dev, f, conf);
 
 	s  = dev->ld_site;
@@ -715,7 +713,7 @@
 	top = lu_object_find(env, dev, f, conf);
 	if (!IS_ERR(top)) {
 		obj = lu_object_locate(top->lo_header, dev->ld_type);
-		if (obj == NULL)
+		if (!obj)
 			lu_object_put(env, top);
 	} else
 		obj = top;
@@ -966,11 +964,11 @@
 						 CFS_HASH_NO_ITEMREF |
 						 CFS_HASH_DEPTH |
 						 CFS_HASH_ASSERT_EMPTY);
-		if (s->ls_obj_hash != NULL)
+		if (s->ls_obj_hash)
 			break;
 	}
 
-	if (s->ls_obj_hash == NULL) {
+	if (!s->ls_obj_hash) {
 		CERROR("failed to create lu_site hash with bits: %d\n", bits);
 		return -ENOMEM;
 	}
@@ -982,7 +980,7 @@
 	}
 
 	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
-	if (s->ls_stats == NULL) {
+	if (!s->ls_stats) {
 		cfs_hash_putref(s->ls_obj_hash);
 		s->ls_obj_hash = NULL;
 		return -ENOMEM;
@@ -1031,19 +1029,19 @@
 	list_del_init(&s->ls_linkage);
 	mutex_unlock(&lu_sites_guard);
 
-	if (s->ls_obj_hash != NULL) {
+	if (s->ls_obj_hash) {
 		cfs_hash_putref(s->ls_obj_hash);
 		s->ls_obj_hash = NULL;
 	}
 
-	if (s->ls_top_dev != NULL) {
+	if (s->ls_top_dev) {
 		s->ls_top_dev->ld_site = NULL;
 		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
 		lu_device_put(s->ls_top_dev);
 		s->ls_top_dev = NULL;
 	}
 
-	if (s->ls_stats != NULL)
+	if (s->ls_stats)
 		lprocfs_free_stats(&s->ls_stats);
 }
 EXPORT_SYMBOL(lu_site_fini);
@@ -1088,7 +1086,7 @@
  */
 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
 {
-	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
+	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start)
 		t->ldt_ops->ldto_start(t);
 	memset(d, 0, sizeof(*d));
 	atomic_set(&d->ld_ref, 0);
@@ -1107,7 +1105,7 @@
 	struct lu_device_type *t;
 
 	t = d->ld_type;
-	if (d->ld_obd != NULL) {
+	if (d->ld_obd) {
 		d->ld_obd->obd_lu_dev = NULL;
 		d->ld_obd = NULL;
 	}
@@ -1116,7 +1114,7 @@
 	LASSERTF(atomic_read(&d->ld_ref) == 0,
 		 "Refcount is %u\n", atomic_read(&d->ld_ref));
 	LASSERT(t->ldt_device_nr > 0);
-	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
+	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop)
 		t->ldt_ops->ldto_stop(t);
 }
 EXPORT_SYMBOL(lu_device_fini);
@@ -1148,7 +1146,7 @@
 
 	LASSERT(list_empty(&o->lo_linkage));
 
-	if (dev != NULL) {
+	if (dev) {
 		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
 			      "lu_object", o);
 		lu_device_put(dev);
@@ -1239,7 +1237,7 @@
 	struct lu_device *next;
 
 	lu_site_purge(env, site, ~0);
-	for (scan = top; scan != NULL; scan = next) {
+	for (scan = top; scan; scan = next) {
 		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
 		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
 		lu_device_put(scan);
@@ -1248,13 +1246,13 @@
 	/* purge again. */
 	lu_site_purge(env, site, ~0);
 
-	for (scan = top; scan != NULL; scan = next) {
+	for (scan = top; scan; scan = next) {
 		const struct lu_device_type *ldt = scan->ld_type;
 		struct obd_type	     *type;
 
 		next = ldt->ldt_ops->ldto_device_free(env, scan);
 		type = ldt->ldt_obd_type;
-		if (type != NULL) {
+		if (type) {
 			type->typ_refcnt--;
 			class_put_type(type);
 		}
@@ -1289,14 +1287,14 @@
 	int result;
 	int i;
 
-	LASSERT(key->lct_init != NULL);
-	LASSERT(key->lct_fini != NULL);
+	LASSERT(key->lct_init);
+	LASSERT(key->lct_fini);
 	LASSERT(key->lct_tags != 0);
 
 	result = -ENFILE;
 	spin_lock(&lu_keys_guard);
 	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
-		if (lu_keys[i] == NULL) {
+		if (!lu_keys[i]) {
 			key->lct_index = i;
 			atomic_set(&key->lct_used, 1);
 			lu_keys[i] = key;
@@ -1313,12 +1311,10 @@
 
 static void key_fini(struct lu_context *ctx, int index)
 {
-	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
+	if (ctx->lc_value && ctx->lc_value[index]) {
 		struct lu_context_key *key;
 
 		key = lu_keys[index];
-		LASSERT(key != NULL);
-		LASSERT(key->lct_fini != NULL);
 		LASSERT(atomic_read(&key->lct_used) > 1);
 
 		key->lct_fini(ctx, key, ctx->lc_value[index]);
@@ -1376,7 +1372,7 @@
 		if (result)
 			break;
 		key = va_arg(args, struct lu_context_key *);
-	} while (key != NULL);
+	} while (key);
 	va_end(args);
 
 	if (result != 0) {
@@ -1404,7 +1400,7 @@
 	do {
 		lu_context_key_degister(k);
 		k = va_arg(args, struct lu_context_key*);
-	} while (k != NULL);
+	} while (k);
 	va_end(args);
 }
 EXPORT_SYMBOL(lu_context_key_degister_many);
@@ -1420,7 +1416,7 @@
 	do {
 		lu_context_key_revive(k);
 		k = va_arg(args, struct lu_context_key*);
-	} while (k != NULL);
+	} while (k);
 	va_end(args);
 }
 EXPORT_SYMBOL(lu_context_key_revive_many);
@@ -1436,7 +1432,7 @@
 	do {
 		lu_context_key_quiesce(k);
 		k = va_arg(args, struct lu_context_key*);
-	} while (k != NULL);
+	} while (k);
 	va_end(args);
 }
 EXPORT_SYMBOL(lu_context_key_quiesce_many);
@@ -1497,7 +1493,7 @@
 {
 	int	i;
 
-	if (ctx->lc_value == NULL)
+	if (!ctx->lc_value)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
@@ -1511,12 +1507,12 @@
 {
 	int i;
 
-	LINVRNT(ctx->lc_value != NULL);
+	LINVRNT(ctx->lc_value);
 	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
 		struct lu_context_key *key;
 
 		key = lu_keys[i];
-		if (ctx->lc_value[i] == NULL && key != NULL &&
+		if (!ctx->lc_value[i] && key &&
 		    (key->lct_tags & ctx->lc_tags) &&
 		    /*
 		     * Don't create values for a LCT_QUIESCENT key, as this
@@ -1525,7 +1521,7 @@
 		    !(key->lct_tags & LCT_QUIESCENT)) {
 			void *value;
 
-			LINVRNT(key->lct_init != NULL);
+			LINVRNT(key->lct_init);
 			LINVRNT(key->lct_index == i);
 
 			value = key->lct_init(ctx, key);
@@ -1542,7 +1538,7 @@
 			 * value.
 			 */
 			ctx->lc_value[i] = value;
-			if (key->lct_exit != NULL)
+			if (key->lct_exit)
 				ctx->lc_tags |= LCT_HAS_EXIT;
 		}
 		ctx->lc_version = key_set_version;
@@ -1554,7 +1550,7 @@
 {
 	ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
 				GFP_NOFS);
-	if (likely(ctx->lc_value != NULL))
+	if (likely(ctx->lc_value))
 		return keys_fill(ctx);
 
 	return -ENOMEM;
@@ -1626,14 +1622,13 @@
 
 	LINVRNT(ctx->lc_state == LCS_ENTERED);
 	ctx->lc_state = LCS_LEFT;
-	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
+	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
 		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
-			if (ctx->lc_value[i] != NULL) {
+			if (ctx->lc_value[i]) {
 				struct lu_context_key *key;
 
 				key = lu_keys[i];
-				LASSERT(key != NULL);
-				if (key->lct_exit != NULL)
+				if (key->lct_exit)
 					key->lct_exit(ctx,
 						      key, ctx->lc_value[i]);
 			}
@@ -1688,7 +1683,7 @@
 	int result;
 
 	result = lu_context_refill(&env->le_ctx);
-	if (result == 0 && env->le_ses != NULL)
+	if (result == 0 && env->le_ses)
 		result = lu_context_refill(env->le_ses);
 	return result;
 }
@@ -1922,11 +1917,11 @@
 	int result;
 	struct lu_kmem_descr *iter = caches;
 
-	for (result = 0; iter->ckd_cache != NULL; ++iter) {
+	for (result = 0; iter->ckd_cache; ++iter) {
 		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
 							iter->ckd_size,
 							0, 0, NULL);
-		if (*iter->ckd_cache == NULL) {
+		if (!*iter->ckd_cache) {
 			result = -ENOMEM;
 			/* free all previously allocated caches */
 			lu_kmem_fini(caches);
@@ -1943,7 +1938,7 @@
  */
 void lu_kmem_fini(struct lu_kmem_descr *caches)
 {
-	for (; caches->ckd_cache != NULL; ++caches) {
+	for (; caches->ckd_cache; ++caches) {
 		kmem_cache_destroy(*caches->ckd_cache);
 		*caches->ckd_cache = NULL;
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index fb9147c..403ceea 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -65,7 +65,7 @@
 {
 	struct handle_bucket *bucket;
 
-	LASSERT(h != NULL);
+	LASSERT(h);
 	LASSERT(list_empty(&h->h_link));
 
 	/*
@@ -140,10 +140,11 @@
 	struct portals_handle *h;
 	void *retval = NULL;
 
-	LASSERT(handle_hash != NULL);
+	LASSERT(handle_hash);
 
 	/* Be careful when you want to change this code. See the
-	 * rcu_read_lock() definition on top this file. - jxiong */
+	 * rcu_read_lock() definition on top this file. - jxiong
+	 */
 	bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
 
 	rcu_read_lock();
@@ -170,7 +171,7 @@
 	struct portals_handle *h = RCU2HANDLE(rcu);
 	void *ptr = (void *)(unsigned long)h->h_cookie;
 
-	if (h->h_ops->hop_free != NULL)
+	if (h->h_ops->hop_free)
 		h->h_ops->hop_free(ptr, h->h_size);
 	else
 		kfree(ptr);
@@ -183,11 +184,11 @@
 	struct timespec64 ts;
 	int seed[2];
 
-	LASSERT(handle_hash == NULL);
+	LASSERT(!handle_hash);
 
 	handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE,
 				      GFP_NOFS);
-	if (handle_hash == NULL)
+	if (!handle_hash)
 		return -ENOMEM;
 
 	spin_lock_init(&handle_base_lock);
@@ -234,7 +235,7 @@
 {
 	int count;
 
-	LASSERT(handle_hash != NULL);
+	LASSERT(handle_hash);
 
 	count = cleanup_all_handles();
 
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index d6184f8..b10ea31 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -93,7 +93,8 @@
 EXPORT_SYMBOL(lustre_uuid_to_peer);
 
 /* Add a nid to a niduuid.  Multiple nids can be added to a single uuid;
-   LNET will choose the best one. */
+ * LNET will choose the best one.
+ */
 int class_add_uuid(const char *uuid, __u64 nid)
 {
 	struct uuid_nid_data *data, *entry;
@@ -151,7 +152,7 @@
 	struct uuid_nid_data *data;
 
 	spin_lock(&g_uuid_lock);
-	if (uuid != NULL) {
+	if (uuid) {
 		struct obd_uuid tmp;
 
 		obd_str2uuid(&tmp, uuid);
@@ -165,7 +166,7 @@
 		list_splice_init(&g_uuid_list, &deathrow);
 	spin_unlock(&g_uuid_lock);
 
-	if (uuid != NULL && list_empty(&deathrow)) {
+	if (uuid && list_empty(&deathrow)) {
 		CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
 		return -EINVAL;
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 49cdc64..2134b60 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -71,8 +71,9 @@
 EXPORT_SYMBOL(class_find_param);
 
 /* returns 0 if this is the first key in the buffer, else 1.
-   valp points to first char after key. */
-static int class_match_param(char *buf, char *key, char **valp)
+ * valp points to first char after key.
+ */
+static int class_match_param(char *buf, const char *key, char **valp)
 {
 	if (!buf)
 		return 1;
@@ -114,9 +115,10 @@
 };
 
 /* 0 is good nid,
-   1 not found
-   < 0 error
-   endh is set to next separator */
+ * 1 not found
+ * < 0 error
+ * endh is set to next separator
+ */
 static int class_parse_value(char *buf, int opc, void *value, char **endh,
 			     int quiet)
 {
@@ -210,7 +212,7 @@
 		       name, typename, rc);
 		goto out;
 	}
-	LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n",
+	LASSERTF(obd, "Cannot get obd device %s of type %s\n",
 		 name, typename);
 	LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
 		 "obd %p obd_magic %08X != %08X\n",
@@ -230,7 +232,8 @@
 	mutex_init(&obd->obd_dev_mutex);
 	spin_lock_init(&obd->obd_osfs_lock);
 	/* obd->obd_osfs_age must be set to a value in the distant
-	 * past to guarantee a fresh statfs is fetched on mount. */
+	 * past to guarantee a fresh statfs is fetched on mount.
+	 */
 	obd->obd_osfs_age = cfs_time_shift_64(-1000);
 
 	/* XXX belongs in setup not attach  */
@@ -272,9 +275,9 @@
 	       obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
 	return 0;
  out:
-	if (obd != NULL) {
+	if (obd)
 		class_release_dev(obd);
-	}
+
 	return rc;
 }
 
@@ -286,7 +289,7 @@
 	int err = 0;
 	struct obd_export *exp;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	LASSERTF(obd == class_num2obd(obd->obd_minor),
 		 "obd %p != obd_devs[%d] %p\n",
 		 obd, obd->obd_minor, class_num2obd(obd->obd_minor));
@@ -315,7 +318,8 @@
 		return -EEXIST;
 	}
 	/* just leave this on forever.  I can't use obd_set_up here because
-	   other fns check that status, and we're not actually set up yet. */
+	 * other fns check that status, and we're not actually set up yet.
+	 */
 	obd->obd_starting = 1;
 	obd->obd_uuid_hash = NULL;
 	spin_unlock(&obd->obd_dev_lock);
@@ -503,7 +507,8 @@
 
 	if ((refs == 1) && obd->obd_stopping) {
 		/* All exports have been destroyed; there should
-		   be no more in-progress ops by this point.*/
+		 * be no more in-progress ops by this point.
+		 */
 
 		spin_lock(&obd->obd_self_export->exp_lock);
 		obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
@@ -723,7 +728,8 @@
 }
 
 /* We can't call ll_process_config or lquota_process_config directly because
- * it lives in a module that must be loaded after this one. */
+ * it lives in a module that must be loaded after this one.
+ */
 static int (*client_process_config)(struct lustre_cfg *lcfg);
 static int (*quota_process_config)(struct lustre_cfg *lcfg);
 
@@ -812,7 +818,8 @@
 		       lustre_cfg_string(lcfg, 2),
 		       lustre_cfg_string(lcfg, 3));
 		/* set these mount options somewhere, so ll_fill_super
-		 * can find them. */
+		 * can find them.
+		 */
 		err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1),
 					lustre_cfg_string(lcfg, 1),
 					LUSTRE_CFG_BUFLEN(lcfg, 2),
@@ -988,8 +995,9 @@
 	fakefile.private_data = &fake_seqfile;
 	fake_seqfile.private = data;
 	/* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt
-	   or   lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
-	   or   lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */
+	 * or   lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
+	 * or   lctl conf_param lustre-OST0000.osc.max_dirty_mb=36
+	 */
 	for (i = 1; i < lcfg->lcfg_bufcount; i++) {
 		key = lustre_cfg_buf(lcfg, i);
 		/* Strip off prefix */
@@ -1008,7 +1016,7 @@
 		/* Search proc entries */
 		while (lvars[j].name) {
 			var = &lvars[j];
-			if (class_match_param(key, (char *)var->name, NULL) == 0
+			if (!class_match_param(key, var->name, NULL)
 			    && keylen == strlen(var->name)) {
 				matched++;
 				rc = -EROFS;
@@ -1027,9 +1035,10 @@
 		}
 		if (!matched) {
 			/* If the prefix doesn't match, return error so we
-			   can pass it down the stack */
+			 * can pass it down the stack
+			 */
 			if (strnchr(key, keylen, '.'))
-			    return -ENOSYS;
+				return -ENOSYS;
 			CERROR("%s: unknown param %s\n",
 			       (char *)lustre_cfg_string(lcfg, 0), key);
 			/* rc = -EINVAL;	continue parsing other params */
@@ -1116,7 +1125,8 @@
 			}
 		}
 		/* A config command without a start marker before it is
-		   illegal (post 146) */
+		 * illegal (post 146)
+		 */
 		if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
 		    !(clli->cfg_flags & CFG_F_MARKER) &&
 		    (lcfg->lcfg_command != LCFG_MARKER)) {
@@ -1182,8 +1192,9 @@
 		}
 
 		/* we override the llog's uuid for clients, to insure they
-		are unique */
-		if (clli && clli->cfg_instance != NULL &&
+		 * are unique
+		 */
+		if (clli && clli->cfg_instance &&
 		    lcfg->lcfg_command == LCFG_ATTACH) {
 			lustre_cfg_bufs_set_string(&bufs, 2,
 						   clli->cfg_uuid.uuid);
@@ -1211,7 +1222,8 @@
 		lcfg_new->lcfg_flags = lcfg->lcfg_flags;
 
 		/* XXX Hack to try to remain binary compatible with
-		 * pre-newconfig logs */
+		 * pre-newconfig logs
+		 */
 		if (lcfg->lcfg_nal != 0 &&      /* pre-newconfig log? */
 		    (lcfg->lcfg_nid >> 32) == 0) {
 			__u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff);
@@ -1270,7 +1282,7 @@
 	if (cfg) {
 		cd.lpcd_first_idx = cfg->cfg_last_idx;
 		callback = cfg->cfg_callback;
-		LASSERT(callback != NULL);
+		LASSERT(callback);
 	} else {
 		callback = class_config_llog_handler;
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index b5aa816..d138e05 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -283,9 +283,10 @@
 		recov_bk = 0;
 
 		/* Try all connections, but only once (again).
-		   We don't want to block another target from starting
-		   (using its local copy of the log), but we do want to connect
-		   if at all possible. */
+		 * We don't want to block another target from starting
+		 * (using its local copy of the log), but we do want to connect
+		 * if at all possible.
+		 */
 		recov_bk++;
 		CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,
 		       recov_bk);
@@ -375,7 +376,8 @@
 		goto out_free;
 
 	/* Keep a refcount of servers/clients who started with "mount",
-	   so we know when we can get rid of the mgc. */
+	 * so we know when we can get rid of the mgc.
+	 */
 	atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
 
 	/* We connect to the MGS at setup, and don't disconnect until cleanup */
@@ -403,7 +405,8 @@
 
 out:
 	/* Keep the mgc info in the sb. Note that many lsi's can point
-	   to the same mgc.*/
+	 * to the same mgc.
+	 */
 	lsi->lsi_mgc = obd;
 out_free:
 	mutex_unlock(&mgc_start_lock);
@@ -432,7 +435,8 @@
 	LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
 	if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
 		/* This is not fatal, every client that stops
-		   will call in here. */
+		 * will call in here.
+		 */
 		CDEBUG(D_MOUNT, "mgc still has %d references.\n",
 		       atomic_read(&obd->u.cli.cl_mgc_refcount));
 		rc = -EBUSY;
@@ -440,19 +444,20 @@
 	}
 
 	/* The MGC has no recoverable data in any case.
-	 * force shutdown set in umount_begin */
+	 * force shutdown set in umount_begin
+	 */
 	obd->obd_no_recov = 1;
 
 	if (obd->u.cli.cl_mgc_mgsexp) {
 		/* An error is not fatal, if we are unable to send the
-		   disconnect mgs ping evictor cleans up the export */
+		 * disconnect mgs ping evictor cleans up the export
+		 */
 		rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp);
 		if (rc)
 			CDEBUG(D_MOUNT, "disconnect failed %d\n", rc);
 	}
 
-	/* Save the obdname for cleaning the nid uuids, which are
-	   obdname_XX */
+	/* Save the obdname for cleaning the nid uuids, which are obdname_XX */
 	len = strlen(obd->obd_name) + 6;
 	niduuid = kzalloc(len, GFP_NOFS);
 	if (niduuid) {
@@ -518,13 +523,12 @@
 {
 	struct lustre_sb_info *lsi = s2lsi(sb);
 
-	LASSERT(lsi != NULL);
 	CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
 
 	/* someone didn't call server_put_mount. */
 	LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
 
-	if (lsi->lsi_lmd != NULL) {
+	if (lsi->lsi_lmd) {
 		kfree(lsi->lsi_lmd->lmd_dev);
 		kfree(lsi->lsi_lmd->lmd_profile);
 		kfree(lsi->lsi_lmd->lmd_mgssec);
@@ -538,7 +542,7 @@
 		kfree(lsi->lsi_lmd);
 	}
 
-	LASSERT(lsi->lsi_llsbi == NULL);
+	LASSERT(!lsi->lsi_llsbi);
 	kfree(lsi);
 	s2lsi_nocast(sb) = NULL;
 
@@ -546,13 +550,12 @@
 }
 
 /* The lsi has one reference for every server that is using the disk -
-   e.g. MDT, MGS, and potentially MGC */
+ * e.g. MDT, MGS, and potentially MGC
+ */
 static int lustre_put_lsi(struct super_block *sb)
 {
 	struct lustre_sb_info *lsi = s2lsi(sb);
 
-	LASSERT(lsi != NULL);
-
 	CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
 	if (atomic_dec_and_test(&lsi->lsi_mounts)) {
 		lustre_free_lsi(sb);
@@ -588,21 +591,22 @@
 	if (dash == svname)
 		return -EINVAL;
 
-	if (fsname != NULL) {
+	if (fsname) {
 		strncpy(fsname, svname, dash - svname);
 		fsname[dash - svname] = '\0';
 	}
 
-	if (endptr != NULL)
+	if (endptr)
 		*endptr = dash;
 
 	return 0;
 }
 
 /* Get the index from the obd name.
-   rc = server type, or
-   rc < 0  on error
-   if endptr isn't NULL it is set to end of name */
+ * rc = server type, or
+ * rc < 0  on error
+ * if endptr isn't NULL it is set to end of name
+ */
 static int server_name2index(const char *svname, __u32 *idx,
 			     const char **endptr)
 {
@@ -627,18 +631,18 @@
 	dash += 3;
 
 	if (strncmp(dash, "all", 3) == 0) {
-		if (endptr != NULL)
+		if (endptr)
 			*endptr = dash + 3;
 		return rc | LDD_F_SV_ALL;
 	}
 
 	index = simple_strtoul(dash, (char **)endptr, 16);
-	if (idx != NULL)
+	if (idx)
 		*idx = index;
 
 	/* Account for -mdc after index that is possible when specifying mdt */
-	if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
-				      sizeof(LUSTRE_MDC_NAME)-1) == 0)
+	if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
+			      sizeof(LUSTRE_MDC_NAME) - 1) == 0)
 		*endptr += sizeof(LUSTRE_MDC_NAME);
 
 	return rc;
@@ -661,7 +665,8 @@
 			return rc;
 		}
 		/* BUSY just means that there's some other obd that
-		   needs the mgc.  Let him clean it up. */
+		 * needs the mgc.  Let him clean it up.
+		 */
 		CDEBUG(D_MOUNT, "MGC still in use\n");
 	}
 	/* Drop a ref to the mounted disk */
@@ -731,8 +736,9 @@
 	int rc = 0, devmax;
 
 	/* The shortest an ost name can be is 8 chars: -OST0000.
-	   We don't actually know the fsname at this time, so in fact
-	   a user could specify any fsname. */
+	 * We don't actually know the fsname at this time, so in fact
+	 * a user could specify any fsname.
+	 */
 	devmax = strlen(ptr) / 8 + 1;
 
 	/* temp storage until we figure out how many we have */
@@ -756,7 +762,8 @@
 			       (uint)(s2-s1), s1, rc);
 		s1 = s2;
 		/* now we are pointing at ':' (next exclude)
-		   or ',' (end of excludes) */
+		 * or ',' (end of excludes)
+		 */
 		if (lmd->lmd_exclude_count >= devmax)
 			break;
 	}
@@ -788,7 +795,7 @@
 	lmd->lmd_mgssec = NULL;
 
 	tail = strchr(ptr, ',');
-	if (tail == NULL)
+	if (!tail)
 		length = strlen(ptr);
 	else
 		length = tail - ptr;
@@ -807,14 +814,14 @@
 	char   *tail;
 	int     length;
 
-	if ((handle == NULL) || (ptr == NULL))
+	if (!handle || !ptr)
 		return -EINVAL;
 
 	kfree(*handle);
 	*handle = NULL;
 
 	tail = strchr(ptr, ',');
-	if (tail == NULL)
+	if (!tail)
 		length = strlen(ptr);
 	else
 		length = tail - ptr;
@@ -847,14 +854,14 @@
 		return -EINVAL;
 	}
 
-	if (lmd->lmd_mgs != NULL)
+	if (lmd->lmd_mgs)
 		oldlen = strlen(lmd->lmd_mgs) + 1;
 
 	mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
 	if (!mgsnid)
 		return -ENOMEM;
 
-	if (lmd->lmd_mgs != NULL) {
+	if (lmd->lmd_mgs) {
 		/* Multiple mgsnid= are taken to mean failover locations */
 		memcpy(mgsnid, lmd->lmd_mgs, oldlen);
 		mgsnid[oldlen - 1] = ':';
@@ -909,10 +916,12 @@
 			s1++;
 
 		/* Client options are parsed in ll_options: eg. flock,
-		   user_xattr, acl */
+		 * user_xattr, acl
+		 */
 
 		/* Parse non-ldiskfs options here. Rather than modifying
-		   ldiskfs, we just zero these out here */
+		 * ldiskfs, we just zero these out here
+		 */
 		if (strncmp(s1, "abort_recov", 11) == 0) {
 			lmd->lmd_flags |= LMD_FLG_ABORT_RECOV;
 			clear++;
@@ -940,7 +949,8 @@
 				   sizeof(PARAM_MGSNODE) - 1) == 0) {
 			s2 = s1 + sizeof(PARAM_MGSNODE) - 1;
 			/* Assume the next mount opt is the first
-			   invalid nid we get to. */
+			 * invalid nid we get to.
+			 */
 			rc = lmd_parse_mgs(lmd, &s2);
 			if (rc)
 				goto invalid;
@@ -981,7 +991,7 @@
 			size_t length, params_length;
 			char *tail = strchr(s1 + 6, ',');
 
-			if (tail == NULL)
+			if (!tail)
 				length = strlen(s1);
 			else
 				length = tail - s1;
@@ -1000,18 +1010,20 @@
 			clear++;
 		}
 		/* Linux 2.4 doesn't pass the device, so we stuck it at the
-		   end of the options. */
+		 * end of the options.
+		 */
 		else if (strncmp(s1, "device=", 7) == 0) {
 			devname = s1 + 7;
 			/* terminate options right before device.  device
-			   must be the last one. */
+			 * must be the last one.
+			 */
 			*s1 = '\0';
 			break;
 		}
 
 		/* Find next opt */
 		s2 = strchr(s1, ',');
-		if (s2 == NULL) {
+		if (!s2) {
 			if (clear)
 				*s1 = '\0';
 			break;
@@ -1113,9 +1125,9 @@
 
 	if (lmd_is_client(lmd)) {
 		CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
-		if (client_fill_super == NULL)
+		if (!client_fill_super)
 			request_module("lustre");
-		if (client_fill_super == NULL) {
+		if (!client_fill_super) {
 			LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
 			lustre_put_lsi(sb);
 			rc = -ENODEV;
@@ -1136,7 +1148,8 @@
 	}
 
 	/* If error happens in fill_super() call, @lsi will be killed there.
-	 * This is why we do not put it here. */
+	 * This is why we do not put it here.
+	 */
 	goto out;
 out:
 	if (rc) {
@@ -1151,7 +1164,8 @@
 }
 
 /* We can't call ll_fill_super by name because it lives in a module that
-   must be loaded after this one. */
+ * must be loaded after this one.
+ */
 void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
 						  struct vfsmount *mnt))
 {
@@ -1166,7 +1180,7 @@
 EXPORT_SYMBOL(lustre_register_kill_super_cb);
 
 /***************** FS registration ******************/
-struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
+static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
 				const char *devname, void *data)
 {
 	struct lustre_mount_data2 lmd2 = {
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 75e1dea..e6436cb 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -55,7 +55,8 @@
 EXPORT_SYMBOL(obdo_set_parent_fid);
 
 /* WARNING: the file systems must take care not to tinker with
-   attributes they don't manage (such as blocks). */
+ * attributes they don't manage (such as blocks).
+ */
 void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
 {
 	u32 newvalid = 0;
@@ -122,7 +123,8 @@
 		ostid_set_seq_mdt0(&ioobj->ioo_oid);
 
 	/* Since 2.4 this does not contain o_mode in the low 16 bits.
-	 * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */
+	 * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs
+	 */
 	ioobj->ioo_max_brw = 0;
 }
 EXPORT_SYMBOL(obdo_to_ioobj);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 7b53f7d..3be28c5 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -60,7 +60,6 @@
 	struct cl_site	  ed_site_myself;
 	struct cl_site	 *ed_site;
 	struct lu_device       *ed_next;
-	int		     ed_next_islov;
 };
 
 struct echo_object {
@@ -147,7 +146,7 @@
 	struct echo_thread_info *info;
 
 	info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
-	LASSERT(info != NULL);
+	LASSERT(info);
 	return info;
 }
 
@@ -162,9 +161,6 @@
 static struct echo_object *cl_echo_object_find(struct echo_device *d,
 					       struct lov_stripe_md **lsm);
 static int cl_echo_object_put(struct echo_object *eco);
-static int cl_echo_enqueue(struct echo_object *eco, u64 start,
-			   u64 end, int mode, __u64 *cookie);
-static int cl_echo_cancel(struct echo_device *d, __u64 cookie);
 static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
 			      struct page **pages, int npages, int async);
 
@@ -271,7 +267,7 @@
 				 const struct cl_page_slice *slice,
 				 int ioret)
 {
-	LASSERT(slice->cpl_page->cp_sync_io != NULL);
+	LASSERT(slice->cpl_page->cp_sync_io);
 }
 
 static void echo_page_fini(const struct lu_env *env,
@@ -397,13 +393,13 @@
 	struct echo_lock *el;
 
 	el = kmem_cache_alloc(echo_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (el != NULL) {
+	if (el) {
 		cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
 		el->el_object = cl2echo_obj(obj);
 		INIT_LIST_HEAD(&el->el_chain);
 		atomic_set(&el->el_refcount, 0);
 	}
-	return el == NULL ? -ENOMEM : 0;
+	return !el ? -ENOMEM : 0;
 }
 
 static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -443,7 +439,7 @@
 		under = ed->ed_next;
 		below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
 							under);
-		if (below == NULL)
+		if (!below)
 			return -ENOMEM;
 		lu_object_add(obj, below);
 	}
@@ -474,12 +470,12 @@
 	int lsm_size;
 
 	/* If export is lov/osc then use their obd method */
-	if (ed->ed_next != NULL)
+	if (ed->ed_next)
 		return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
 	/* OFD has no unpackmd method, do everything here */
 	lsm_size = lov_stripe_md_size(1);
 
-	LASSERT(*lsmp == NULL);
+	LASSERT(!*lsmp);
 	*lsmp = kzalloc(lsm_size, GFP_NOFS);
 	if (!*lsmp)
 		return -ENOMEM;
@@ -502,12 +498,11 @@
 	int lsm_size;
 
 	/* If export is lov/osc then use their obd method */
-	if (ed->ed_next != NULL)
+	if (ed->ed_next)
 		return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
 	/* OFD has no unpackmd method, do everything here */
 	lsm_size = lov_stripe_md_size(1);
 
-	LASSERT(*lsmp != NULL);
 	kfree((*lsmp)->lsm_oinfo[0]);
 	kfree(*lsmp);
 	*lsmp = NULL;
@@ -566,9 +561,9 @@
 	struct lu_object *obj = NULL;
 
 	/* we're the top dev. */
-	LASSERT(hdr == NULL);
+	LASSERT(!hdr);
 	eco = kmem_cache_alloc(echo_object_kmem, GFP_NOFS | __GFP_ZERO);
-	if (eco != NULL) {
+	if (eco) {
 		struct cl_object_header *hdr = &eco->eo_hdr;
 
 		obj = &echo_obj2cl(eco)->co_lu;
@@ -582,13 +577,13 @@
 	return obj;
 }
 
-static struct lu_device_operations echo_device_lu_ops = {
+static const struct lu_device_operations echo_device_lu_ops = {
 	.ldo_object_alloc   = echo_object_alloc,
 };
 
 /** @} echo_lu_dev_ops */
 
-static struct cl_device_operations echo_device_cl_ops = {
+static const struct cl_device_operations echo_device_cl_ops = {
 };
 
 /** \defgroup echo_init Setup and teardown
@@ -631,7 +626,7 @@
 	struct echo_thread_info *info;
 
 	info = kmem_cache_alloc(echo_thread_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info == NULL)
+	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;
 }
@@ -662,7 +657,7 @@
 	struct echo_session_info *session;
 
 	session = kmem_cache_alloc(echo_session_kmem, GFP_NOFS | __GFP_ZERO);
-	if (session == NULL)
+	if (!session)
 		session = ERR_PTR(-ENOMEM);
 	return session;
 }
@@ -719,11 +714,11 @@
 
 	cleanup = 2;
 	obd = class_name2obd(lustre_cfg_string(cfg, 0));
-	LASSERT(obd != NULL);
-	LASSERT(env != NULL);
+	LASSERT(obd);
+	LASSERT(env);
 
 	tgt = class_name2obd(lustre_cfg_string(cfg, 1));
-	if (tgt == NULL) {
+	if (!tgt) {
 		CERROR("Can not find tgt device %s\n",
 			lustre_cfg_string(cfg, 1));
 		rc = -ENODEV;
@@ -751,14 +746,14 @@
 	cleanup = 4;
 
 	/* if echo client is to be stacked upon ost device, the next is
-	 * NULL since ost is not a clio device so far */
-	if (next != NULL && !lu_device_is_cl(next))
+	 * NULL since ost is not a clio device so far
+	 */
+	if (next && !lu_device_is_cl(next))
 		next = NULL;
 
 	tgt_type_name = tgt->obd_type->typ_name;
-	if (next != NULL) {
-		LASSERT(next != NULL);
-		if (next->ld_site != NULL) {
+	if (next) {
+		if (next->ld_site) {
 			rc = -EBUSY;
 			goto out;
 		}
@@ -770,14 +765,6 @@
 		if (rc)
 			goto out;
 
-		/* Tricky case, I have to determine the obd type since
-		 * CLIO uses the different parameters to initialize
-		 * objects for lov & osc. */
-		if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
-			ed->ed_next_islov = 1;
-		else
-			LASSERT(strcmp(tgt_type_name,
-				       LUSTRE_OSC_NAME) == 0);
 	} else {
 		LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
 	}
@@ -963,20 +950,11 @@
 	info = echo_env_info(env);
 	conf = &info->eti_conf;
 	if (d->ed_next) {
-		if (!d->ed_next_islov) {
-			struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
+		struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
 
-			LASSERT(oinfo != NULL);
-			oinfo->loi_oi = lsm->lsm_oi;
-			conf->eoc_cl.u.coc_oinfo = oinfo;
-		} else {
-			struct lustre_md *md;
-
-			md = &info->eti_md;
-			memset(md, 0, sizeof(*md));
-			md->lsm = lsm;
-			conf->eoc_cl.u.coc_md = md;
-		}
+		LASSERT(oinfo);
+		oinfo->loi_oi = lsm->lsm_oi;
+		conf->eoc_cl.u.coc_oinfo = oinfo;
 	}
 	conf->eoc_md = lsmp;
 
@@ -988,7 +966,8 @@
 	}
 
 	/* In the function below, .hs_keycmp resolves to
-	 * lu_obj_hop_keycmp() */
+	 * lu_obj_hop_keycmp()
+	 */
 	/* coverity[overrun-buffer-val] */
 	obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
 	if (IS_ERR(obj)) {
@@ -1076,36 +1055,6 @@
 	return rc;
 }
 
-static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end,
-			   int mode, __u64 *cookie)
-{
-	struct echo_thread_info *info;
-	struct lu_env *env;
-	struct cl_io *io;
-	int refcheck;
-	int result;
-
-	env = cl_env_get(&refcheck);
-	if (IS_ERR(env))
-		return PTR_ERR(env);
-
-	info = echo_env_info(env);
-	io = &info->eti_io;
-
-	io->ci_ignore_layout = 1;
-	result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
-	if (result < 0)
-		goto out;
-	LASSERT(result == 0);
-
-	result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
-	cl_io_fini(env, io);
-
-out:
-	cl_env_put(env, &refcheck);
-	return result;
-}
-
 static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
 			   __u64 cookie)
 {
@@ -1114,7 +1063,6 @@
 	struct list_head	     *el;
 	int found = 0, still_used = 0;
 
-	LASSERT(ec != NULL);
 	spin_lock(&ec->ec_lock);
 	list_for_each(el, &ec->ec_locks) {
 		ecl = list_entry(el, struct echo_lock, el_chain);
@@ -1137,22 +1085,6 @@
 	return 0;
 }
 
-static int cl_echo_cancel(struct echo_device *ed, __u64 cookie)
-{
-	struct lu_env *env;
-	int refcheck;
-	int rc;
-
-	env = cl_env_get(&refcheck);
-	if (IS_ERR(env))
-		return PTR_ERR(env);
-
-	rc = cl_echo_cancel0(env, ed, cookie);
-
-	cl_env_put(env, &refcheck);
-	return rc;
-}
-
 static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
 			     enum cl_req_type unused, struct cl_2queue *queue)
 {
@@ -1188,7 +1120,7 @@
 	int i;
 
 	LASSERT((offset & ~CFS_PAGE_MASK) == 0);
-	LASSERT(ed->ed_next != NULL);
+	LASSERT(ed->ed_next);
 	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		return PTR_ERR(env);
@@ -1234,7 +1166,8 @@
 		cl_page_list_add(&queue->c2_qin, clp);
 
 		/* drop the reference count for cl_page_find, so that the page
-		 * will be freed in cl_2queue_fini. */
+		 * will be freed in cl_2queue_fini.
+		 */
 		cl_page_put(env, clp);
 		cl_page_clip(env, clp, 0, page_size);
 
@@ -1268,61 +1201,8 @@
 
 static u64 last_object_id;
 
-static int
-echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
-{
-	struct lov_stripe_md *ulsm = _ulsm;
-	struct lov_oinfo **p;
-	int nob, i;
-
-	nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
-	if (nob > ulsm_nob)
-		return -EINVAL;
-
-	if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
-		return -EFAULT;
-
-	for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
-		struct lov_oinfo __user *up;
-		if (get_user(up, ulsm->lsm_oinfo + i) ||
-		    copy_to_user(up, *p, sizeof(struct lov_oinfo)))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-static int
-echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
-		struct lov_stripe_md __user *ulsm, int ulsm_nob)
-{
-	struct echo_client_obd *ec = ed->ed_ec;
-	struct lov_oinfo **p;
-	int		     i;
-
-	if (ulsm_nob < sizeof(*lsm))
-		return -EINVAL;
-
-	if (copy_from_user(lsm, ulsm, sizeof(*lsm)))
-		return -EFAULT;
-
-	if (lsm->lsm_stripe_count > ec->ec_nstripes ||
-	    lsm->lsm_magic != LOV_MAGIC ||
-	    (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 ||
-	    ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
-		return -EINVAL;
-
-	for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
-		struct lov_oinfo __user *up;
-		if (get_user(up, ulsm->lsm_oinfo + i) ||
-		    copy_from_user(*p, up, sizeof(struct lov_oinfo)))
-			return -EFAULT;
-	}
-	return 0;
-}
-
 static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
-			      int on_target, struct obdo *oa, void *ulsm,
-			      int ulsm_nob, struct obd_trans_info *oti)
+			      struct obdo *oa, struct obd_trans_info *oti)
 {
 	struct echo_object     *eco;
 	struct echo_client_obd *ec = ed->ed_ec;
@@ -1330,10 +1210,10 @@
 	int		     rc;
 	int		     created = 0;
 
-	if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
-	    (on_target ||		       /* set_stripe */
-	     ec->ec_nstripes != 0)) {	   /* LOV */
-		CERROR("No valid oid\n");
+	if (!(oa->o_valid & OBD_MD_FLID) ||
+	    !(oa->o_valid & OBD_MD_FLGROUP) ||
+	    !fid_seq_is_echo(ostid_seq(&oa->o_oi))) {
+		CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi));
 		return -EINVAL;
 	}
 
@@ -1343,52 +1223,18 @@
 		goto failed;
 	}
 
-	if (ulsm != NULL) {
-		int i, idx;
-
-		rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob);
-		if (rc != 0)
-			goto failed;
-
-		if (lsm->lsm_stripe_count == 0)
-			lsm->lsm_stripe_count = ec->ec_nstripes;
-
-		if (lsm->lsm_stripe_size == 0)
-			lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
-
-		idx = cfs_rand();
-
-		/* setup stripes: indices + default ids if required */
-		for (i = 0; i < lsm->lsm_stripe_count; i++) {
-			if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0)
-				lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi;
-
-			lsm->lsm_oinfo[i]->loi_ost_idx =
-				(idx + i) % ec->ec_nstripes;
-		}
-	}
-
-	/* setup object ID here for !on_target and LOV hint */
-	if (oa->o_valid & OBD_MD_FLID) {
-		LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-		lsm->lsm_oi = oa->o_oi;
-	}
+	/* setup object ID here */
+	lsm->lsm_oi = oa->o_oi;
 
 	if (ostid_id(&lsm->lsm_oi) == 0)
 		ostid_set_id(&lsm->lsm_oi, ++last_object_id);
 
-	rc = 0;
-	if (on_target) {
-		/* Only echo objects are allowed to be created */
-		LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
-			(ostid_seq(&oa->o_oi) == FID_SEQ_ECHO));
-		rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
-		if (rc != 0) {
-			CERROR("Cannot create objects: rc = %d\n", rc);
-			goto failed;
-		}
-		created = 1;
+	rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
+	if (rc != 0) {
+		CERROR("Cannot create objects: rc = %d\n", rc);
+		goto failed;
 	}
+	created = 1;
 
 	/* See what object ID we were given */
 	oa->o_oi = lsm->lsm_oi;
@@ -1452,37 +1298,7 @@
 }
 
 static void
-echo_get_stripe_off_id(struct lov_stripe_md *lsm, u64 *offp, u64 *idp)
-{
-	unsigned long stripe_count;
-	unsigned long stripe_size;
-	unsigned long width;
-	unsigned long woffset;
-	int	   stripe_index;
-	u64       offset;
-
-	if (lsm->lsm_stripe_count <= 1)
-		return;
-
-	offset       = *offp;
-	stripe_size  = lsm->lsm_stripe_size;
-	stripe_count = lsm->lsm_stripe_count;
-
-	/* width = # bytes in all stripes */
-	width = stripe_size * stripe_count;
-
-	/* woffset = offset within a width; offset = whole number of widths */
-	woffset = do_div(offset, width);
-
-	stripe_index = woffset / stripe_size;
-
-	*idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi);
-	*offp = offset * stripe_size + woffset % stripe_size;
-}
-
-static void
-echo_client_page_debug_setup(struct lov_stripe_md *lsm,
-			     struct page *page, int rw, u64 id,
+echo_client_page_debug_setup(struct page *page, int rw, u64 id,
 			     u64 offset, u64 count)
 {
 	char    *addr;
@@ -1499,7 +1315,6 @@
 		if (rw == OBD_BRW_WRITE) {
 			stripe_off = offset + delta;
 			stripe_id = id;
-			echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
 		} else {
 			stripe_off = 0xdeadbeef00c0ffeeULL;
 			stripe_id = 0xdeadbeef00c0ffeeULL;
@@ -1511,8 +1326,7 @@
 	kunmap(page);
 }
 
-static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
-					struct page *page, u64 id,
+static int echo_client_page_debug_check(struct page *page, u64 id,
 					u64 offset, u64 count)
 {
 	u64	stripe_off;
@@ -1530,7 +1344,6 @@
 	for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		stripe_off = offset + delta;
 		stripe_id = id;
-		echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
 
 		rc2 = block_debug_check("test_brw",
 					addr + delta, OBD_ECHO_BLOCK_SIZE,
@@ -1550,7 +1363,6 @@
 			    u64 count, int async,
 			    struct obd_trans_info *oti)
 {
-	struct lov_stripe_md   *lsm = eco->eo_lsm;
 	u32	       npages;
 	struct brw_page	*pga;
 	struct brw_page	*pgp;
@@ -1569,8 +1381,6 @@
 	gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
 
 	LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
-	LASSERT(lsm != NULL);
-	LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi));
 
 	if (count <= 0 ||
 	    (count & (~CFS_PAGE_MASK)) != 0)
@@ -1583,11 +1393,11 @@
 		brw_flags = OBD_BRW_ASYNC;
 
 	pga = kcalloc(npages, sizeof(*pga), GFP_NOFS);
-	if (pga == NULL)
+	if (!pga)
 		return -ENOMEM;
 
 	pages = kcalloc(npages, sizeof(*pages), GFP_NOFS);
-	if (pages == NULL) {
+	if (!pages) {
 		kfree(pga);
 		return -ENOMEM;
 	}
@@ -1596,11 +1406,11 @@
 	     i < npages;
 	     i++, pgp++, off += PAGE_CACHE_SIZE) {
 
-		LASSERT(pgp->pg == NULL);      /* for cleanup */
+		LASSERT(!pgp->pg);      /* for cleanup */
 
 		rc = -ENOMEM;
 		pgp->pg = alloc_page(gfp_mask);
-		if (pgp->pg == NULL)
+		if (!pgp->pg)
 			goto out;
 
 		pages[i] = pgp->pg;
@@ -1609,13 +1419,13 @@
 		pgp->flag = brw_flags;
 
 		if (verify)
-			echo_client_page_debug_setup(lsm, pgp->pg, rw,
+			echo_client_page_debug_setup(pgp->pg, rw,
 						     ostid_id(&oa->o_oi), off,
 						     pgp->count);
 	}
 
 	/* brw mode can only be used at client */
-	LASSERT(ed->ed_next != NULL);
+	LASSERT(ed->ed_next);
 	rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
 
  out:
@@ -1623,13 +1433,13 @@
 		verify = 0;
 
 	for (i = 0, pgp = pga; i < npages; i++, pgp++) {
-		if (pgp->pg == NULL)
+		if (!pgp->pg)
 			continue;
 
 		if (verify) {
 			int vrc;
 
-			vrc = echo_client_page_debug_check(lsm, pgp->pg,
+			vrc = echo_client_page_debug_check(pgp->pg,
 							   ostid_id(&oa->o_oi),
 							   pgp->off, pgp->count);
 			if (vrc != 0 && rc == 0)
@@ -1649,7 +1459,6 @@
 				   u64 batch, struct obd_trans_info *oti,
 				   int async)
 {
-	struct lov_stripe_md *lsm = eco->eo_lsm;
 	struct obd_ioobj ioo;
 	struct niobuf_local *lnb;
 	struct niobuf_remote *rnb;
@@ -1657,8 +1466,7 @@
 	u64 npages, tot_pages;
 	int i, ret = 0, brw_flags = 0;
 
-	if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
-	    (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
+	if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
 		return -EINVAL;
 
 	npages = batch >> PAGE_CACHE_SHIFT;
@@ -1667,7 +1475,7 @@
 	lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
 	rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
 
-	if (lnb == NULL || rnb == NULL) {
+	if (!lnb || !rnb) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1705,7 +1513,7 @@
 			struct page *page = lnb[i].page;
 
 			/* read past eof? */
-			if (page == NULL && lnb[i].rc == 0)
+			if (!page && lnb[i].rc == 0)
 				continue;
 
 			if (async)
@@ -1717,12 +1525,12 @@
 				continue;
 
 			if (rw == OBD_BRW_WRITE)
-				echo_client_page_debug_setup(lsm, page, rw,
+				echo_client_page_debug_setup(page, rw,
 							    ostid_id(&oa->o_oi),
 							     rnb[i].offset,
 							     rnb[i].len);
 			else
-				echo_client_page_debug_check(lsm, page,
+				echo_client_page_debug_check(page,
 							    ostid_id(&oa->o_oi),
 							     rnb[i].offset,
 							     rnb[i].len);
@@ -1774,7 +1582,7 @@
 	if (test_mode == 1)
 		async = 0;
 
-	if (ed->ed_next == NULL && test_mode != 3) {
+	if (!ed->ed_next && test_mode != 3) {
 		test_mode = 3;
 		data->ioc_plen1 = data->ioc_count;
 	}
@@ -1805,55 +1613,8 @@
 }
 
 static int
-echo_client_enqueue(struct obd_export *exp, struct obdo *oa,
-		    int mode, u64 offset, u64 nob)
-{
-	struct echo_device     *ed = obd2echo_dev(exp->exp_obd);
-	struct lustre_handle   *ulh = &oa->o_handle;
-	struct echo_object     *eco;
-	u64		 end;
-	int		     rc;
-
-	if (ed->ed_next == NULL)
-		return -EOPNOTSUPP;
-
-	if (!(mode == LCK_PR || mode == LCK_PW))
-		return -EINVAL;
-
-	if ((offset & (~CFS_PAGE_MASK)) != 0 ||
-	    (nob & (~CFS_PAGE_MASK)) != 0)
-		return -EINVAL;
-
-	rc = echo_get_object(&eco, ed, oa);
-	if (rc != 0)
-		return rc;
-
-	end = (nob == 0) ? ((u64) -1) : (offset + nob - 1);
-	rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie);
-	if (rc == 0) {
-		oa->o_valid |= OBD_MD_FLHANDLE;
-		CDEBUG(D_INFO, "Cookie is %#llx\n", ulh->cookie);
-	}
-	echo_put_object(eco);
-	return rc;
-}
-
-static int
-echo_client_cancel(struct obd_export *exp, struct obdo *oa)
-{
-	struct echo_device *ed     = obd2echo_dev(exp->exp_obd);
-	__u64	       cookie = oa->o_handle.cookie;
-
-	if ((oa->o_valid & OBD_MD_FLHANDLE) == 0)
-		return -EINVAL;
-
-	CDEBUG(D_INFO, "Cookie is %#llx\n", cookie);
-	return cl_echo_cancel(ed, cookie);
-}
-
-static int
 echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
-		      void *karg, void *uarg)
+		      void *karg, void __user *uarg)
 {
 	struct obd_device      *obd = exp->exp_obd;
 	struct echo_device     *ed = obd2echo_dev(obd);
@@ -1899,8 +1660,7 @@
 			goto out;
 		}
 
-		rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
-					data->ioc_plen1, &dummy_oti);
+		rc = echo_create_object(env, ed, oa, &dummy_oti);
 		goto out;
 
 	case OBD_IOC_DESTROY:
@@ -1911,7 +1671,7 @@
 
 		rc = echo_get_object(&eco, ed, oa);
 		if (rc == 0) {
-			rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm,
+			rc = obd_destroy(env, ec->ec_exp, oa, NULL,
 					 &dummy_oti, NULL);
 			if (rc == 0)
 				eco->eo_deleted = 1;
@@ -1922,10 +1682,10 @@
 	case OBD_IOC_GETATTR:
 		rc = echo_get_object(&eco, ed, oa);
 		if (rc == 0) {
-			struct obd_info oinfo = { };
+			struct obd_info oinfo = {
+				.oi_oa = oa,
+			};
 
-			oinfo.oi_md = eco->eo_lsm;
-			oinfo.oi_oa = oa;
 			rc = obd_getattr(env, ec->ec_exp, &oinfo);
 			echo_put_object(eco);
 		}
@@ -1939,10 +1699,9 @@
 
 		rc = echo_get_object(&eco, ed, oa);
 		if (rc == 0) {
-			struct obd_info oinfo = { };
-
-			oinfo.oi_oa = oa;
-			oinfo.oi_md = eco->eo_lsm;
+			struct obd_info oinfo = {
+				.oi_oa = oa,
+			};
 
 			rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
 			echo_put_object(eco);
@@ -1961,50 +1720,6 @@
 		rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
 		goto out;
 
-	case ECHO_IOC_GET_STRIPE:
-		rc = echo_get_object(&eco, ed, oa);
-		if (rc == 0) {
-			rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1,
-					      data->ioc_plen1);
-			echo_put_object(eco);
-		}
-		goto out;
-
-	case ECHO_IOC_SET_STRIPE:
-		if (!capable(CFS_CAP_SYS_ADMIN)) {
-			rc = -EPERM;
-			goto out;
-		}
-
-		if (data->ioc_pbuf1 == NULL) {  /* unset */
-			rc = echo_get_object(&eco, ed, oa);
-			if (rc == 0) {
-				eco->eo_deleted = 1;
-				echo_put_object(eco);
-			}
-		} else {
-			rc = echo_create_object(env, ed, 0, oa,
-						data->ioc_pbuf1,
-						data->ioc_plen1, &dummy_oti);
-		}
-		goto out;
-
-	case ECHO_IOC_ENQUEUE:
-		if (!capable(CFS_CAP_SYS_ADMIN)) {
-			rc = -EPERM;
-			goto out;
-		}
-
-		rc = echo_client_enqueue(exp, oa,
-					 data->ioc_conn1, /* lock mode */
-					 data->ioc_offset,
-					 data->ioc_count);/*extent*/
-		goto out;
-
-	case ECHO_IOC_CANCEL:
-		rc = echo_client_cancel(exp, oa);
-		goto out;
-
 	default:
 		CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
 		rc = -ENOTTY;
@@ -2051,7 +1766,6 @@
 	INIT_LIST_HEAD(&ec->ec_objects);
 	INIT_LIST_HEAD(&ec->ec_locks);
 	ec->ec_unique = 0;
-	ec->ec_nstripes = 0;
 
 	ocd = kzalloc(sizeof(*ocd), GFP_NOFS);
 	if (!ocd) {
@@ -2120,7 +1834,7 @@
 {
 	int		     rc;
 
-	if (exp == NULL) {
+	if (!exp) {
 		rc = -EINVAL;
 		goto out;
 	}
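
For reference, the echo_client_iocontrol() prototype above now marks its last argument as void __user *uarg. The sketch below is illustrative only (the helper name is hypothetical and not part of this patch); it shows what the annotation buys: sparse will flag any direct dereference of a __user pointer, so userspace data has to go through the uaccess helpers.

#include <linux/uaccess.h>

/* Hypothetical helper: copy an obdo that userspace passed by pointer. */
static int example_fetch_obdo(struct obdo *oa, void __user *uarg)
{
	if (copy_from_user(oa, uarg, sizeof(*oa)))
		return -EFAULT;		/* never dereference uarg directly */
	return 0;
}
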
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
index 69063fa..f5034a2 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 1091536..b5df5e6 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -381,7 +381,7 @@
 
 	DECLARE_CKSUM_NAME;
 
-	if (obd == NULL)
+	if (!obd)
 		return 0;
 
 	for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
@@ -406,7 +406,7 @@
 	DECLARE_CKSUM_NAME;
 	char kernbuf[10];
 
-	if (obd == NULL)
+	if (!obd)
 		return 0;
 
 	if (count > sizeof(kernbuf) - 1)
@@ -422,8 +422,8 @@
 		if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
 			continue;
 		if (!strcmp(kernbuf, cksum_name[i])) {
-		       obd->u.cli.cl_cksum_type = 1 << i;
-		       return count;
+			obd->u.cli.cl_cksum_type = 1 << i;
+			return count;
 		}
 	}
 	return -EINVAL;
@@ -480,9 +480,19 @@
 	struct obd_device *obd = container_of(kobj, struct obd_device,
 					      obd_kobj);
 	struct osc_device *od  = obd2osc_dev(obd);
+	int rc;
+	int val;
 
-	return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?:
-		count;
+	rc = kstrtoint(buffer, 10, &val);
+	if (rc)
+		return rc;
+
+	if (val < 0)
+		return -EINVAL;
+
+	od->od_contention_time = val;
+
+	return count;
 }
 LUSTRE_RW_ATTR(contention_seconds);
 
@@ -505,9 +515,16 @@
 	struct obd_device *obd = container_of(kobj, struct obd_device,
 					      obd_kobj);
 	struct osc_device *od  = obd2osc_dev(obd);
+	int rc;
+	unsigned int val;
 
-	return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?:
-		count;
+	rc = kstrtouint(buffer, 10, &val);
+	if (rc)
+		return rc;
+
+	od->od_lockless_truncate = val;
+
+	return count;
 }
 LUSTRE_RW_ATTR(lockless_truncate);
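
Both store handlers above now parse the sysfs buffer with kstrtoint()/kstrtouint() instead of lprocfs_write_helper(), so malformed input is rejected with the parser's error code before any device state is touched. A minimal sketch of the same pattern, with a hypothetical attribute name and an assumed non-negative range:

static ssize_t example_seconds_store(struct kobject *kobj,
				     struct attribute *attr,
				     const char *buffer, size_t count)
{
	struct obd_device *obd = container_of(kobj, struct obd_device,
					      obd_kobj);
	struct osc_device *od = obd2osc_dev(obd);
	int val;
	int rc;

	rc = kstrtoint(buffer, 10, &val);	/* -EINVAL/-ERANGE on bad input */
	if (rc)
		return rc;
	if (val < 0)				/* assumed valid range: >= 0 */
		return -EINVAL;

	od->od_contention_time = val;
	return count;
}

From userspace nothing changes: writing a decimal value to the attribute stores it, while a negative value now fails the write with EINVAL.
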
 
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 2229419..c6623c1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -140,7 +140,7 @@
 
 static inline struct osc_extent *rb_extent(struct rb_node *n)
 {
-	if (n == NULL)
+	if (!n)
 		return NULL;
 
 	return container_of(n, struct osc_extent, oe_node);
@@ -148,7 +148,7 @@
 
 static inline struct osc_extent *next_extent(struct osc_extent *ext)
 {
-	if (ext == NULL)
+	if (!ext)
 		return NULL;
 
 	LASSERT(ext->oe_intree);
@@ -157,7 +157,7 @@
 
 static inline struct osc_extent *prev_extent(struct osc_extent *ext)
 {
-	if (ext == NULL)
+	if (!ext)
 		return NULL;
 
 	LASSERT(ext->oe_intree);
@@ -240,7 +240,7 @@
 		goto out;
 	}
 
-	if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
+	if (!ext->oe_osclock && ext->oe_grants > 0) {
 		rc = 90;
 		goto out;
 	}
@@ -262,7 +262,8 @@
 	}
 
 	/* Do not verify page list if extent is in RPC. This is because an
-	 * in-RPC extent is supposed to be exclusively accessible w/o lock. */
+	 * in-RPC extent is supposed to be exclusively accessible w/o lock.
+	 */
 	if (ext->oe_state > OES_CACHE) {
 		rc = 0;
 		goto out;
@@ -319,7 +320,7 @@
 	if (!extent_debug)
 		return 0;
 
-	for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
+	for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) {
 		if (tmp == ext)
 			continue;
 		if (tmp->oe_end >= ext->oe_start &&
@@ -347,7 +348,7 @@
 	struct osc_extent *ext;
 
 	ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO);
-	if (ext == NULL)
+	if (!ext)
 		return NULL;
 
 	RB_CLEAR_NODE(&ext->oe_node);
@@ -415,7 +416,7 @@
 	struct osc_extent *tmp, *p = NULL;
 
 	LASSERT(osc_object_is_locked(obj));
-	while (n != NULL) {
+	while (n) {
 		tmp = rb_extent(n);
 		if (index < tmp->oe_start) {
 			n = n->rb_left;
@@ -439,7 +440,7 @@
 	struct osc_extent *ext;
 
 	ext = osc_extent_search(obj, index);
-	if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
+	if (ext && ext->oe_start <= index && index <= ext->oe_end)
 		return osc_extent_get(ext);
 	return NULL;
 }
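
As a reading aid, here is a hypothetical caller of the lookup above (not part of this patch): osc_extent_search()/osc_extent_lookup() must run under the object lock, and a successful lookup returns a referenced extent that has to be dropped with osc_extent_put().

static int example_index_is_cached(const struct lu_env *env,
				   struct osc_object *obj, pgoff_t index)
{
	struct osc_extent *ext;
	int cached = 0;

	osc_object_lock(obj);		/* the tree walk needs the object lock */
	ext = osc_extent_lookup(obj, index);
	osc_object_unlock(obj);

	if (ext) {
		cached = 1;
		osc_extent_put(env, ext);	/* balance the lookup reference */
	}
	return cached;
}
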
@@ -454,7 +455,7 @@
 	LASSERT(ext->oe_intree == 0);
 	LASSERT(ext->oe_obj == obj);
 	LASSERT(osc_object_is_locked(obj));
-	while (*n != NULL) {
+	while (*n) {
 		tmp = rb_extent(*n);
 		parent = *n;
 
@@ -533,7 +534,7 @@
 
 	LASSERT(cur->oe_state == OES_CACHE);
 	LASSERT(osc_object_is_locked(obj));
-	if (victim == NULL)
+	if (!victim)
 		return -EINVAL;
 
 	if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
@@ -587,7 +588,8 @@
 		if (ext->oe_trunc_pending) {
 			/* a truncate process is waiting for this extent.
 			 * This may happen due to a race, check
-			 * osc_cache_truncate_start(). */
+			 * osc_cache_truncate_start().
+			 */
 			osc_extent_state_set(ext, OES_TRUNC);
 			ext->oe_trunc_pending = 0;
 		} else {
@@ -639,11 +641,10 @@
 	int rc;
 
 	cur = osc_extent_alloc(obj);
-	if (cur == NULL)
+	if (!cur)
 		return ERR_PTR(-ENOMEM);
 
 	lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
-	LASSERT(lock != NULL);
 	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
 
 	LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
@@ -678,9 +679,9 @@
 restart:
 	osc_object_lock(obj);
 	ext = osc_extent_search(obj, cur->oe_start);
-	if (ext == NULL)
+	if (!ext)
 		ext = first_extent(obj);
-	while (ext != NULL) {
+	while (ext) {
 		loff_t ext_chk_start = ext->oe_start >> ppc_bits;
 		loff_t ext_chk_end = ext->oe_end >> ppc_bits;
 
@@ -705,18 +706,21 @@
 
 		/* ok, from now on, ext and cur have these attrs:
 		 * 1. covered by the same lock
-		 * 2. contiguous at chunk level or overlapping. */
+		 * 2. contiguous at chunk level or overlapping.
+		 */
 
 		if (overlapped(ext, cur)) {
 			/* cur is the minimum unit, so overlapping means
-			 * full contain. */
+			 * full contain.
+			 */
 			EASSERTF((ext->oe_start <= cur->oe_start &&
 				  ext->oe_end >= cur->oe_end),
 				 ext, EXTSTR, EXTPARA(cur));
 
 			if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
 				/* for simplicity, we wait for this extent to
-				 * finish before going forward. */
+				 * finish before going forward.
+				 */
 				conflict = osc_extent_get(ext);
 				break;
 			}
@@ -729,17 +733,20 @@
 		if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
 			/* we can't do anything for a non OES_CACHE extent, or
 			 * if there is someone waiting for this extent to be
-			 * flushed, try next one. */
+			 * flushed, try next one.
+			 */
 			ext = next_extent(ext);
 			continue;
 		}
 
 		/* check if they belong to the same rpc slot before trying to
 		 * merge. the extents are not overlapped and contiguous at
-		 * chunk level to get here. */
+		 * chunk level to get here.
+		 */
 		if (ext->oe_max_end != max_end) {
 			/* if they don't belong to the same RPC slot or
-			 * max_pages_per_rpc has ever changed, do not merge. */
+			 * max_pages_per_rpc has ever changed, do not merge.
+			 */
 			ext = next_extent(ext);
 			continue;
 		}
@@ -748,7 +755,8 @@
 		 * level so that we know the whole extent is covered by grant
 		 * (the pages in the extent are NOT required to be contiguous).
 		 * Otherwise, it will be too much difficult to know which
-		 * chunks have grants allocated. */
+		 * chunks have grants allocated.
+		 */
 
 		/* try to do front merge - extend ext's start */
 		if (chunk + 1 == ext_chk_start) {
@@ -768,28 +776,29 @@
 			*grants -= chunksize;
 
 			/* try to merge with the next one because we just fill
-			 * in a gap */
+			 * in a gap
+			 */
 			if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
 				/* we can save extent tax from next extent */
 				*grants += cli->cl_extent_tax;
 
 			found = osc_extent_hold(ext);
 		}
-		if (found != NULL)
+		if (found)
 			break;
 
 		ext = next_extent(ext);
 	}
 
 	osc_extent_tree_dump(D_CACHE, obj);
-	if (found != NULL) {
-		LASSERT(conflict == NULL);
+	if (found) {
+		LASSERT(!conflict);
 		if (!IS_ERR(found)) {
 			LASSERT(found->oe_osclock == cur->oe_osclock);
 			OSC_EXTENT_DUMP(D_CACHE, found,
 					"found caching ext for %lu.\n", index);
 		}
-	} else if (conflict == NULL) {
+	} else if (!conflict) {
 		/* create a new extent */
 		EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
 		cur->oe_grants = chunksize + cli->cl_extent_tax;
@@ -804,11 +813,12 @@
 	}
 	osc_object_unlock(obj);
 
-	if (conflict != NULL) {
-		LASSERT(found == NULL);
+	if (conflict) {
+		LASSERT(!found);
 
 		/* waiting for IO to finish. Please notice that it's impossible
-		 * to be an OES_TRUNC extent. */
+		 * to be an OES_TRUNC extent.
+		 */
 		rc = osc_extent_wait(env, conflict, OES_INV);
 		osc_extent_put(env, conflict);
 		conflict = NULL;
@@ -865,7 +875,8 @@
 		   last_count != PAGE_CACHE_SIZE) {
 		/* For short writes we shouldn't count parts of pages that
 		 * span a whole chunk on the OST side, or our accounting goes
-		 * wrong.  Should match the code in filter_grant_check. */
+		 * wrong.  Should match the code in filter_grant_check.
+		 */
 		int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
 		int count = oap->oap_count + (offset & (blocksize - 1));
 		int end = (offset + oap->oap_count) & (blocksize - 1);
@@ -909,7 +920,8 @@
 	osc_object_lock(obj);
 	LASSERT(sanity_check_nolock(ext) == 0);
 	/* `Kick' this extent only if the caller is waiting for it to be
-	 * written out. */
+	 * written out.
+	 */
 	if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
 	    !ext->oe_trunc_pending) {
 		if (ext->oe_state == OES_ACTIVE) {
@@ -967,7 +979,8 @@
 
 	/* Request new lu_env.
 	 * We can't use that env from osc_cache_truncate_start() because
-	 * it's from lov_io_sub and not fully initialized. */
+	 * it's from lov_io_sub and not fully initialized.
+	 */
 	env = cl_env_nested_get(&nest);
 	io  = &osc_env_info(env)->oti_io;
 	io->ci_obj = cl_object_top(osc2cl(obj));
@@ -984,7 +997,8 @@
 		LASSERT(list_empty(&oap->oap_rpc_item));
 
 		/* only discard the pages with their index greater than
-		 * trunc_index, and ... */
+		 * trunc_index, and ...
+		 */
 		if (sub->cp_index < trunc_index ||
 		    (sub->cp_index == trunc_index && partial)) {
 			/* accounting how many pages remaining in the chunk
@@ -1028,11 +1042,13 @@
 		pgoff_t last_index;
 
 		/* if there is no pages in this chunk, we can also free grants
-		 * for the last chunk */
+		 * for the last chunk
+		 */
 		if (pages_in_chunk == 0) {
 			/* if this is the 1st chunk and no pages in this chunk,
 			 * ext->oe_nr_pages must be zero, so we should be in
-			 * the other if-clause. */
+			 * the other if-clause.
+			 */
 			LASSERT(trunc_chunk > 0);
 			--trunc_chunk;
 			++chunks;
@@ -1074,13 +1090,13 @@
 	LASSERT(sanity_check(ext) == 0);
 	/* in locking state, any process should not touch this extent. */
 	EASSERT(ext->oe_state == OES_LOCKING, ext);
-	EASSERT(ext->oe_owner != NULL, ext);
+	EASSERT(ext->oe_owner, ext);
 
 	OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
 
 	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
 		++page_count;
-		if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
+		if (!last || last->oap_obj_off < oap->oap_obj_off)
 			last = oap;
 
 		/* checking ASYNC_READY is race safe */
@@ -1103,9 +1119,10 @@
 	}
 
 	LASSERT(page_count == ext->oe_nr_pages);
-	LASSERT(last != NULL);
+	LASSERT(last);
 	/* the last page is the only one we need to refresh its count by
-	 * the size of file. */
+	 * the size of file.
+	 */
 	if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
 		last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
 		LASSERT(last->oap_count > 0);
@@ -1114,7 +1131,8 @@
 	}
 
 	/* for the rest of pages, we don't need to call osf_refresh_count()
-	 * because it's known they are not the last page */
+	 * because it's known they are not the last page
+	 */
 	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
 		if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
 			oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
@@ -1167,9 +1185,10 @@
 	end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
 
 	next = next_extent(ext);
-	if (next != NULL && next->oe_start <= end_index) {
+	if (next && next->oe_start <= end_index) {
 		/* complex mode - overlapped with the next extent,
-		 * this case will be handled by osc_extent_find() */
+		 * this case will be handled by osc_extent_find()
+		 */
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1197,7 +1216,7 @@
 
 	/* osc_object_lock(obj); */
 	cnt = 1;
-	for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
+	for (ext = first_extent(obj); ext; ext = next_extent(ext))
 		OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
 
 	cnt = 1;
@@ -1262,7 +1281,6 @@
 
 	/* readpage queues with _COUNT_STABLE, shouldn't get here. */
 	LASSERT(!(cmd & OBD_BRW_READ));
-	LASSERT(opg != NULL);
 	obj = opg->ops_cl.cpl_obj;
 
 	cl_object_attr_lock(obj);
@@ -1299,16 +1317,16 @@
 	 * page->cp_req can be NULL if io submission failed before
 	 * cl_req was allocated.
 	 */
-	if (page->cp_req != NULL)
+	if (page->cp_req)
 		cl_req_page_done(env, page);
-	LASSERT(page->cp_req == NULL);
+	LASSERT(!page->cp_req);
 
 	crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
 	/* Clear opg->ops_transfer_pinned before VM lock is released. */
 	opg->ops_transfer_pinned = 0;
 
 	spin_lock(&obj->oo_seatbelt);
-	LASSERT(opg->ops_submitter != NULL);
+	LASSERT(opg->ops_submitter);
 	LASSERT(!list_empty(&opg->ops_inflight));
 	list_del_init(&opg->ops_inflight);
 	opg->ops_submitter = NULL;
@@ -1367,7 +1385,8 @@
 }
 
 /* the companion to osc_consume_write_grant, called when a brw has completed.
- * must be called with the loi lock held. */
+ * must be called with the loi lock held.
+ */
 static void osc_release_write_grant(struct client_obd *cli,
 				    struct brw_page *pga)
 {
@@ -1410,7 +1429,8 @@
 	/* it's quite normal for us to get more grant than reserved.
 	 * Thinking about a case that two extents merged by adding a new
 	 * chunk, we can save one extent tax. If extent tax is greater than
-	 * one chunk, we can save more grant by adding a new chunk */
+	 * one chunk, we can save more grant by adding a new chunk
+	 */
 	cli->cl_reserved_grant -= reserved;
 	if (unused > reserved) {
 		cli->cl_avail_grant += reserved;
@@ -1454,7 +1474,8 @@
 	cli->cl_lost_grant += lost_grant;
 	if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
 		/* borrow some grant from truncate to avoid the case that
-		 * truncate uses up all avail grant */
+		 * truncate uses up all avail grant
+		 */
 		cli->cl_lost_grant -= grant;
 		cli->cl_avail_grant += grant;
 	}
@@ -1539,7 +1560,8 @@
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 
 	/* force the caller to try sync io.  this can jump the list
-	 * of queued writes and create a discontiguous rpc stream */
+	 * of queued writes and create a discontiguous rpc stream
+	 */
 	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
 	    cli->cl_dirty_max < PAGE_CACHE_SIZE     ||
 	    cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
@@ -1558,7 +1580,8 @@
 	 * Adding a cache waiter will trigger urgent write-out no matter what
 	 * RPC size will be.
 	 * The exiting condition is no avail grants and no dirty pages caching,
-	 * that really means there is no space on the OST. */
+	 * that really means there is no space on the OST.
+	 */
 	init_waitqueue_head(&ocw.ocw_waitq);
 	ocw.ocw_oap   = oap;
 	ocw.ocw_grant = bytes;
@@ -1640,7 +1663,8 @@
 
 /* This maintains the lists of pending pages to read/write for a given object
  * (lop).  This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
- * to quickly find objects that are ready to send an RPC. */
+ * to quickly find objects that are ready to send an RPC.
+ */
 static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
 			 int cmd)
 {
@@ -1649,8 +1673,9 @@
 	/* if we have an invalid import we want to drain the queued pages
 	 * by forcing them through rpcs that immediately fail and complete
 	 * the pages.  recovery relies on this to empty the queued pages
-	 * before canceling the locks and evicting down the llite pages */
-	if ((cli->cl_import == NULL || cli->cl_import->imp_invalid))
+	 * before canceling the locks and evicting down the llite pages
+	 */
+	if (!cli->cl_import || cli->cl_import->imp_invalid)
 		invalid_import = 1;
 
 	if (cmd & OBD_BRW_WRITE) {
@@ -1670,7 +1695,8 @@
 		}
 		/* trigger a write rpc stream as long as there are dirtiers
 		 * waiting for space.  as they're waiting, they're not going to
-		 * create more pages to coalesce with what's waiting.. */
+		 * create more pages to coalesce with what's waiting..
+		 */
 		if (!list_empty(&cli->cl_cache_waiters)) {
 			CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
 			return 1;
@@ -1723,7 +1749,8 @@
 }
 
 /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
- * can find pages to build into rpcs quickly */
+ * can find pages to build into rpcs quickly
+ */
 static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
 {
 	if (osc_makes_hprpc(osc)) {
@@ -1761,7 +1788,8 @@
  * application.  As an async write fails we record the error code for later if
  * the app does an fsync.  As long as errors persist we force future rpcs to be
  * sync so that the app can get a sync error and break the cycle of queueing
- * pages for which writeback will fail. */
+ * pages for which writeback will fail.
+ */
 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
 			   int rc)
 {
@@ -1780,7 +1808,8 @@
 }
 
 /* this must be called holding the loi list lock to give coverage to exit_cache,
- * async_flag maintenance, and oap_request */
+ * async_flag maintenance, and oap_request
+ */
 static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
 			      struct osc_async_page *oap, int sent, int rc)
 {
@@ -1788,7 +1817,7 @@
 	struct lov_oinfo *loi = osc->oo_oinfo;
 	__u64 xid = 0;
 
-	if (oap->oap_request != NULL) {
+	if (oap->oap_request) {
 		xid = ptlrpc_req_xid(oap->oap_request);
 		ptlrpc_req_finished(oap->oap_request);
 		oap->oap_request = NULL;
@@ -1906,7 +1935,7 @@
 		while ((ext = next_extent(ext)) != NULL) {
 			if ((ext->oe_state != OES_CACHE) ||
 			    (!list_empty(&ext->oe_link) &&
-			     ext->oe_owner != NULL))
+			     ext->oe_owner))
 				continue;
 
 			if (!try_to_add_extent_for_io(cli, ext, rpclist,
@@ -1918,10 +1947,10 @@
 		return page_count;
 
 	ext = first_extent(obj);
-	while (ext != NULL) {
+	while (ext) {
 		if ((ext->oe_state != OES_CACHE) ||
 		    /* this extent may be already in current rpclist */
-		    (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
+		    (!list_empty(&ext->oe_link) && ext->oe_owner)) {
 			ext = next_extent(ext);
 			continue;
 		}
@@ -1938,6 +1967,7 @@
 static int
 osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
 		   struct osc_object *osc)
+	__must_hold(osc)
 {
 	LIST_HEAD(rpclist);
 	struct osc_extent *ext;
@@ -1967,7 +1997,8 @@
 	}
 
 	/* we're going to grab page lock, so release object lock because
-	 * lock order is page lock -> object lock. */
+	 * lock order is page lock -> object lock.
+	 */
 	osc_object_unlock(osc);
 
 	list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
@@ -1979,7 +2010,7 @@
 				continue;
 			}
 		}
-		if (first == NULL) {
+		if (!first) {
 			first = ext;
 			srvlock = ext->oe_srvlock;
 		} else {
@@ -2010,6 +2041,7 @@
 static int
 osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
 		  struct osc_object *osc)
+	__must_hold(osc)
 {
 	struct osc_extent *ext;
 	struct osc_extent *next;
@@ -2051,12 +2083,14 @@
 })
 
 /* This is called by osc_check_rpcs() to find which objects have pages that
- * we could be sending.  These lists are maintained by osc_makes_rpc(). */
+ * we could be sending.  These lists are maintained by osc_makes_rpc().
+ */
 static struct osc_object *osc_next_obj(struct client_obd *cli)
 {
 	/* First return objects that have blocked locks so that they
 	 * will be flushed quickly and other clients can get the lock,
-	 * then objects which have pages ready to be stuffed into RPCs */
+	 * then objects which have pages ready to be stuffed into RPCs
+	 */
 	if (!list_empty(&cli->cl_loi_hp_ready_list))
 		return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
 	if (!list_empty(&cli->cl_loi_ready_list))
@@ -2065,14 +2099,16 @@
 	/* then if we have cache waiters, return all objects with queued
 	 * writes.  This is especially important when many small files
 	 * have filled up the cache and not been fired into rpcs because
-	 * they don't pass the nr_pending/object threshold */
+	 * they don't pass the nr_pending/object threshold
+	 */
 	if (!list_empty(&cli->cl_cache_waiters) &&
 	    !list_empty(&cli->cl_loi_write_list))
 		return list_to_obj(&cli->cl_loi_write_list, write_item);
 
 	/* then return all queued objects when we have an invalid import
-	 * so that they get flushed */
-	if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
+	 * so that they get flushed
+	 */
+	if (!cli->cl_import || cli->cl_import->imp_invalid) {
 		if (!list_empty(&cli->cl_loi_write_list))
 			return list_to_obj(&cli->cl_loi_write_list, write_item);
 		if (!list_empty(&cli->cl_loi_read_list))
@@ -2083,6 +2119,7 @@
 
 /* called with the loi list lock held */
 static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
+	__must_hold(&cli->cl_loi_list_lock)
 {
 	struct osc_object *osc;
 	int rc = 0;
@@ -2108,7 +2145,8 @@
 		 * would be redundant if we were getting read/write work items
 		 * instead of objects.  we don't want send_oap_rpc to drain a
 		 * partial read pending queue when we're given this object to
-		 * do io on writes while there are cache waiters */
+		 * do io on writes while there are cache waiters
+		 */
 		osc_object_lock(osc);
 		if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
 			rc = osc_send_write_rpc(env, cli, osc);
@@ -2130,7 +2168,8 @@
 				 * because it might be blocked at grabbing
 				 * the page lock as we mentioned.
 				 *
-				 * Anyway, continue to drain pages. */
+				 * Anyway, continue to drain pages.
+				 */
 				/* break; */
 			}
 		}
@@ -2155,12 +2194,13 @@
 {
 	int rc = 0;
 
-	if (osc != NULL && osc_list_maint(cli, osc) == 0)
+	if (osc && osc_list_maint(cli, osc) == 0)
 		return 0;
 
 	if (!async) {
 		/* disable osc_lru_shrink() temporarily to avoid
-		 * potential stack overrun problem. LU-2859 */
+		 * potential stack overrun problem. LU-2859
+		 */
 		atomic_inc(&cli->cl_lru_shrinkers);
 		client_obd_list_lock(&cli->cl_loi_list_lock);
 		osc_check_rpcs(env, cli);
@@ -2168,7 +2208,7 @@
 		atomic_dec(&cli->cl_lru_shrinkers);
 	} else {
 		CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
-		LASSERT(cli->cl_writeback_work != NULL);
+		LASSERT(cli->cl_writeback_work);
 		rc = ptlrpcd_queue_work(cli->cl_writeback_work);
 	}
 	return rc;
@@ -2233,7 +2273,7 @@
 	if (oap->oap_magic != OAP_MAGIC)
 		return -EINVAL;
 
-	if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
+	if (!cli->cl_import || cli->cl_import->imp_invalid)
 		return -EIO;
 
 	if (!list_empty(&oap->oap_pending_item) ||
@@ -2284,12 +2324,14 @@
 	 * 1. if there exists an active extent for this IO, mostly this page
 	 *    can be added to the active extent and sometimes we need to
 	 *    expand extent to accommodate this page;
-	 * 2. otherwise, a new extent will be allocated. */
+	 * 2. otherwise, a new extent will be allocated.
+	 */
 
 	ext = oio->oi_active;
-	if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
+	if (ext && ext->oe_start <= index && ext->oe_max_end >= index) {
 		/* one chunk plus extent overhead must be enough to write this
-		 * page */
+		 * page
+		 */
 		grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
 		if (ext->oe_end >= index)
 			grants = 0;
@@ -2316,7 +2358,7 @@
 			}
 		}
 		rc = 0;
-	} else if (ext != NULL) {
+	} else if (ext) {
 		/* index is located outside of active extent */
 		need_release = 1;
 	}
@@ -2326,13 +2368,14 @@
 		ext = NULL;
 	}
 
-	if (ext == NULL) {
+	if (!ext) {
 		int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
 
 		/* try to find new extent to cover this page */
-		LASSERT(oio->oi_active == NULL);
+		LASSERT(!oio->oi_active);
 		/* we may have allocated grant for this page if we failed
-		 * to expand the previous active extent. */
+		 * to expand the previous active extent.
+		 */
 		LASSERT(ergo(grants > 0, grants >= tmp));
 
 		rc = 0;
@@ -2359,8 +2402,8 @@
 			osc_unreserve_grant(cli, grants, tmp);
 	}
 
-	LASSERT(ergo(rc == 0, ext != NULL));
-	if (ext != NULL) {
+	LASSERT(ergo(rc == 0, ext));
+	if (ext) {
 		EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
 			 ext, "index = %lu.\n", index);
 		LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
@@ -2397,15 +2440,16 @@
 		ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
 		/* only truncated pages are allowed to be taken out.
 		 * See osc_extent_truncate() and osc_cache_truncate_start()
-		 * for details. */
-		if (ext != NULL && ext->oe_state != OES_TRUNC) {
+		 * for details.
+		 */
+		if (ext && ext->oe_state != OES_TRUNC) {
 			OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
 					oap2cl_page(oap)->cp_index);
 			rc = -EBUSY;
 		}
 	}
 	osc_object_unlock(obj);
-	if (ext != NULL)
+	if (ext)
 		osc_extent_put(env, ext);
 	return rc;
 }
@@ -2430,7 +2474,7 @@
 
 	osc_object_lock(obj);
 	ext = osc_extent_lookup(obj, index);
-	if (ext == NULL) {
+	if (!ext) {
 		osc_extent_tree_dump(D_ERROR, obj);
 		LASSERTF(0, "page index %lu is NOT covered.\n", index);
 	}
@@ -2448,7 +2492,8 @@
 		 * exists a deadlock problem because other process can wait for
 		 * page writeback bit holding page lock; and meanwhile in
 		 * vvp_page_make_ready(), we need to grab page lock before
-		 * really sending the RPC. */
+		 * really sending the RPC.
+		 */
 	case OES_TRUNC:
 		/* race with truncate, page will be redirtied */
 	case OES_ACTIVE:
@@ -2456,7 +2501,8 @@
 		 * re-dirty the page. If we continued on here, and we were the
 		 * one making the extent active, we could deadlock waiting for
 		 * the page writeback to clear but it won't because the extent
-		 * is active and won't be written out. */
+		 * is active and won't be written out.
+		 */
 		rc = -EAGAIN;
 		goto out;
 	default:
@@ -2527,12 +2573,13 @@
 		if (ext->oe_start <= index && ext->oe_end >= index) {
 			LASSERT(ext->oe_state == OES_LOCK_DONE);
 			/* For OES_LOCK_DONE state extent, it has already held
-			 * a refcount for RPC. */
+			 * a refcount for RPC.
+			 */
 			found = osc_extent_get(ext);
 			break;
 		}
 	}
-	if (found != NULL) {
+	if (found) {
 		list_del_init(&found->oe_link);
 		osc_update_pending(obj, cmd, -found->oe_nr_pages);
 		osc_object_unlock(obj);
@@ -2543,8 +2590,9 @@
 	} else {
 		osc_object_unlock(obj);
 		/* ok, it's been put in an rpc. only one oap gets a request
-		 * reference */
-		if (oap->oap_request != NULL) {
+		 * reference
+		 */
+		if (oap->oap_request) {
 			ptlrpc_mark_interrupted(oap->oap_request);
 			ptlrpcd_wake(oap->oap_request);
 			ptlrpc_req_finished(oap->oap_request);
@@ -2579,7 +2627,7 @@
 	}
 
 	ext = osc_extent_alloc(obj);
-	if (ext == NULL) {
+	if (!ext) {
 		list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
 			list_del_init(&oap->oap_pending_item);
 			osc_ap_completion(env, cli, oap, 0, -ENOMEM);
@@ -2634,18 +2682,19 @@
 again:
 	osc_object_lock(obj);
 	ext = osc_extent_search(obj, index);
-	if (ext == NULL)
+	if (!ext)
 		ext = first_extent(obj);
 	else if (ext->oe_end < index)
 		ext = next_extent(ext);
-	while (ext != NULL) {
+	while (ext) {
 		EASSERT(ext->oe_state != OES_TRUNC, ext);
 
 		if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
 			/* if ext is in urgent state, it means there must exist
 			 * a page already having been flushed by write_page().
 			 * We have to wait for this extent because we can't
-			 * truncate that page. */
+			 * truncate that page.
+			 */
 			LASSERT(!ext->oe_hp);
 			OSC_EXTENT_DUMP(D_CACHE, ext,
 					"waiting for busy extent\n");
@@ -2660,7 +2709,8 @@
 			/* though we grab inode mutex for write path, but we
 			 * release it before releasing extent(in osc_io_end()),
 			 * so there is a race window that an extent is still
-			 * in OES_ACTIVE when truncate starts. */
+			 * in OES_ACTIVE when truncate starts.
+			 */
 			LASSERT(!ext->oe_trunc_pending);
 			ext->oe_trunc_pending = 1;
 		} else {
@@ -2685,7 +2735,8 @@
 		list_del_init(&ext->oe_link);
 
 		/* extent may be in OES_ACTIVE state because inode mutex
-		 * is released before osc_io_end() in file write case */
+		 * is released before osc_io_end() in file write case
+		 */
 		if (ext->oe_state != OES_TRUNC)
 			osc_extent_wait(env, ext, OES_TRUNC);
 
@@ -2710,19 +2761,21 @@
 
 			/* we need to hold this extent in OES_TRUNC state so
 			 * that no writeback will happen. This is to avoid
-			 * BUG 17397. */
-			LASSERT(oio->oi_trunc == NULL);
+			 * BUG 17397.
+			 */
+			LASSERT(!oio->oi_trunc);
 			oio->oi_trunc = osc_extent_get(ext);
 			OSC_EXTENT_DUMP(D_CACHE, ext,
 					"trunc at %llu\n", size);
 		}
 		osc_extent_put(env, ext);
 	}
-	if (waiting != NULL) {
+	if (waiting) {
 		int rc;
 
 		/* ignore the result of osc_extent_wait the write initiator
-		 * should take care of it. */
+		 * should take care of it.
+		 */
 		rc = osc_extent_wait(env, waiting, OES_INV);
 		if (rc < 0)
 			OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
@@ -2743,7 +2796,7 @@
 	struct osc_extent *ext = oio->oi_trunc;
 
 	oio->oi_trunc = NULL;
-	if (ext != NULL) {
+	if (ext) {
 		bool unplug = false;
 
 		EASSERT(ext->oe_nr_pages > 0, ext);
@@ -2786,11 +2839,11 @@
 again:
 	osc_object_lock(obj);
 	ext = osc_extent_search(obj, index);
-	if (ext == NULL)
+	if (!ext)
 		ext = first_extent(obj);
 	else if (ext->oe_end < index)
 		ext = next_extent(ext);
-	while (ext != NULL) {
+	while (ext) {
 		int rc;
 
 		if (ext->oe_start > end)
@@ -2841,11 +2894,11 @@
 
 	osc_object_lock(obj);
 	ext = osc_extent_search(obj, start);
-	if (ext == NULL)
+	if (!ext)
 		ext = first_extent(obj);
 	else if (ext->oe_end < start)
 		ext = next_extent(ext);
-	while (ext != NULL) {
+	while (ext) {
 		if (ext->oe_start > end)
 			break;
 
@@ -2864,12 +2917,13 @@
 					ext->oe_urgent = 1;
 					list = &obj->oo_urgent_exts;
 				}
-				if (list != NULL)
+				if (list)
 					list_move_tail(&ext->oe_link, list);
 				unplug = true;
 			} else {
 				/* the only discarder is lock cancelling, so
-				 * [start, end] must contain this extent */
+				 * [start, end] must contain this extent
+				 */
 				EASSERT(ext->oe_start >= start &&
 					ext->oe_max_end <= end, ext);
 				osc_extent_state_set(ext, OES_LOCKING);
@@ -2884,14 +2938,16 @@
 			/* It's pretty bad to wait for ACTIVE extents, because
 			 * we don't know how long we will wait for it to be
 			 * flushed since it may be blocked at awaiting more
-			 * grants. We do this for the correctness of fsync. */
+			 * grants. We do this for the correctness of fsync.
+			 */
 			LASSERT(hp == 0 && discard == 0);
 			ext->oe_urgent = 1;
 			break;
 		case OES_TRUNC:
 			/* this extent is being truncated, can't do anything
 			 * for it now. it will be set to urgent after truncate
-			 * is finished in osc_cache_truncate_end(). */
+			 * is finished in osc_cache_truncate_end().
+			 */
 		default:
 			break;
 		}
@@ -2910,7 +2966,8 @@
 			EASSERT(ext->oe_state == OES_LOCKING, ext);
 
 			/* Discard caching pages. We don't actually write this
-			 * extent out but we complete it as if we did. */
+			 * extent out but we complete it as if we did.
+			 */
 			rc = osc_extent_make_ready(env, ext);
 			if (unlikely(rc < 0)) {
 				OSC_EXTENT_DUMP(D_ERROR, ext,
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 415c27e..d55d04d 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -69,10 +69,12 @@
 	/** true if this io is lockless. */
 	int		oi_lockless;
 	/** active extents, we know how many bytes is going to be written,
-	 * so having an active extent will prevent it from being fragmented */
+	 * so having an active extent will prevent it from being fragmented
+	 */
 	struct osc_extent *oi_active;
 	/** partially truncated extent, we need to hold this extent to prevent
-	 * page writeback from happening. */
+	 * page writeback from happening.
+	 */
 	struct osc_extent *oi_trunc;
 
 	struct obd_info    oi_info;
@@ -154,7 +156,8 @@
 	atomic_t	 oo_nr_writes;
 
 	/** Protect extent tree. Will be used to protect
-	 * oo_{read|write}_pages soon. */
+	 * oo_{read|write}_pages soon.
+	 */
 	spinlock_t	    oo_lock;
 };
 
@@ -472,7 +475,7 @@
 	struct osc_thread_info *info;
 
 	info = lu_context_key_get(&env->le_ctx, &osc_key);
-	LASSERT(info != NULL);
+	LASSERT(info);
 	return info;
 }
 
@@ -481,7 +484,7 @@
 	struct osc_session *ses;
 
 	ses = lu_context_key_get(env->le_ses, &osc_session_key);
-	LASSERT(ses != NULL);
+	LASSERT(ses);
 	return ses;
 }
 
@@ -522,7 +525,7 @@
 	return (struct cl_object *)&obj->oo_cl;
 }
 
-static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode)
+static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
 {
 	LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
 	if (mode == CLM_READ)
@@ -533,7 +536,7 @@
 		return LCK_GROUP;
 }
 
-static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode)
+static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
 {
 	LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
 	if (mode == LCK_PR)
@@ -627,22 +630,26 @@
 			   oe_srvlock:1,
 			   oe_memalloc:1,
 	/** an ACTIVE extent is going to be truncated, so when this extent
-	 * is released, it will turn into TRUNC state instead of CACHE. */
+	 * is released, it will turn into TRUNC state instead of CACHE.
+	 */
 			   oe_trunc_pending:1,
 	/** this extent should be written asap and someone may wait for the
 	 * write to finish. This bit is usually set along with urgent if
 	 * the extent was CACHE state.
 	 * fsync_wait extent can't be merged because new extent region may
-	 * exceed fsync range. */
+	 * exceed fsync range.
+	 */
 			   oe_fsync_wait:1,
 	/** covering lock is being canceled */
 			   oe_hp:1,
 	/** this extent should be written back asap. set if one of pages is
-	 * called by page WB daemon, or sync write or reading requests. */
+	 * called by page WB daemon, or sync write or reading requests.
+	 */
 			   oe_urgent:1;
 	/** how many grants allocated for this extent.
 	 *  Grant allocated for this extent. There is no grant allocated
-	 *  for reading extents and sync write extents. */
+	 *  for reading extents and sync write extents.
+	 */
 	unsigned int       oe_grants;
 	/** # of dirty pages in this extent */
 	unsigned int       oe_nr_pages;
@@ -655,21 +662,25 @@
 	struct osc_page   *oe_next_page;
 	/** start and end index of this extent, include start and end
 	 * themselves. Page offset here is the page index of osc_pages.
-	 * oe_start is used as keyword for red-black tree. */
+	 * oe_start is used as keyword for red-black tree.
+	 */
 	pgoff_t	    oe_start;
 	pgoff_t	    oe_end;
 	/** maximum ending index of this extent, this is limited by
-	 * max_pages_per_rpc, lock extent and chunk size. */
+	 * max_pages_per_rpc, lock extent and chunk size.
+	 */
 	pgoff_t	    oe_max_end;
 	/** waitqueue - for those who want to be notified if this extent's
-	 * state has changed. */
+	 * state has changed.
+	 */
 	wait_queue_head_t	oe_waitq;
 	/** lock covering this extent */
 	struct cl_lock    *oe_osclock;
 	/** terminator of this extent. Must be true if this extent is in IO. */
 	struct task_struct	*oe_owner;
 	/** return value of writeback. If somebody is waiting for this extent,
-	 * this value can be known by outside world. */
+	 * this value can be known by outside world.
+	 */
 	int		oe_rc;
 	/** max pages per rpc when this extent was created */
 	unsigned int       oe_mppr;
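
The converters above now take and return enum ldlm_mode directly rather than the old ldlm_mode_t typedef. A hypothetical use (not from the patch), only to show the direction of the change:

/* Map a client lock's mode to the DLM mode it would be enqueued with. */
static enum ldlm_mode example_dlm_mode(const struct cl_lock *lock)
{
	return osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
}
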
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 7078cc5..67cb6e4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -123,7 +123,7 @@
 	struct osc_thread_info *info;
 
 	info = kmem_cache_alloc(osc_thread_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info == NULL)
+	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;
 }
@@ -148,7 +148,7 @@
 	struct osc_session *info;
 
 	info = kmem_cache_alloc(osc_session_kmem, GFP_NOFS | __GFP_ZERO);
-	if (info == NULL)
+	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;
 }
@@ -228,7 +228,7 @@
 
 	/* Setup OSC OBD */
 	obd = class_name2obd(lustre_cfg_string(cfg, 0));
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	rc = osc_setup(obd, cfg);
 	if (rc) {
 		osc_device_free(env, d);
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index a4c6146..ea695c2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -47,11 +47,13 @@
 
 enum async_flags {
 	ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
-			      page is added to an rpc */
+			    * page is added to an rpc
+			    */
 	ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
 	ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
-				     to give the caller a chance to update
-				     or cancel the size of the io */
+				   * to give the caller a chance to update
+				   * or cancel the size of the io
+				   */
 	ASYNC_HP = 0x10,
 };
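
Most of the remaining churn in this header, as in the rest of the series, is block-comment style: the closing */ moves to a line of its own. A compressed illustration with hypothetical flag names:

enum example_flags {
	EXAMPLE_READY = 0x1,	/* short comments may stay on one line */
	EXAMPLE_STABLE = 0x2,	/* longer comments wrap with leading
				 * asterisks and keep the terminator
				 * on its own line
				 */
};
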
 
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index abd0beb..2d8d93c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -73,7 +73,7 @@
 	const struct cl_page_slice *slice;
 
 	slice = cl_page_at(page, &osc_device_type);
-	LASSERT(slice != NULL);
+	LASSERT(slice);
 
 	return cl2osc_page(slice);
 }
@@ -135,7 +135,7 @@
 
 		/* Top level IO. */
 		io = page->cp_owner;
-		LASSERT(io != NULL);
+		LASSERT(io);
 
 		opg = osc_cl_page_osc(page);
 		oap = &opg->ops_oap;
@@ -266,13 +266,14 @@
 	 * This implements OBD_BRW_CHECK logic from old client.
 	 */
 
-	if (imp == NULL || imp->imp_invalid)
+	if (!imp || imp->imp_invalid)
 		result = -EIO;
 	if (result == 0 && oio->oi_lockless)
 		/* this page contains `invalid' data, but who cares?
 		 * nobody can access the invalid data.
 		 * in osc_io_commit_write(), we're going to write exact
-		 * [from, to) bytes of this page to OST. -jay */
+		 * [from, to) bytes of this page to OST. -jay
+		 */
 		cl_page_export(env, slice->cpl_page, 1);
 
 	return result;
@@ -349,7 +350,7 @@
 	__u64 start = *(__u64 *)cbdata;
 
 	slice = cl_page_at(page, &osc_device_type);
-	LASSERT(slice != NULL);
+	LASSERT(slice);
 	ops = cl2osc_page(slice);
 	oap = &ops->ops_oap;
 
@@ -500,7 +501,7 @@
 		__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
 
 		osc_trunc_check(env, io, oio, size);
-		if (oio->oi_trunc != NULL) {
+		if (oio->oi_trunc) {
 			osc_cache_truncate_end(env, oio, cl2osc(obj));
 			oio->oi_trunc = NULL;
 		}
@@ -596,7 +597,8 @@
 		 * send OST_SYNC RPC. This is bad because it causes extents
 		 * to be written osc by osc. However, we usually start
 		 * writeback before CL_FSYNC_ALL so this won't have any real
-		 * problem. */
+		 * problem.
+		 */
 		rc = osc_cache_wait_range(env, osc, start, end);
 		if (result == 0)
 			result = rc;
@@ -754,7 +756,7 @@
 		opg = osc_cl_page_osc(apage);
 		apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
 		lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
-		if (lock == NULL) {
+		if (!lock) {
 			struct cl_object_header *head;
 			struct cl_lock *scan;
 
@@ -770,10 +772,9 @@
 		}
 
 		olck = osc_lock_at(lock);
-		LASSERT(olck != NULL);
-		LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
+		LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock));
 		/* check for lockless io. */
-		if (olck->ols_lock != NULL) {
+		if (olck->ols_lock) {
 			oa->o_handle = olck->ols_lock->l_remote_handle;
 			oa->o_valid |= OBD_MD_FLHANDLE;
 		}
@@ -804,7 +805,7 @@
 	int result;
 
 	or = kmem_cache_alloc(osc_req_kmem, GFP_NOFS | __GFP_ZERO);
-	if (or != NULL) {
+	if (or) {
 		cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
 		result = 0;
 	} else
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 71f2810..87f3522 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -79,7 +79,7 @@
 	struct ldlm_lock *lock;
 
 	lock = ldlm_handle2lock(handle);
-	if (lock != NULL)
+	if (lock)
 		LDLM_LOCK_PUT(lock);
 	return lock;
 }
@@ -94,42 +94,40 @@
 	int handle_used = lustre_handle_is_used(&ols->ols_handle);
 
 	if (ergo(osc_lock_is_lockless(ols),
-		 ols->ols_locklessable && ols->ols_lock == NULL))
+		 ols->ols_locklessable && !ols->ols_lock))
 		return 1;
 
 	/*
 	 * If all the following "ergo"s are true, return 1, otherwise 0
 	 */
-	if (!ergo(olock != NULL, handle_used))
+	if (!ergo(olock, handle_used))
 		return 0;
 
-	if (!ergo(olock != NULL,
-		   olock->l_handle.h_cookie == ols->ols_handle.cookie))
+	if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie))
 		return 0;
 
 	if (!ergo(handle_used,
-		   ergo(lock != NULL && olock != NULL, lock == olock) &&
-		   ergo(lock == NULL, olock == NULL)))
+		  ergo(lock && olock, lock == olock) &&
+		  ergo(!lock, !olock)))
 		return 0;
 	/*
 	 * Check that ->ols_handle and ->ols_lock are consistent, but
 	 * take into account that they are set at the different time.
 	 */
 	if (!ergo(ols->ols_state == OLS_CANCELLED,
-		   olock == NULL && !handle_used))
+		  !olock && !handle_used))
 		return 0;
 	/*
 	 * DLM lock is destroyed only after we have seen cancellation
 	 * ast.
 	 */
-	if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
-		   ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+	if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
+		  ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
 		return 0;
 
 	if (!ergo(ols->ols_state == OLS_GRANTED,
-		   olock != NULL &&
-		   olock->l_req_mode == olock->l_granted_mode &&
-		   ols->ols_hold))
+		  olock && olock->l_req_mode == olock->l_granted_mode &&
+		  ols->ols_hold))
 		return 0;
 	return 1;
 }
@@ -149,14 +147,15 @@
 
 	spin_lock(&osc_ast_guard);
 	dlmlock = olck->ols_lock;
-	if (dlmlock == NULL) {
+	if (!dlmlock) {
 		spin_unlock(&osc_ast_guard);
 		return;
 	}
 
 	olck->ols_lock = NULL;
 	/* wb(); --- for all who checks (ols->ols_lock != NULL) before
-	 * call to osc_lock_detach() */
+	 * call to osc_lock_detach()
+	 */
 	dlmlock->l_ast_data = NULL;
 	olck->ols_handle.cookie = 0ULL;
 	spin_unlock(&osc_ast_guard);
@@ -171,7 +170,8 @@
 		/* Must get the value under the lock to avoid possible races. */
 		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
 		/* Update the kms. Need to loop all granted locks.
-		 * Not a problem for the client */
+		 * Not a problem for the client
+		 */
 		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
 
 		cl_object_attr_set(env, obj, attr, CAT_KMS);
@@ -247,7 +247,7 @@
 	 * lock is destroyed immediately after upcall.
 	 */
 	osc_lock_unhold(ols);
-	LASSERT(ols->ols_lock == NULL);
+	LASSERT(!ols->ols_lock);
 	LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
 		atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
 
@@ -292,7 +292,7 @@
 	lock_res_and_lock(dlm_lock);
 	spin_lock(&osc_ast_guard);
 	olck = dlm_lock->l_ast_data;
-	if (olck != NULL) {
+	if (olck) {
 		struct cl_lock *lock = olck->ols_cl.cls_lock;
 		/*
 		 * If osc_lock holds a reference on ldlm lock, return it even
@@ -359,13 +359,13 @@
 		__u64 size;
 
 		dlmlock = olck->ols_lock;
-		LASSERT(dlmlock != NULL);
 
 		/* re-grab LVB from a dlm lock under DLM spin-locks. */
 		*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
 		size = lvb->lvb_size;
 		/* Extend KMS up to the end of this lock and no further
-		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
+		 * A lock on [x,y] means a KMS of up to y + 1 bytes!
+		 */
 		if (size > dlmlock->l_policy_data.l_extent.end)
 			size = dlmlock->l_policy_data.l_extent.end + 1;
 		if (size >= oinfo->loi_kms) {
@@ -429,7 +429,8 @@
 		 * to take a semaphore on a parent lock. This is safe, because
 		 * spin-locks are needed to protect consistency of
 		 * dlmlock->l_*_mode and LVB, and we have finished processing
-		 * them. */
+		 * them.
+		 */
 		unlock_res_and_lock(dlmlock);
 		cl_lock_modify(env, lock, descr);
 		cl_lock_signal(env, lock);
@@ -444,12 +445,12 @@
 	struct ldlm_lock *dlmlock;
 
 	dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
-	LASSERT(dlmlock != NULL);
+	LASSERT(dlmlock);
 
 	lock_res_and_lock(dlmlock);
 	spin_lock(&osc_ast_guard);
 	LASSERT(dlmlock->l_ast_data == olck);
-	LASSERT(olck->ols_lock == NULL);
+	LASSERT(!olck->ols_lock);
 	olck->ols_lock = dlmlock;
 	spin_unlock(&osc_ast_guard);
 
@@ -470,7 +471,8 @@
 	olck->ols_hold = 1;
 
 	/* lock reference taken by ldlm_handle2lock_long() is owned by
-	 * osc_lock and released in osc_lock_detach() */
+	 * osc_lock and released in osc_lock_detach()
+	 */
 	lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
 	olck->ols_has_ref = 1;
 }
@@ -508,10 +510,10 @@
 			struct ldlm_lock *dlmlock;
 
 			dlmlock = ldlm_handle2lock(&olck->ols_handle);
-			if (dlmlock != NULL) {
+			if (dlmlock) {
 				lock_res_and_lock(dlmlock);
 				spin_lock(&osc_ast_guard);
-				LASSERT(olck->ols_lock == NULL);
+				LASSERT(!olck->ols_lock);
 				dlmlock->l_ast_data = NULL;
 				olck->ols_handle.cookie = 0ULL;
 				spin_unlock(&osc_ast_guard);
@@ -548,7 +550,8 @@
 			/* For AGL case, the RPC sponsor may exits the cl_lock
 			*  processing without wait() called before related OSC
 			*  lock upcall(). So update the lock status according
-			*  to the enqueue result inside AGL upcall(). */
+			*  to the enqueue result inside AGL upcall().
+			*/
 			if (olck->ols_agl) {
 				lock->cll_flags |= CLF_FROM_UPCALL;
 				cl_wait_try(env, lock);
@@ -571,7 +574,8 @@
 
 		lu_ref_del(&lock->cll_reference, "upcall", lock);
 		/* This maybe the last reference, so must be called after
-		 * cl_lock_mutex_put(). */
+		 * cl_lock_mutex_put().
+		 */
 		cl_lock_put(env, lock);
 
 		cl_env_nested_put(&nest, env);
@@ -634,7 +638,7 @@
 
 	cancel = 0;
 	olck = osc_ast_data_get(dlmlock);
-	if (olck != NULL) {
+	if (olck) {
 		lock = olck->ols_cl.cls_lock;
 		cl_lock_mutex_get(env, lock);
 		LINVRNT(osc_lock_invariant(olck));
@@ -786,17 +790,17 @@
 	env = cl_env_nested_get(&nest);
 	if (!IS_ERR(env)) {
 		olck = osc_ast_data_get(dlmlock);
-		if (olck != NULL) {
+		if (olck) {
 			lock = olck->ols_cl.cls_lock;
 			cl_lock_mutex_get(env, lock);
 			/*
 			 * ldlm_handle_cp_callback() copied LVB from request
 			 * to lock->l_lvb_data, store it in osc_lock.
 			 */
-			LASSERT(dlmlock->l_lvb_data != NULL);
+			LASSERT(dlmlock->l_lvb_data);
 			lock_res_and_lock(dlmlock);
 			olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
-			if (olck->ols_lock == NULL) {
+			if (!olck->ols_lock) {
 				/*
 				 * upcall (osc_lock_upcall()) hasn't yet been
 				 * called. Do nothing now, upcall will bind
@@ -850,14 +854,15 @@
 		 * environment.
 		 */
 		olck = osc_ast_data_get(dlmlock);
-		if (olck != NULL) {
+		if (olck) {
 			lock = olck->ols_cl.cls_lock;
 			/* Do not grab the mutex of cl_lock for glimpse.
 			 * See LU-1274 for details.
 			 * BTW, it's okay for cl_lock to be cancelled during
 			 * this period because server can handle this race.
 			 * See ldlm_server_glimpse_ast() for details.
-			 * cl_lock_mutex_get(env, lock); */
+			 * cl_lock_mutex_get(env, lock);
+			 */
 			cap = &req->rq_pill;
 			req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
 			req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
@@ -1017,7 +1022,8 @@
 	LASSERT(cl_lock_is_mutexed(lock));
 
 	/* make it enqueue anyway for glimpse lock, because we actually
-	 * don't need to cancel any conflicting locks. */
+	 * don't need to cancel any conflicting locks.
+	 */
 	if (olck->ols_glimpse)
 		return 0;
 
@@ -1051,7 +1057,8 @@
 		 * imagine that client has PR lock on [0, 1000], and thread T0
 		 * is doing lockless IO in [500, 1500] region. Concurrent
 		 * thread T1 can see lockless data in [500, 1000], which is
-		 * wrong, because these data are possibly stale. */
+		 * wrong, because these data are possibly stale.
+		 */
 		if (!lockless && osc_lock_compatible(olck, scan_ols))
 			continue;
 
@@ -1074,7 +1081,7 @@
 		} else {
 			CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
 			       lock, conflict);
-			LASSERT(lock->cll_conflict == NULL);
+			LASSERT(!lock->cll_conflict);
 			lu_ref_add(&conflict->cll_reference, "cancel-wait",
 				   lock);
 			lock->cll_conflict = conflict;
@@ -1123,7 +1130,8 @@
 			struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
 
 			/* lock will be passed as upcall cookie,
-			 * hold ref to prevent to be released. */
+			 * hold ref to prevent to be released.
+			 */
 			cl_lock_hold_add(env, lock, "upcall", lock);
 			/* a user for lock also */
 			cl_lock_user_add(env, lock);
@@ -1174,7 +1182,8 @@
 		} else if (olck->ols_agl) {
 			if (lock->cll_flags & CLF_FROM_UPCALL)
 				/* It is from enqueue RPC reply upcall for
-				 * updating state. Do not re-enqueue. */
+				 * updating state. Do not re-enqueue.
+				 */
 				return -ENAVAIL;
 			olck->ols_state = OLS_NEW;
 		} else {
@@ -1197,7 +1206,7 @@
 	}
 
 	LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
-		     lock->cll_error == 0, olck->ols_lock != NULL));
+		     lock->cll_error == 0, olck->ols_lock));
 
 	return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
 }
@@ -1235,7 +1244,8 @@
 		LASSERT(lock->cll_state == CLS_INTRANSIT);
 		LASSERT(lock->cll_users > 0);
 		/* set a flag for osc_dlm_blocking_ast0() to signal the
-		 * lock.*/
+		 * lock.
+		 */
 		olck->ols_ast_wait = 1;
 		rc = CLO_WAIT;
 	}
@@ -1306,7 +1316,7 @@
 	LASSERT(cl_lock_is_mutexed(lock));
 	LINVRNT(osc_lock_invariant(olck));
 
-	if (dlmlock != NULL) {
+	if (dlmlock) {
 		int do_cancel;
 
 		discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
@@ -1318,7 +1328,8 @@
 		/* Now that we're the only user of dlm read/write reference,
 		 * mostly the ->l_readers + ->l_writers should be zero.
 		 * However, there is a corner case.
-		 * See bug 18829 for details.*/
+		 * See bug 18829 for details.
+		 */
 		do_cancel = (dlmlock->l_readers == 0 &&
 			     dlmlock->l_writers == 0);
 		dlmlock->l_flags |= LDLM_FL_CBPENDING;
@@ -1382,7 +1393,7 @@
 	if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
 		struct osc_io *oio = osc_env_io(env);
 
-		LASSERT(lock->ols_owner == NULL);
+		LASSERT(!lock->ols_owner);
 		lock->ols_owner = oio;
 	} else if (state != CLS_HELD)
 		lock->ols_owner = NULL;
@@ -1517,7 +1528,8 @@
 		lock->ols_owner = oio;
 
 		/* set the io to be lockless if this lock is for io's
-		 * host object */
+		 * host object
+		 */
 		if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
 			oio->oi_lockless = 1;
 	}
@@ -1556,7 +1568,7 @@
 	int result;
 
 	clk = kmem_cache_alloc(osc_lock_kmem, GFP_NOFS | __GFP_ZERO);
-	if (clk != NULL) {
+	if (clk) {
 		__u32 enqflags = lock->cll_descr.cld_enq_flags;
 
 		osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
@@ -1599,7 +1611,7 @@
 	 * doesn't matter because in the worst case we don't cancel a lock
 	 * which we actually can, that's no harm.
 	 */
-	if (olock != NULL &&
+	if (olock &&
 	    atomic_add_return(_PAGEREF_MAGIC,
 				  &olock->ols_pageref) != _PAGEREF_MAGIC) {
 		atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index fdd6219..60d8230 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -113,7 +113,7 @@
 	LASSERT(list_empty(&osc->oo_write_item));
 	LASSERT(list_empty(&osc->oo_read_item));
 
-	LASSERT(osc->oo_root.rb_node == NULL);
+	LASSERT(!osc->oo_root.rb_node);
 	LASSERT(list_empty(&osc->oo_hp_exts));
 	LASSERT(list_empty(&osc->oo_urgent_exts));
 	LASSERT(list_empty(&osc->oo_rpc_exts));
@@ -256,7 +256,7 @@
 	struct lu_object *obj;
 
 	osc = kmem_cache_alloc(osc_object_kmem, GFP_NOFS | __GFP_ZERO);
-	if (osc != NULL) {
+	if (osc) {
 		obj = osc2lu(osc);
 		lu_object_init(obj, NULL, dev);
 		osc->oo_cl.co_ops = &osc_ops;
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 2439d80..d11582a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -51,111 +51,12 @@
  *  @{
  */
 
-/*
- * Comment out osc_page_protected because it may sleep inside the
- * the client_obd_list_lock.
- * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
- *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
- *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
- */
-#if 0
-static int osc_page_is_dlocked(const struct lu_env *env,
-			       const struct osc_page *opg,
-			       enum cl_lock_mode mode, int pending, int unref)
-{
-	struct cl_page	 *page;
-	struct osc_object      *obj;
-	struct osc_thread_info *info;
-	struct ldlm_res_id     *resname;
-	struct lustre_handle   *lockh;
-	ldlm_policy_data_t     *policy;
-	ldlm_mode_t	     dlmmode;
-	__u64                   flags;
-
-	might_sleep();
-
-	info = osc_env_info(env);
-	resname = &info->oti_resname;
-	policy = &info->oti_policy;
-	lockh = &info->oti_handle;
-	page = opg->ops_cl.cpl_page;
-	obj = cl2osc(opg->ops_cl.cpl_obj);
-
-	flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
-	if (pending)
-		flags |= LDLM_FL_CBPENDING;
-
-	dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
-	osc_lock_build_res(env, obj, resname);
-	osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
-	return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
-			      dlmmode, &flags, NULL, lockh, unref);
-}
-
-/**
- * Checks an invariant that a page in the cache is covered by a lock, as
- * needed.
- */
-static int osc_page_protected(const struct lu_env *env,
-			      const struct osc_page *opg,
-			      enum cl_lock_mode mode, int unref)
-{
-	struct cl_object_header *hdr;
-	struct cl_lock	  *scan;
-	struct cl_page	  *page;
-	struct cl_lock_descr    *descr;
-	int result;
-
-	LINVRNT(!opg->ops_temp);
-
-	page = opg->ops_cl.cpl_page;
-	if (page->cp_owner != NULL &&
-	    cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
-		/*
-		 * If IO is done without locks (liblustre, or lloop), lock is
-		 * not required.
-		 */
-		result = 1;
-	else
-		/* otherwise check for a DLM lock */
-	result = osc_page_is_dlocked(env, opg, mode, 1, unref);
-	if (result == 0) {
-		/* maybe this page is a part of a lockless io? */
-		hdr = cl_object_header(opg->ops_cl.cpl_obj);
-		descr = &osc_env_info(env)->oti_descr;
-		descr->cld_mode = mode;
-		descr->cld_start = page->cp_index;
-		descr->cld_end   = page->cp_index;
-		spin_lock(&hdr->coh_lock_guard);
-		list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
-			/*
-			 * Lock-less sub-lock has to be either in HELD state
-			 * (when io is actively going on), or in CACHED state,
-			 * when top-lock is being unlocked:
-			 * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
-			 */
-			if ((scan->cll_state == CLS_HELD ||
-			     scan->cll_state == CLS_CACHED) &&
-			    cl_lock_ext_match(&scan->cll_descr, descr)) {
-				struct osc_lock *olck;
-
-				olck = osc_lock_at(scan);
-				result = osc_lock_is_lockless(olck);
-				break;
-			}
-		}
-		spin_unlock(&hdr->coh_lock_guard);
-	}
-	return result;
-}
-#else
 static int osc_page_protected(const struct lu_env *env,
 			      const struct osc_page *opg,
 			      enum cl_lock_mode mode, int unref)
 {
 	return 1;
 }
-#endif
 
 /*****************************************************************************
  *
@@ -168,7 +69,7 @@
 	struct osc_page *opg = cl2osc_page(slice);
 
 	CDEBUG(D_TRACE, "%p\n", opg);
-	LASSERT(opg->ops_lock == NULL);
+	LASSERT(!opg->ops_lock);
 }
 
 static void osc_page_transfer_get(struct osc_page *opg, const char *label)
@@ -204,7 +105,8 @@
 	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
 
 	/* ops_lru and ops_inflight share the same field, so take it from LRU
-	 * first and then use it as inflight. */
+	 * first and then use it as inflight.
+	 */
 	osc_lru_del(osc_cli(obj), opg, false);
 
 	spin_lock(&obj->oo_seatbelt);
@@ -232,9 +134,10 @@
 
 	/* for sync write, kernel will wait for this page to be flushed before
 	 * osc_io_end() is called, so release it earlier.
-	 * for mkwrite(), it's known there is no further pages. */
+	 * for mkwrite(), it's known there are no further pages.
+	 */
 	if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
-		if (oio->oi_active != NULL) {
+		if (oio->oi_active) {
 			osc_extent_release(env, oio->oi_active);
 			oio->oi_active = NULL;
 		}
@@ -258,7 +161,7 @@
 	struct osc_lock *olock;
 	int rc;
 
-	LASSERT(opg->ops_lock == NULL);
+	LASSERT(!opg->ops_lock);
 
 	olock = osc_lock_at(lock);
 	if (atomic_inc_return(&olock->ols_pageref) <= 0) {
@@ -278,7 +181,7 @@
 	struct cl_lock *lock = opg->ops_lock;
 	struct osc_lock *olock;
 
-	LASSERT(lock != NULL);
+	LASSERT(lock);
 	olock = osc_lock_at(lock);
 
 	atomic_dec(&olock->ols_pageref);
@@ -296,7 +199,7 @@
 
 	lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
 			       NULL, 1, 0);
-	if (lock != NULL) {
+	if (lock) {
 		if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
 			result = -EBUSY;
 		cl_lock_put(env, lock);
@@ -424,7 +327,7 @@
 	}
 
 	spin_lock(&obj->oo_seatbelt);
-	if (opg->ops_submitter != NULL) {
+	if (opg->ops_submitter) {
 		LASSERT(!list_empty(&opg->ops_inflight));
 		list_del_init(&opg->ops_inflight);
 		opg->ops_submitter = NULL;
@@ -434,8 +337,8 @@
 	osc_lru_del(osc_cli(obj), opg, true);
 }
 
-void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
-		   int from, int to)
+static void osc_page_clip(const struct lu_env *env,
+			  const struct cl_page_slice *slice, int from, int to)
 {
 	struct osc_page *opg = cl2osc_page(slice);
 	struct osc_async_page *oap = &opg->ops_oap;
@@ -458,7 +361,8 @@
 	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
 
 	/* Check if the transferring against this page
-	 * is completed, or not even queued. */
+	 * is completed, or not even queued.
+	 */
 	if (opg->ops_transfer_pinned)
 		/* FIXME: may not be interrupted.. */
 		rc = osc_cancel_async_page(env, opg);
@@ -522,7 +426,8 @@
 	 * creates temporary pages outside of a lock.
 	 */
 	/* ops_inflight and ops_lru are the same field, but it doesn't
-	 * hurt to initialize it twice :-) */
+	 * hurt to initialize it twice :-)
+	 */
 	INIT_LIST_HEAD(&opg->ops_inflight);
 	INIT_LIST_HEAD(&opg->ops_lru);
 
@@ -581,7 +486,8 @@
 static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
 static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU budget, and.. */
+ * number of pages to avoid running out of LRU budget, and..
+ */
 static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
 /* free this number at most otherwise it will take too long time to finish. */
 static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
@@ -590,7 +496,8 @@
  * we should free slots aggressively. In this way, slots are freed in a steady
  * step to maintain fairness among OSCs.
  *
- * Return how many LRU pages should be freed. */
+ * Return how many LRU pages should be freed.
+ */
 static int osc_cache_too_much(struct client_obd *cli)
 {
 	struct cl_client_cache *cache = cli->cl_cache;
@@ -602,7 +509,8 @@
 		return min(pages, lru_shrink_max);
 
 	/* if it's going to run out LRU slots, we should free some, but not
-	 * too much to maintain fairness among OSCs. */
+	 * too much to maintain fairness among OSCs.
+	 */
 	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
 		unsigned long tmp;
 
@@ -630,7 +538,8 @@
 			/* free LRU page only if nobody is using it.
 			 * This check is necessary to avoid freeing the pages
 			 * having already been removed from LRU and pinned
-			 * for IO. */
+			 * for IO.
+			 */
 			if (!cl_page_in_use(page)) {
 				cl_page_unmap(env, io, page);
 				cl_page_discard(env, io, page);
@@ -688,14 +597,14 @@
 			continue;
 		}
 
-		LASSERT(page->cp_obj != NULL);
+		LASSERT(page->cp_obj);
 		if (clobj != page->cp_obj) {
 			struct cl_object *tmp = page->cp_obj;
 
 			cl_object_get(tmp);
 			client_obd_list_unlock(&cli->cl_lru_list_lock);
 
-			if (clobj != NULL) {
+			if (clobj) {
 				count -= discard_pagevec(env, io, pvec, index);
 				index = 0;
 
@@ -720,11 +629,13 @@
 
 		/* move this page to the end of list as it will be discarded
 		 * soon. The page will be finally removed from LRU list in
-		 * osc_page_delete().  */
+		 * osc_page_delete().
+		 */
 		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
 
 		/* it's okay to grab a refcount here w/o holding lock because
-		 * it has to grab cl_lru_list_lock to delete the page. */
+		 * it has to grab cl_lru_list_lock to delete the page.
+		 */
 		cl_page_get(page);
 		pvec[index++] = page;
 		if (++count >= target)
@@ -740,7 +651,7 @@
 	}
 	client_obd_list_unlock(&cli->cl_lru_list_lock);
 
-	if (clobj != NULL) {
+	if (clobj) {
 		count -= discard_pagevec(env, io, pvec, index);
 
 		cl_io_fini(env, io);
@@ -775,7 +686,8 @@
 }
 
 /* delete page from LRUlist. The page can be deleted from LRUlist for two
- * reasons: redirtied or deleted from page cache. */
+ * reasons: redirtied or deleted from page cache.
+ */
 static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
 {
 	if (opg->ops_in_lru) {
@@ -797,7 +709,8 @@
 			 * this osc occupies too many LRU pages and kernel is
 			 * stealing one of them.
 			 * cl_lru_shrinkers is to avoid recursive call in case
-			 * we're already in the context of osc_lru_shrink(). */
+			 * we're already in the context of osc_lru_shrink().
+			 */
 			if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
 			    !memory_pressure_get())
 				osc_lru_shrink(cli, osc_cache_too_much(cli));
@@ -819,7 +732,7 @@
 	int max_scans;
 	int rc;
 
-	LASSERT(cache != NULL);
+	LASSERT(cache);
 
 	rc = osc_lru_shrink(cli, lru_shrink_min);
 	if (rc != 0) {
@@ -834,7 +747,8 @@
 		atomic_read(&cli->cl_lru_busy));
 
 	/* Reclaim LRU slots from other client_obd as it can't free enough
-	 * from its own. This should rarely happen. */
+	 * from its own. This should rarely happen.
+	 */
 	spin_lock(&cache->ccc_lru_lock);
 	LASSERT(!list_empty(&cache->ccc_lru));
 
@@ -875,7 +789,7 @@
 	struct client_obd *cli = osc_cli(obj);
 	int rc = 0;
 
-	if (cli->cl_cache == NULL) /* shall not be in LRU */
+	if (!cli->cl_cache) /* shall not be in LRU */
 		return 0;
 
 	LASSERT(atomic_read(cli->cl_lru_left) >= 0);
@@ -892,7 +806,8 @@
 		cond_resched();
 
 		/* slowest case, all of caching pages are busy, notifying
-		 * other OSCs that we're lack of LRU slots. */
+		 * other OSCs that we're short of LRU slots.
+		 */
 		atomic_inc(&osc_lru_waiters);
 
 		gen = atomic_read(&cli->cl_lru_in_list);
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index e70e796..208f72b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -13,11 +13,6 @@
  * General Public License version 2 for more details (a copy is included
  * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
  * GPL HEADER END
  */
 /*
@@ -36,7 +31,7 @@
 	struct osc_quota_info *oqi;
 
 	oqi = kmem_cache_alloc(osc_quota_kmem, GFP_NOFS | __GFP_ZERO);
-	if (oqi != NULL)
+	if (oqi)
 		oqi->oqi_id = id;
 
 	return oqi;
@@ -52,10 +47,12 @@
 		oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
 		if (oqi) {
 			/* do not try to access oqi here, it could have been
-			 * freed by osc_quota_setdq() */
+			 * freed by osc_quota_setdq()
+			 */
 
 			/* the slot is busy, the user is about to run out of
-			 * quota space on this OST */
+			 * quota space on this OST
+			 */
 			CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
 			       type == USRQUOTA ? "user" : "grout", qid[type]);
 			return NO_QUOTA;
@@ -89,12 +86,13 @@
 		oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
 		if ((flags & FL_QUOTA_FLAG(type)) != 0) {
 			/* This ID is getting close to its quota limit, let's
-			 * switch to sync I/O */
-			if (oqi != NULL)
+			 * switch to sync I/O
+			 */
+			if (oqi)
 				continue;
 
 			oqi = osc_oqi_alloc(qid[type]);
-			if (oqi == NULL) {
+			if (!oqi) {
 				rc = -ENOMEM;
 				break;
 			}
@@ -113,8 +111,9 @@
 			       qid[type], rc);
 		} else {
 			/* This ID is now off the hook, let's remove it from
-			 * the hash table */
-			if (oqi == NULL)
+			 * the hash table
+			 */
+			if (!oqi)
 				continue;
 
 			oqi = cfs_hash_del_key(cli->cl_quota_hash[type],
@@ -147,7 +146,7 @@
 	struct osc_quota_info *oqi;
 	u32 uid;
 
-	LASSERT(key != NULL);
+	LASSERT(key);
 	uid = *((u32 *)key);
 	oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
 
@@ -218,7 +217,7 @@
 							   CFS_HASH_MAX_THETA,
 							   &quota_hash_ops,
 							   CFS_HASH_DEFAULT);
-		if (cli->cl_quota_hash[type] == NULL)
+		if (!cli->cl_quota_hash[type])
 			break;
 	}
 
@@ -252,7 +251,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
 					OST_QUOTACTL);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -294,7 +293,7 @@
 	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
 					&RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
 					OST_QUOTACHECK);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -302,8 +301,8 @@
 
 	ptlrpc_request_set_replen(req);
 
-	/* the next poll will find -ENODATA, that means quotacheck is
-	 * going on */
+	/* the next poll will find -ENODATA, which means quotacheck is going on
+	 */
 	cli->cl_qchk_stat = -ENODATA;
 	rc = ptlrpc_queue_wait(req);
 	if (rc)
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 7034f0a..10f262f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -104,7 +104,6 @@
 static void osc_release_ppga(struct brw_page **ppga, u32 count);
 static int brw_interpret(const struct lu_env *env,
 			 struct ptlrpc_request *req, void *data, int rc);
-static int osc_cleanup(struct obd_device *obd);
 
 /* Pack OSC object metadata for disk storage (LE byte order). */
 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
@@ -113,18 +112,18 @@
 	int lmm_size;
 
 	lmm_size = sizeof(**lmmp);
-	if (lmmp == NULL)
+	if (!lmmp)
 		return lmm_size;
 
-	if (*lmmp != NULL && lsm == NULL) {
+	if (*lmmp && !lsm) {
 		kfree(*lmmp);
 		*lmmp = NULL;
 		return 0;
-	} else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) {
+	} else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) {
 		return -EBADF;
 	}
 
-	if (*lmmp == NULL) {
+	if (!*lmmp) {
 		*lmmp = kzalloc(lmm_size, GFP_NOFS);
 		if (!*lmmp)
 			return -ENOMEM;
@@ -143,7 +142,7 @@
 	int lsm_size;
 	struct obd_import *imp = class_exp2cliimp(exp);
 
-	if (lmm != NULL) {
+	if (lmm) {
 		if (lmm_bytes < sizeof(*lmm)) {
 			CERROR("%s: lov_mds_md too small: %d, need %d\n",
 			       exp->exp_obd->obd_name, lmm_bytes,
@@ -160,23 +159,23 @@
 	}
 
 	lsm_size = lov_stripe_md_size(1);
-	if (lsmp == NULL)
+	if (!lsmp)
 		return lsm_size;
 
-	if (*lsmp != NULL && lmm == NULL) {
+	if (*lsmp && !lmm) {
 		kfree((*lsmp)->lsm_oinfo[0]);
 		kfree(*lsmp);
 		*lsmp = NULL;
 		return 0;
 	}
 
-	if (*lsmp == NULL) {
+	if (!*lsmp) {
 		*lsmp = kzalloc(lsm_size, GFP_NOFS);
-		if (unlikely(*lsmp == NULL))
+		if (unlikely(!*lsmp))
 			return -ENOMEM;
 		(*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
 						GFP_NOFS);
-		if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
+		if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
 			kfree(*lsmp);
 			return -ENOMEM;
 		}
@@ -185,11 +184,11 @@
 		return -EBADF;
 	}
 
-	if (lmm != NULL)
+	if (lmm)
 		/* XXX zero *lsmp? */
 		ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
 
-	if (imp != NULL &&
+	if (imp &&
 	    (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
 		(*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
 	else
@@ -246,7 +245,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
@@ -276,7 +275,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
@@ -294,7 +293,7 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -321,7 +320,7 @@
 	LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
@@ -339,7 +338,7 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -362,7 +361,7 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -384,7 +383,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
@@ -451,7 +450,7 @@
 	}
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -482,7 +481,7 @@
 		goto out_req;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EPROTO;
 		goto out_req;
 	}
@@ -500,7 +499,7 @@
 	lsm->lsm_oi = oa->o_oi;
 	*ea = lsm;
 
-	if (oti != NULL) {
+	if (oti) {
 		oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
 
 		if (oa->o_valid & OBD_MD_FLCOOKIE) {
@@ -530,7 +529,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
@@ -573,7 +572,7 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body == NULL) {
+	if (!body) {
 		CERROR("can't unpack ost_body\n");
 		rc = -EPROTO;
 		goto out;
@@ -595,7 +594,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
@@ -629,10 +628,11 @@
 
 /* Find and cancel locally locks matched by @mode in the resource found by
  * @objid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list. */
+ * locks added to @cancels list.
+ */
 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
 				   struct list_head *cancels,
-				   ldlm_mode_t mode, __u64 lock_flags)
+				   enum ldlm_mode mode, __u64 lock_flags)
 {
 	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
 	struct ldlm_res_id res_id;
@@ -644,13 +644,14 @@
 	 *
 	 * This distinguishes from a case when ELC is not supported originally,
 	 * when we still want to cancel locks in advance and just cancel them
-	 * locally, without sending any RPC. */
+	 * locally, without sending any RPC.
+	 */
 	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
 		return 0;
 
 	ostid_build_res_name(&oa->o_oi, &res_id);
 	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
-	if (res == NULL)
+	if (!res)
 		return 0;
 
 	LDLM_RESOURCE_ADDREF(res);
@@ -723,7 +724,8 @@
  * If the client dies, or the OST is down when the object should be destroyed,
  * the records are not cancelled, and when the OST reconnects to the MDS next,
  * it will retrieve the llog unlink logs and then sends the log cancellation
- * cookies to the MDS after committing destroy transactions. */
+ * cookies to the MDS after committing destroy transactions.
+ */
 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 		       struct obdo *oa, struct lov_stripe_md *ea,
 		       struct obd_trans_info *oti, struct obd_export *md_export)
@@ -743,7 +745,7 @@
 					LDLM_FL_DISCARD_DATA);
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -758,7 +760,7 @@
 	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
 	ptlrpc_at_set_req_timeout(req);
 
-	if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
+	if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
 		oa->o_lcookie = *oti->oti_logcookies;
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	LASSERT(body);
@@ -769,7 +771,8 @@
 	/* If osc_destroy is for destroying the unlink orphan,
 	 * sent from MDT to OST, which should not be blocked here,
 	 * because the process might be triggered by ptlrpcd, and
-	 * it is not good to block ptlrpcd thread (b=16006)*/
+	 * it is not good to block ptlrpcd thread (b=16006)
+	 */
 	if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
 		req->rq_interpret_reply = osc_destroy_interpret;
 		if (!osc_can_send_destroy(cli)) {
@@ -810,7 +813,8 @@
 			    (long)(obd_max_dirty_pages + 1))) {
 		/* The atomic_read() allowing the atomic_inc() are
 		 * not covered by a lock thus they may safely race and trip
-		 * this CERROR() unless we add in a small fudge factor (+1). */
+		 * this CERROR() unless we add in a small fudge factor (+1).
+		 */
 		CERROR("dirty %d - %d > system dirty_max %d\n",
 		       atomic_read(&obd_dirty_pages),
 		       atomic_read(&obd_dirty_transit_pages),
@@ -839,7 +843,7 @@
 {
 	cli->cl_next_shrink_grant =
 		cfs_time_shift(cli->cl_grant_shrink_interval);
-	CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
+	CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
 	       cli->cl_next_shrink_grant);
 }
 
@@ -900,7 +904,8 @@
 /* Shrink the current grant, either from some large amount to enough for a
  * full set of in-flight RPCs, or if we have already shrunk to that limit
  * then to enough for a single RPC.  This avoids keeping more grant than
- * needed, and avoids shrinking the grant piecemeal. */
+ * needed, and avoids shrinking the grant piecemeal.
+ */
 static int osc_shrink_grant(struct client_obd *cli)
 {
 	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
@@ -922,7 +927,8 @@
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	/* Don't shrink if we are already above or below the desired limit
 	 * We don't want to shrink below a single RPC, as that will negatively
-	 * impact block allocation and long-term performance. */
+	 * impact block allocation and long-term performance.
+	 */
 	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
 		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
 
@@ -970,7 +976,8 @@
 	if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
 		/* Get the current RPC size directly, instead of going via:
 		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
-		 * Keep comment here so that it can be found by searching. */
+		 * Keep comment here so that it can be found by searching.
+		 */
 		int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
 
 		if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
@@ -1007,7 +1014,7 @@
 			client->cl_import->imp_obd->obd_name, rc);
 		return rc;
 	}
-	CDEBUG(D_CACHE, "add grant client %s \n",
+	CDEBUG(D_CACHE, "add grant client %s\n",
 	       client->cl_import->imp_obd->obd_name);
 	osc_update_next_shrink(client);
 	return 0;
@@ -1040,7 +1047,8 @@
 		      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
 		      ocd->ocd_grant, cli->cl_dirty);
 		/* workaround for servers which do not have the patch from
-		 * LU-2679 */
+		 * LU-2679
+		 */
 		cli->cl_avail_grant = ocd->ocd_grant;
 	}
 
@@ -1060,7 +1068,8 @@
 /* We assume that the reason this OSC got a short read is because it read
  * beyond the end of a stripe file; i.e. lustre is reading a sparse file
  * via the LOV, and it _knows_ it's reading inside the file, it's just that
- * this stripe never got written at or beyond this stripe offset yet. */
+ * this stripe never got written at or beyond this stripe offset yet.
+ */
 static void handle_short_read(int nob_read, u32 page_count,
 			      struct brw_page **pga)
 {
@@ -1106,7 +1115,7 @@
 	remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
 						  sizeof(*remote_rcs) *
 						  niocount);
-	if (remote_rcs == NULL) {
+	if (!remote_rcs) {
 		CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
 		return -EPROTO;
 	}
@@ -1139,7 +1148,8 @@
 				  OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
 
 		/* warn if we try to combine flags that we don't know to be
-		 * safe to combine */
+		 * safe to combine
+		 */
 		if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
 			CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
 			      p1->flag, p2->flag);
@@ -1152,7 +1162,7 @@
 
 static u32 osc_checksum_bulk(int nob, u32 pg_count,
 			     struct brw_page **pga, int opc,
-			     cksum_type_t cksum_type)
+			     enum cksum_type cksum_type)
 {
 	__u32 cksum;
 	int i = 0;
@@ -1174,7 +1184,8 @@
 		int count = pga[i]->count > nob ? nob : pga[i]->count;
 
 		/* corrupt the data before we compute the checksum, to
-		 * simulate an OST->client data error */
+		 * simulate an OST->client data error
+		 */
 		if (i == 0 && opc == OST_READ &&
 		    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
 			unsigned char *ptr = kmap(pga[i]->pg);
@@ -1205,7 +1216,8 @@
 		cfs_crypto_hash_final(hdesc, NULL, NULL);
 
 	/* For sending we only compute the wrong checksum instead
-	 * of corrupting the data so it is still correct on a redo */
+	 * of corrupting the data so it is still correct on a redo
+	 */
 	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
 		cksum++;
 
@@ -1244,7 +1256,7 @@
 		opc = OST_READ;
 		req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
 	}
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	for (niocount = i = 1; i < page_count; i++) {
@@ -1266,7 +1278,8 @@
 	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
 	ptlrpc_at_set_req_timeout(req);
 	/* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
-	 * retry logic */
+	 * retry logic
+	 */
 	req->rq_no_retry_einprogress = 1;
 
 	desc = ptlrpc_prep_bulk_imp(req, page_count,
@@ -1274,7 +1287,7 @@
 		opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
 		OST_BULK_PORTAL);
 
-	if (desc == NULL) {
+	if (!desc) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1283,7 +1296,7 @@
 	body = req_capsule_client_get(pill, &RMF_OST_BODY);
 	ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
 	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
-	LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
+	LASSERT(body && ioobj && niobuf);
 
 	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 
@@ -1293,7 +1306,8 @@
 	 * that might be send for this request.  The actual number is decided
 	 * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
 	 * "max - 1" for old client compatibility sending "0", and also so the
-	 * the actual maximum is a power-of-two number, not one less. LU-1431 */
+	 * actual maximum is a power-of-two number, not one less. LU-1431
+	 */
 	ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
 	LASSERT(page_count > 0);
 	pg_prev = pga[0];
@@ -1355,8 +1369,9 @@
 		if (cli->cl_checksum &&
 		    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
 			/* store cl_cksum_type in a local variable since
-			 * it can be changed via lprocfs */
-			cksum_type_t cksum_type = cli->cl_cksum_type;
+			 * it can be changed via lprocfs
+			 */
+			enum cksum_type cksum_type = cli->cl_cksum_type;
 
 			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
 				oa->o_flags &= OBD_FL_LOCAL_MASK;
@@ -1375,7 +1390,8 @@
 			oa->o_flags |= cksum_type_pack(cksum_type);
 		} else {
 			/* clear out the checksum flag, in case this is a
-			 * resend but cl_checksum is no longer set. b=11238 */
+			 * resend but cl_checksum is no longer set. b=11238
+			 */
 			oa->o_valid &= ~OBD_MD_FLCKSUM;
 		}
 		oa->o_cksum = body->oa.o_cksum;
@@ -1415,11 +1431,11 @@
 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
 				__u32 client_cksum, __u32 server_cksum, int nob,
 				u32 page_count, struct brw_page **pga,
-				cksum_type_t client_cksum_type)
+				enum cksum_type client_cksum_type)
 {
 	__u32 new_cksum;
 	char *msg;
-	cksum_type_t cksum_type;
+	enum cksum_type cksum_type;
 
 	if (server_cksum == client_cksum) {
 		CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
@@ -1472,9 +1488,9 @@
 		return rc;
 	}
 
-	LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
+	LASSERTF(req->rq_repmsg, "rc = %d\n", rc);
 	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body == NULL) {
+	if (!body) {
 		DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
 		return -EPROTO;
 	}
@@ -1550,7 +1566,7 @@
 		__u32 server_cksum = body->oa.o_cksum;
 		char *via = "";
 		char *router = "";
-		cksum_type_t cksum_type;
+		enum cksum_type cksum_type;
 
 		cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ?
 					       body->oa.o_flags : 0);
@@ -1627,7 +1643,7 @@
 		return rc;
 
 	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
-		if (oap->oap_request != NULL) {
+		if (oap->oap_request) {
 			LASSERTF(request == oap->oap_request,
 				 "request %p != oap_request %p\n",
 				 request, oap->oap_request);
@@ -1638,12 +1654,14 @@
 		}
 	}
 	/* New request takes over pga and oaps from old request.
-	 * Note that copying a list_head doesn't work, need to move it... */
+	 * Note that copying a list_head doesn't work, need to move it...
+	 */
 	aa->aa_resends++;
 	new_req->rq_interpret_reply = request->rq_interpret_reply;
 	new_req->rq_async_args = request->rq_async_args;
 	/* cap resend delay to the current request timeout, this is similar to
-	 * what ptlrpc does (see after_reply()) */
+	 * what ptlrpc does (see after_reply())
+	 */
 	if (aa->aa_resends > new_req->rq_timeout)
 		new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
 	else
@@ -1669,7 +1687,8 @@
 	/* XXX: This code will run into problem if we're going to support
 	 * to add a series of BRW RPCs into a self-defined ptlrpc_request_set
 	 * and wait for all of them to be finished. We should inherit request
-	 * set from old request. */
+	 * set from old request.
+	 */
 	ptlrpcd_add_req(new_req);
 
 	DEBUG_REQ(D_INFO, new_req, "new request");
@@ -1709,7 +1728,7 @@
 
 static void osc_release_ppga(struct brw_page **ppga, u32 count)
 {
-	LASSERT(ppga != NULL);
+	LASSERT(ppga);
 	kfree(ppga);
 }
 
@@ -1725,7 +1744,8 @@
 	rc = osc_brw_fini_request(req, rc);
 	CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
 	/* When server return -EINPROGRESS, client should always retry
-	 * regardless of the number of times the bulk was resent already. */
+	 * regardless of the number of times the bulk was resent already.
+	 */
 	if (osc_recoverable_error(rc)) {
 		if (req->rq_import_generation !=
 		    req->rq_import->imp_generation) {
@@ -1748,7 +1768,7 @@
 	}
 
 	list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
-		if (obj == NULL && rc == 0) {
+		if (!obj && rc == 0) {
 			obj = osc2cl(ext->oe_obj);
 			cl_object_get(obj);
 		}
@@ -1759,7 +1779,7 @@
 	LASSERT(list_empty(&aa->aa_exts));
 	LASSERT(list_empty(&aa->aa_oaps));
 
-	if (obj != NULL) {
+	if (obj) {
 		struct obdo *oa = aa->aa_oa;
 		struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
 		unsigned long valid = 0;
@@ -1798,7 +1818,8 @@
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
 	 * is called so we know whether to go to sync BRWs or wait for more
-	 * RPCs to complete */
+	 * RPCs to complete
+	 */
 	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
 		cli->cl_w_in_flight--;
 	else
@@ -1871,13 +1892,13 @@
 	}
 
 	pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
-	if (pga == NULL) {
+	if (!pga) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
-	if (oa == NULL) {
+	if (!oa) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1886,7 +1907,7 @@
 	list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
 		struct cl_page *page = oap2cl_page(oap);
 
-		if (clerq == NULL) {
+		if (!clerq) {
 			clerq = cl_req_alloc(env, page, crt,
 					     1 /* only 1-object rpcs for now */);
 			if (IS_ERR(clerq)) {
@@ -1907,7 +1928,7 @@
 	}
 
 	/* always get the data for the obdo for the rpc */
-	LASSERT(clerq != NULL);
+	LASSERT(clerq);
 	crattr->cra_oa = oa;
 	cl_req_attr_set(env, clerq, crattr, ~0ULL);
 	if (lock) {
@@ -1938,7 +1959,8 @@
 	 * we race with setattr (locally or in queue at OST).  If OST gets
 	 * later setattr before earlier BRW (as determined by the request xid),
 	 * the OST will not use BRW timestamps.  Sadly, there is no obvious
-	 * way to do this in a single call.  bug 10150 */
+	 * way to do this in a single call.  bug 10150
+	 */
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	crattr->cra_oa = &body->oa;
 	cl_req_attr_set(env, clerq, crattr,
@@ -1955,11 +1977,12 @@
 	aa->aa_clerq = clerq;
 
 	/* queued sync pages can be torn down while the pages
-	 * were between the pending list and the rpc */
+	 * were between the pending list and the rpc
+	 */
 	tmp = NULL;
 	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
 		/* only one oap gets a request reference */
-		if (tmp == NULL)
+		if (!tmp)
 			tmp = oap;
 		if (oap->oap_interrupted && !req->rq_intr) {
 			CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
@@ -1967,7 +1990,7 @@
 			ptlrpc_mark_interrupted(req);
 		}
 	}
-	if (tmp != NULL)
+	if (tmp)
 		tmp->oap_request = ptlrpc_request_addref(req);
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -2001,13 +2024,14 @@
 	kfree(crattr);
 
 	if (rc != 0) {
-		LASSERT(req == NULL);
+		LASSERT(!req);
 
 		if (oa)
 			kmem_cache_free(obdo_cachep, oa);
 		kfree(pga);
 		/* this should happen rarely and is pretty bad, it makes the
-		 * pending list not follow the dirty order */
+		 * pending list not follow the dirty order
+		 */
 		while (!list_empty(ext_list)) {
 			ext = list_entry(ext_list->next, struct osc_extent,
 					     oe_link);
@@ -2026,7 +2050,6 @@
 	void *data = einfo->ei_cbdata;
 	int set = 0;
 
-	LASSERT(lock != NULL);
 	LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
 	LASSERT(lock->l_resource->lr_type == einfo->ei_type);
 	LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
@@ -2035,7 +2058,7 @@
 	lock_res_and_lock(lock);
 	spin_lock(&osc_ast_guard);
 
-	if (lock->l_ast_data == NULL)
+	if (!lock->l_ast_data)
 		lock->l_ast_data = data;
 	if (lock->l_ast_data == data)
 		set = 1;
@@ -2052,7 +2075,7 @@
 	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 	int set = 0;
 
-	if (lock != NULL) {
+	if (lock) {
 		set = osc_set_lock_data_with_check(lock, einfo);
 		LDLM_LOCK_PUT(lock);
 	} else
@@ -2064,7 +2087,8 @@
 /* find any ldlm lock of the inode in osc
  * return 0    not find
  *	1    find one
- *      < 0    error */
+ *      < 0    error
+ */
 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
 			   ldlm_iterator_t replace, void *data)
 {
@@ -2095,7 +2119,6 @@
 			rep = req_capsule_server_get(&req->rq_pill,
 						     &RMF_DLM_REP);
 
-			LASSERT(rep != NULL);
 			rep->lock_policy_res1 =
 				ptlrpc_status_ntoh(rep->lock_policy_res1);
 			if (rep->lock_policy_res1)
@@ -2127,18 +2150,21 @@
 	__u64 *flags = aa->oa_flags;
 
 	/* Make a local copy of a lock handle and a mode, because aa->oa_*
-	 * might be freed anytime after lock upcall has been called. */
+	 * might be freed anytime after lock upcall has been called.
+	 */
 	lustre_handle_copy(&handle, aa->oa_lockh);
 	mode = aa->oa_ei->ei_mode;
 
 	/* ldlm_cli_enqueue is holding a reference on the lock, so it must
-	 * be valid. */
+	 * be valid.
+	 */
 	lock = ldlm_handle2lock(&handle);
 
 	/* Take an additional reference so that a blocking AST that
 	 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
 	 * to arrive after an upcall has been executed by
-	 * osc_enqueue_fini(). */
+	 * osc_enqueue_fini().
+	 */
 	ldlm_lock_addref(&handle, mode);
 
 	/* Let CP AST to grant the lock first. */
@@ -2170,7 +2196,7 @@
 		 */
 		ldlm_lock_decref(&handle, mode);
 
-	LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
+	LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
 		 aa->oa_lockh, req, aa);
 	ldlm_lock_decref(&handle, mode);
 	LDLM_LOCK_PUT(lock);
@@ -2185,7 +2211,8 @@
  * others may take a considerable amount of time in a case of ost failure; and
  * when other sync requests do not get released lock from a client, the client
  * is excluded from the cluster -- such scenarious make the life difficult, so
- * release locks just after they are obtained. */
+ * release locks just after they are obtained.
+ */
 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
 		     __u64 *flags, ldlm_policy_data_t *policy,
 		     struct ost_lvb *lvb, int kms_valid,
@@ -2198,11 +2225,12 @@
 	struct ptlrpc_request *req = NULL;
 	int intent = *flags & LDLM_FL_HAS_INTENT;
 	__u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
-	ldlm_mode_t mode;
+	enum ldlm_mode mode;
 	int rc;
 
 	/* Filesystem lock extents are extended to page boundaries so that
-	 * dealing with the page cache is a little smoother.  */
+	 * dealing with the page cache is a little smoother.
+	 */
 	policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
 	policy->l_extent.end |= ~CFS_PAGE_MASK;
 
@@ -2226,7 +2254,8 @@
 	 *
 	 * At some point we should cancel the read lock instead of making them
 	 * send us a blocking callback, but there are problems with canceling
-	 * locks out from other users right now, too. */
+	 * locks out from other users right now, too.
+	 */
 	mode = einfo->ei_mode;
 	if (einfo->ei_mode == LCK_PR)
 		mode |= LCK_PW;
@@ -2238,7 +2267,8 @@
 		if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
 			/* For AGL, if enqueue RPC is sent but the lock is not
 			 * granted, then skip to process this strpe.
-			 * Return -ECANCELED to tell the caller. */
+			 * Return -ECANCELED to tell the caller.
+			 */
 			ldlm_lock_decref(lockh, mode);
 			LDLM_LOCK_PUT(matched);
 			return -ECANCELED;
@@ -2247,19 +2277,22 @@
 		if (osc_set_lock_data_with_check(matched, einfo)) {
 			*flags |= LDLM_FL_LVB_READY;
 			/* addref the lock only if not async requests and PW
-			 * lock is matched whereas we asked for PR. */
+			 * lock is matched whereas we asked for PR.
+			 */
 			if (!rqset && einfo->ei_mode != mode)
 				ldlm_lock_addref(lockh, LCK_PR);
 			if (intent) {
 				/* I would like to be able to ASSERT here that
 				 * rss <= kms, but I can't, for reasons which
-				 * are explained in lov_enqueue() */
+				 * are explained in lov_enqueue()
+				 */
 			}
 
 			/* We already have a lock, and it's referenced.
 			 *
 			 * At this point, the cl_lock::cll_state is CLS_QUEUING,
-			 * AGL upcall may change it to CLS_HELD directly. */
+			 * AGL upcall may change it to CLS_HELD directly.
+			 */
 			(*upcall)(cookie, ELDLM_OK);
 
 			if (einfo->ei_mode != mode)
@@ -2281,7 +2314,7 @@
 
 		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					   &RQF_LDLM_ENQUEUE_LVB);
-		if (req == NULL)
+		if (!req)
 			return -ENOMEM;
 
 		rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
@@ -2341,27 +2374,29 @@
 {
 	struct obd_device *obd = exp->exp_obd;
 	__u64 lflags = *flags;
-	ldlm_mode_t rc;
+	enum ldlm_mode rc;
 
 	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
 		return -EIO;
 
 	/* Filesystem lock extents are extended to page boundaries so that
-	 * dealing with the page cache is a little smoother */
+	 * dealing with the page cache is a little smoother
+	 */
 	policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
 	policy->l_extent.end |= ~CFS_PAGE_MASK;
 
 	/* Next, search for already existing extent locks that will cover us */
 	/* If we're trying to read, we also search for an existing PW lock.  The
 	 * VFS and page cache already protect us locally, so lots of readers/
-	 * writers can share a single PW lock. */
+	 * writers can share a single PW lock.
+	 */
 	rc = mode;
 	if (mode == LCK_PR)
 		rc |= LCK_PW;
 	rc = ldlm_lock_match(obd->obd_namespace, lflags,
 			     res_id, type, policy, rc, lockh, unref);
 	if (rc) {
-		if (data != NULL) {
+		if (data) {
 			if (!osc_set_data_with_check(lockh, data)) {
 				if (!(lflags & LDLM_FL_TEST_LOCK))
 					ldlm_lock_decref(lockh, rc);
@@ -2398,8 +2433,9 @@
 		 * due to issues at a higher level (LOV).
 		 * Exit immediately since the caller is
 		 * aware of the problem and takes care
-		 * of the clean up */
-		 return rc;
+		 * of the clean up
+		 */
+		return rc;
 
 	if ((rc == -ENOTCONN || rc == -EAGAIN) &&
 	    (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
@@ -2411,7 +2447,7 @@
 		goto out;
 
 	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
-	if (msfs == NULL) {
+	if (!msfs) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -2436,9 +2472,10 @@
 	 * extra calls into the filesystem if that isn't necessary (e.g.
 	 * during mount that would help a bit).  Having relative timestamps
 	 * is not so great if request processing is slow, while absolute
-	 * timestamps are not ideal because they need time synchronization. */
+	 * timestamps are not ideal because they need time synchronization.
+	 */
 	req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
@@ -2474,8 +2511,9 @@
 	struct obd_import *imp = NULL;
 	int rc;
 
-	/*Since the request might also come from lprocfs, so we need
-	 *sync this with client_disconnect_export Bug15684*/
+	/* Since the request might also come from lprocfs, we need to
+	 * sync this with client_disconnect_export. Bug15684
+	 */
 	down_read(&obd->u.cli.cl_sem);
 	if (obd->u.cli.cl_import)
 		imp = class_import_get(obd->u.cli.cl_import);
@@ -2488,12 +2526,13 @@
 	 * extra calls into the filesystem if that isn't necessary (e.g.
 	 * during mount that would help a bit).  Having relative timestamps
 	 * is not so great if request processing is slow, while absolute
-	 * timestamps are not ideal because they need time synchronization. */
+	 * timestamps are not ideal because they need time synchronization.
+	 */
 	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
 
 	class_import_put(imp);
 
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
@@ -2516,7 +2555,7 @@
 		goto out;
 
 	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
-	if (msfs == NULL) {
+	if (!msfs) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -2534,7 +2573,8 @@
  * the maximum number of OST indices which will fit in the user buffer.
  * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
  */
-static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
+static int osc_getstripe(struct lov_stripe_md *lsm,
+			 struct lov_user_md __user *lump)
 {
 	/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
 	struct lov_user_md_v3 lum, *lumk;
@@ -2545,7 +2585,8 @@
 		return -ENODATA;
 
 	/* we only need the header part from user space to get lmm_magic and
-	 * lmm_stripe_count, (the header part is common to v1 and v3) */
+	 * lmm_stripe_count (the header part is common to v1 and v3)
+	 */
 	lum_size = sizeof(struct lov_user_md_v1);
 	if (copy_from_user(&lum, lump, lum_size))
 		return -EFAULT;
@@ -2560,7 +2601,8 @@
 	LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
 
 	/* we can use lov_mds_md_size() to compute lum_size
-	 * because lov_user_md_vX and lov_mds_md_vX have the same size */
+	 * because lov_user_md_vX and lov_mds_md_vX have the same size
+	 */
 	if (lum.lmm_stripe_count > 0) {
 		lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
 		lumk = kzalloc(lum_size, GFP_NOFS);
@@ -2591,7 +2633,7 @@
 }
 
 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
-			 void *karg, void *uarg)
+			 void *karg, void __user *uarg)
 {
 	struct obd_device *obd = exp->exp_obd;
 	struct obd_ioctl_data *data = karg;
@@ -2700,7 +2742,7 @@
 
 		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					   &RQF_OST_GET_INFO_LAST_ID);
-		if (req == NULL)
+		if (!req)
 			return -ENOMEM;
 
 		req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -2721,7 +2763,7 @@
 			goto out;
 
 		reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
-		if (reply == NULL) {
+		if (!reply) {
 			rc = -EPROTO;
 			goto out;
 		}
@@ -2735,7 +2777,7 @@
 		struct ldlm_res_id res_id;
 		ldlm_policy_data_t policy;
 		struct lustre_handle lockh;
-		ldlm_mode_t mode = 0;
+		enum ldlm_mode mode = 0;
 		struct ptlrpc_request *req;
 		struct ll_user_fiemap *reply;
 		char *tmp;
@@ -2774,7 +2816,7 @@
 skip_locking:
 		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					   &RQF_OST_GET_INFO_FIEMAP);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto drop_lock;
 		}
@@ -2803,7 +2845,7 @@
 			goto fini_req;
 
 		reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
-		if (reply == NULL) {
+		if (!reply) {
 			rc = -EPROTO;
 			goto fini_req;
 		}
@@ -2852,7 +2894,7 @@
 	if (KEY_IS(KEY_CACHE_SET)) {
 		struct client_obd *cli = &obd->u.cli;
 
-		LASSERT(cli->cl_cache == NULL); /* only once */
+		LASSERT(!cli->cl_cache); /* only once */
 		cli->cl_cache = val;
 		atomic_inc(&cli->cl_cache->ccc_users);
 		cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
@@ -2880,16 +2922,17 @@
 		return -EINVAL;
 
 	/* We pass all other commands directly to OST. Since nobody calls osc
-	   methods directly and everybody is supposed to go through LOV, we
-	   assume lov checked invalid values for us.
-	   The only recognised values so far are evict_by_nid and mds_conn.
-	   Even if something bad goes through, we'd get a -EINVAL from OST
-	   anyway. */
+	 * methods directly and everybody is supposed to go through LOV, we
+	 * assume lov checked invalid values for us.
+	 * The only recognised values so far are evict_by_nid and mds_conn.
+	 * Even if something bad goes through, we'd get a -EINVAL from OST
+	 * anyway.
+	 */
 
 	req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
 						&RQF_OST_SET_GRANT_INFO :
 						&RQF_OBD_SET_INFO);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -2928,7 +2971,7 @@
 
 	ptlrpc_request_set_replen(req);
 	if (!KEY_IS(KEY_GRANT_SHRINK)) {
-		LASSERT(set != NULL);
+		LASSERT(set);
 		ptlrpc_set_add_req(set, req);
 		ptlrpc_check_set(NULL, set);
 	} else {
@@ -2946,7 +2989,7 @@
 {
 	struct client_obd *cli = &obd->u.cli;
 
-	if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
+	if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
 		long lost_grant;
 
 		client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -2987,7 +3030,7 @@
 	 * So the osc should be disconnected from the shrink list, after we
 	 * are sure the import has been destroyed. BUG18662
 	 */
-	if (obd->u.cli.cl_import == NULL)
+	if (!obd->u.cli.cl_import)
 		osc_del_shrink_grant(&obd->u.cli);
 	return rc;
 }
@@ -3024,7 +3067,8 @@
 			/* Reset grants */
 			cli = &obd->u.cli;
 			/* all pages go to failing rpcs due to the invalid
-			 * import */
+			 * import
+			 */
 			osc_io_unplug(env, cli, NULL);
 
 			ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
@@ -3206,13 +3250,13 @@
 	return 0;
 }
 
-int osc_cleanup(struct obd_device *obd)
+static int osc_cleanup(struct obd_device *obd)
 {
 	struct client_obd *cli = &obd->u.cli;
 	int rc;
 
 	/* lru cleanup */
-	if (cli->cl_cache != NULL) {
+	if (cli->cl_cache) {
 		LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
 		spin_lock(&cli->cl_cache->ccc_lru_lock);
 		list_del_init(&cli->cl_lru_osc);
@@ -3255,7 +3299,7 @@
 	return osc_process_config_base(obd, buf);
 }
 
-struct obd_ops osc_obd_ops = {
+static struct obd_ops osc_obd_ops = {
 	.owner          = THIS_MODULE,
 	.setup          = osc_setup,
 	.precleanup     = osc_precleanup,
@@ -3298,7 +3342,8 @@
 
 	/* print an address of _any_ initialized kernel symbol from this
 	 * module, to allow debugging with gdb that doesn't support data
-	 * symbols from modules.*/
+	 * symbols from modules.
+	 */
 	CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
 
 	rc = lu_kmem_init(osc_caches);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index efdda09..9b89068 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -145,7 +145,7 @@
 
 	LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
 	desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
-	if (desc == NULL)
+	if (!desc)
 		return NULL;
 
 	desc->bd_import_generation = req->rq_import_generation;
@@ -171,7 +171,7 @@
 			     struct page *page, int pageoffset, int len, int pin)
 {
 	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
-	LASSERT(page != NULL);
+	LASSERT(page);
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
 	LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
@@ -193,7 +193,6 @@
 {
 	int i;
 
-	LASSERT(desc != NULL);
 	LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
 	LASSERT(desc->bd_md_count == 0);	 /* network hands off */
 	LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
@@ -353,6 +352,7 @@
  * If anything goes wrong just ignore it - same as if it never happened
  */
 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
+	__must_hold(&req->rq_lock)
 {
 	struct ptlrpc_request *early_req;
 	time64_t olddl;
@@ -411,7 +411,7 @@
 	request_cache = kmem_cache_create("ptlrpc_cache",
 					  sizeof(struct ptlrpc_request),
 					  0, SLAB_HWCACHE_ALIGN, NULL);
-	return request_cache == NULL ? -ENOMEM : 0;
+	return !request_cache ? -ENOMEM : 0;
 }
 
 void ptlrpc_request_cache_fini(void)
@@ -441,8 +441,6 @@
 	struct list_head *l, *tmp;
 	struct ptlrpc_request *req;
 
-	LASSERT(pool != NULL);
-
 	spin_lock(&pool->prp_lock);
 	list_for_each_safe(l, tmp, &pool->prp_req_list) {
 		req = list_entry(l, struct ptlrpc_request, rq_list);
@@ -752,7 +750,7 @@
 	struct ptlrpc_request *request;
 
 	request = __ptlrpc_request_alloc(imp, pool);
-	if (request == NULL)
+	if (!request)
 		return NULL;
 
 	req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
@@ -951,10 +949,10 @@
 	atomic_inc(&set->set_remaining);
 	req->rq_queued_time = cfs_time_current();
 
-	if (req->rq_reqmsg != NULL)
+	if (req->rq_reqmsg)
 		lustre_msg_set_jobid(req->rq_reqmsg, NULL);
 
-	if (set->set_producer != NULL)
+	if (set->set_producer)
 		/*
 		 * If the request set has a producer callback, the RPC must be
 		 * sent straight away
@@ -974,7 +972,7 @@
 	struct ptlrpc_request_set *set = pc->pc_set;
 	int count, i;
 
-	LASSERT(req->rq_set == NULL);
+	LASSERT(!req->rq_set);
 	LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
 
 	spin_lock(&set->set_new_req_lock);
@@ -1015,7 +1013,6 @@
 {
 	int delay = 0;
 
-	LASSERT(status != NULL);
 	*status = 0;
 
 	if (req->rq_ctx_init || req->rq_ctx_fini) {
@@ -1078,7 +1075,7 @@
 	__u32 opc;
 	int err;
 
-	LASSERT(req->rq_reqmsg != NULL);
+	LASSERT(req->rq_reqmsg);
 	opc = lustre_msg_get_opc(req->rq_reqmsg);
 
 	/*
@@ -1167,7 +1164,7 @@
 	struct timespec64 work_start;
 	long timediff;
 
-	LASSERT(obd != NULL);
+	LASSERT(obd);
 	/* repbuf must be unlinked */
 	LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
 
@@ -1247,7 +1244,7 @@
 	ktime_get_real_ts64(&work_start);
 	timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
 		   (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
-	if (obd->obd_svc_stats != NULL) {
+	if (obd->obd_svc_stats) {
 		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
 				    timediff);
 		ptlrpc_lprocfs_rpc_sent(req, timediff);
@@ -1310,7 +1307,7 @@
 			/* version recovery */
 			ptlrpc_save_versions(req);
 			ptlrpc_retain_replayable_request(req, imp);
-		} else if (req->rq_commit_cb != NULL &&
+		} else if (req->rq_commit_cb &&
 			   list_empty(&req->rq_replay_list)) {
 			/*
 			 * NB: don't call rq_commit_cb if it's already on
@@ -1437,7 +1434,7 @@
 {
 	int remaining, rc;
 
-	LASSERT(set->set_producer != NULL);
+	LASSERT(set->set_producer);
 
 	remaining = atomic_read(&set->set_remaining);
 
@@ -1750,7 +1747,7 @@
 			 * process the reply. Similarly if the RPC returned
 			 * an error, and therefore the bulk will never arrive.
 			 */
-			if (req->rq_bulk == NULL || req->rq_status < 0) {
+			if (!req->rq_bulk || req->rq_status < 0) {
 				ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
 				goto interpret;
 			}
@@ -1802,7 +1799,7 @@
 		}
 		ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
 
-		CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
+		CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0,
 		       "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
 		       current_comm(), imp->imp_obd->obd_uuid.uuid,
 		       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
@@ -1882,8 +1879,8 @@
 		      "timed out for sent delay" : "timed out for slow reply"),
 		  (s64)req->rq_sent, (s64)req->rq_real_sent);
 
-	if (imp != NULL && obd_debug_peer_on_timeout)
-		LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+	if (imp && obd_debug_peer_on_timeout)
+		LNetDebugPeer(imp->imp_connection->c_peer);
 
 	ptlrpc_unregister_reply(req, async_unlink);
 	ptlrpc_unregister_bulk(req, async_unlink);
@@ -1891,7 +1888,7 @@
 	if (obd_dump_on_timeout)
 		libcfs_debug_dumplog();
 
-	if (imp == NULL) {
+	if (!imp) {
 		DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
 		return 1;
 	}
@@ -1944,8 +1941,6 @@
 	struct list_head *tmp;
 	time64_t now = ktime_get_real_seconds();
 
-	LASSERT(set != NULL);
-
 	/* A timeout expired. See which reqs it applies to...  */
 	list_for_each(tmp, &set->set_requests) {
 		struct ptlrpc_request *req =
@@ -2002,7 +1997,6 @@
 	struct ptlrpc_request_set *set = data;
 	struct list_head *tmp;
 
-	LASSERT(set != NULL);
 	CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
 
 	list_for_each(tmp, &set->set_requests) {
@@ -2174,7 +2168,7 @@
 			rc = req->rq_status;
 	}
 
-	if (set->set_interpret != NULL) {
+	if (set->set_interpret) {
 		int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
 			set->set_interpret;
 		rc = interpreter(set, set->set_arg, rc);
@@ -2206,10 +2200,10 @@
  */
 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 {
-	if (request == NULL)
+	if (!request)
 		return;
 	LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
-	LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */
+	LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */
 	LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
 	LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
 	LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
@@ -2221,7 +2215,7 @@
 	 * We must take it off the imp_replay_list first.  Otherwise, we'll set
 	 * request->rq_reqmsg to NULL while osc_close is dereferencing it.
 	 */
-	if (request->rq_import != NULL) {
+	if (request->rq_import) {
 		if (!locked)
 			spin_lock(&request->rq_import->imp_lock);
 		list_del_init(&request->rq_replay_list);
@@ -2236,20 +2230,20 @@
 		LBUG();
 	}
 
-	if (request->rq_repbuf != NULL)
+	if (request->rq_repbuf)
 		sptlrpc_cli_free_repbuf(request);
-	if (request->rq_export != NULL) {
+	if (request->rq_export) {
 		class_export_put(request->rq_export);
 		request->rq_export = NULL;
 	}
-	if (request->rq_import != NULL) {
+	if (request->rq_import) {
 		class_import_put(request->rq_import);
 		request->rq_import = NULL;
 	}
-	if (request->rq_bulk != NULL)
+	if (request->rq_bulk)
 		ptlrpc_free_bulk_pin(request->rq_bulk);
 
-	if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
+	if (request->rq_reqbuf || request->rq_clrbuf)
 		sptlrpc_cli_free_reqbuf(request);
 
 	if (request->rq_cli_ctx)
@@ -2269,7 +2263,7 @@
  */
 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
 {
-	if (request == NULL)
+	if (!request)
 		return 1;
 
 	if (request == LP_POISON ||
@@ -2351,7 +2345,7 @@
 	 * a chance to run reply_in_callback(), and to make sure we've
 	 * unlinked before returning a req to the pool.
 	 */
-	if (request->rq_set != NULL)
+	if (request->rq_set)
 		wq = &request->rq_set->set_waitq;
 	else
 		wq = &request->rq_reply_waitq;
@@ -2386,7 +2380,7 @@
 	req->rq_replay = 0;
 	spin_unlock(&req->rq_lock);
 
-	if (req->rq_commit_cb != NULL)
+	if (req->rq_commit_cb)
 		req->rq_commit_cb(req);
 	list_del_init(&req->rq_replay_list);
 
@@ -2427,7 +2421,6 @@
 	struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
 	bool skip_committed_list = true;
 
-	LASSERT(imp != NULL);
 	assert_spin_locked(&imp->imp_lock);
 
 	if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
@@ -2611,11 +2604,11 @@
 	struct ptlrpc_request_set *set;
 	int rc;
 
-	LASSERT(req->rq_set == NULL);
+	LASSERT(!req->rq_set);
 	LASSERT(!req->rq_receiving_reply);
 
 	set = ptlrpc_prep_set();
-	if (set == NULL) {
+	if (!set) {
 		CERROR("Unable to allocate ptlrpc set.");
 		return -ENOMEM;
 	}
@@ -2847,8 +2840,6 @@
 {
 	struct list_head *tmp, *pos;
 
-	LASSERT(set != NULL);
-
 	list_for_each_safe(pos, tmp, &set->set_requests) {
 		struct ptlrpc_request *req =
 			list_entry(pos, struct ptlrpc_request,
@@ -2994,7 +2985,6 @@
 	struct ptlrpc_work_async_args *arg = data;
 
 	LASSERT(ptlrpcd_check_work(req));
-	LASSERT(arg->cb != NULL);
 
 	rc = arg->cb(env, arg->cbdata);
 
@@ -3026,12 +3016,12 @@
 
 	might_sleep();
 
-	if (cb == NULL)
+	if (!cb)
 		return ERR_PTR(-EINVAL);
 
 	/* copy some code from deprecated fakereq. */
 	req = ptlrpc_request_cache_alloc(GFP_NOFS);
-	if (req == NULL) {
+	if (!req) {
 		CERROR("ptlrpc: run out of memory!\n");
 		return ERR_PTR(-ENOMEM);
 	}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index da1f0b1..a14daff 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -72,7 +72,8 @@
 	 * returned and may be compared against out object.
 	 */
 	/* In the function below, .hs_keycmp resolves to
-	 * conn_keycmp() */
+	 * conn_keycmp()
+	 */
 	/* coverity[overrun-buffer-val] */
 	conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
 	if (conn != conn2) {
@@ -172,7 +173,7 @@
 	struct ptlrpc_connection *conn;
 	const lnet_process_id_t *conn_key;
 
-	LASSERT(key != NULL);
+	LASSERT(key);
 	conn_key = key;
 	conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 9901569..47be21a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -71,7 +71,8 @@
 	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
 
 		/* Failed send: make it seem like the reply timed out, just
-		 * like failing sends in client.c does currently...  */
+		 * like failing sends in client.c do currently...
+		 */
 
 		req->rq_net_err = 1;
 		ptlrpc_client_wake_req(req);
@@ -95,7 +96,8 @@
 	LASSERT(ev->md.start == req->rq_repbuf);
 	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
 	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
-	   for adaptive timeouts' early reply. */
+	 * for adaptive timeouts' early reply.
+	 */
 	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
 
 	spin_lock(&req->rq_lock);
@@ -151,7 +153,8 @@
 		req->rq_reply_off = ev->offset;
 		req->rq_nob_received = ev->mlength;
 		/* LNetMDUnlink can't be called under the LNET_LOCK,
-		   so we must unlink in ptlrpc_unregister_reply */
+		 * so we must unlink in ptlrpc_unregister_reply
+		 */
 		DEBUG_REQ(D_INFO, req,
 			  "reply in flags=%x mlen=%u offset=%d replen=%d",
 			  lustre_msg_get_flags(req->rq_reqmsg),
@@ -162,7 +165,8 @@
 
 out_wake:
 	/* NB don't unlock till after wakeup; req can disappear under us
-	 * since we don't have our own ref */
+	 * since we don't have our own ref
+	 */
 	ptlrpc_client_wake_req(req);
 	spin_unlock(&req->rq_lock);
 }
@@ -213,7 +217,8 @@
 		desc->bd_failure = 1;
 
 	/* NB don't unlock till after wakeup; desc can disappear under us
-	 * otherwise */
+	 * otherwise
+	 */
 	if (desc->bd_md_count == 0)
 		ptlrpc_client_wake_req(desc->bd_req);
 
@@ -250,7 +255,8 @@
 	__u64 new_seq;
 
 	/* set sequence ID for request and add it to history list,
-	 * it must be called with hold svcpt::scp_lock */
+	 * it must be called with svcpt::scp_lock held
+	 */
 
 	new_seq = (sec << REQS_SEC_SHIFT) |
 		  (usec << REQS_USEC_SHIFT) |
@@ -258,7 +264,8 @@
 
 	if (new_seq > svcpt->scp_hist_seq) {
 		/* This handles the initial case of scp_hist_seq == 0 or
-		 * we just jumped into a new time window */
+		 * we just jumped into a new time window
+		 */
 		svcpt->scp_hist_seq = new_seq;
 	} else {
 		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
@@ -266,7 +273,8 @@
 		 * however, it's possible that we used up all bits for
 		 * sequence and jumped into the next usec bucket (future time),
 		 * then we hope there will be less RPCs per bucket at some
-		 * point, and sequence will catch up again */
+		 * point, and sequence will catch up again
+		 */
 		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
 		new_seq = svcpt->scp_hist_seq;
 	}
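
The hunk above packs a 64-bit request-history sequence from the arrival time (seconds, microseconds) plus low-order bits, and spills into the next bucket when the low bits run out. A minimal sketch of that packing follows; the field widths and names are made up for illustration (the real REQS_*_SHIFT macros and the CPT bits are defined elsewhere in the Lustre service code):

#include <stdint.h>

#define EX_SEQ_BITS	10			/* counter bits per usec bucket (made up) */
#define EX_USEC_SHIFT	EX_SEQ_BITS
#define EX_SEC_SHIFT	(EX_USEC_SHIFT + 20)	/* 20 bits comfortably hold 0..999999 usec */

/* Return the next history sequence; *hist_seq is the per-service state. */
static uint64_t ex_next_hist_seq(uint64_t *hist_seq, uint64_t sec, uint64_t usec)
{
	uint64_t new_seq = (sec << EX_SEC_SHIFT) | (usec << EX_USEC_SHIFT);

	if (new_seq > *hist_seq) {
		/* first request of a new time window: restart the counter */
		*hist_seq = new_seq;
	} else {
		/* same (or earlier) window: advance the counter; if its
		 * EX_SEQ_BITS overflow, the carry lands in a "future" usec
		 * bucket and real time is expected to catch up later
		 */
		*hist_seq += 1;
	}
	return *hist_seq;
}
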
@@ -302,7 +310,8 @@
 		 * request buffer we can use the request object embedded in
 		 * rqbd.  Note that if we failed to allocate a request,
 		 * we'd have to re-post the rqbd, which we can't do in this
-		 * context. */
+		 * context.
+		 */
 		req = &rqbd->rqbd_req;
 		memset(req, 0, sizeof(*req));
 	} else {
@@ -312,7 +321,7 @@
 			return;
 		}
 		req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
-		if (req == NULL) {
+		if (!req) {
 			CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
 			       service->srv_name,
 			       libcfs_id2str(ev->initiator));
@@ -322,7 +331,8 @@
 
 	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
 	 * flags are reset and scalars are zero.  We only set the message
-	 * size to non-zero if this was a successful receive. */
+	 * size to non-zero if this was a successful receive.
+	 */
 	req->rq_xid = ev->match_bits;
 	req->rq_reqbuf = ev->md.start + ev->offset;
 	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
@@ -352,7 +362,8 @@
 		       svcpt->scp_nrqbds_posted);
 
 		/* Normally, don't complain about 0 buffers posted; LNET won't
-		 * drop incoming reqs since we set the portal lazy */
+		 * drop incoming reqs since we set the portal lazy
+		 */
 		if (test_req_buffer_pressure &&
 		    ev->type != LNET_EVENT_UNLINK &&
 		    svcpt->scp_nrqbds_posted == 0)
@@ -369,7 +380,8 @@
 	svcpt->scp_nreqs_incoming++;
 
 	/* NB everything can disappear under us once the request
-	 * has been queued and we unlock, so do the wake now... */
+	 * has been queued and we unlock, so do the wake now...
+	 */
 	wake_up(&svcpt->scp_waitq);
 
 	spin_unlock(&svcpt->scp_lock);
@@ -390,7 +402,8 @@
 
 	if (!rs->rs_difficult) {
 		/* 'Easy' replies have no further processing so I drop the
-		 * net's ref on 'rs' */
+		 * net's ref on 'rs'
+		 */
 		LASSERT(ev->unlinked);
 		ptlrpc_rs_decref(rs);
 		return;
@@ -400,7 +413,8 @@
 
 	if (ev->unlinked) {
 		/* Last network callback. The net's ref on 'rs' stays put
-		 * until ptlrpc_handle_rs() is done with it */
+		 * until ptlrpc_handle_rs() is done with it
+		 */
 		spin_lock(&svcpt->scp_rep_lock);
 		spin_lock(&rs->rs_lock);
 
@@ -438,15 +452,12 @@
 	__u32 best_order = 0;
 	int count = 0;
 	int rc = -ENOENT;
-	int portals_compatibility;
 	int dist;
 	__u32 order;
 	lnet_nid_t dst_nid;
 	lnet_nid_t src_nid;
 
-	portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
-
-	peer->pid = LUSTRE_SRV_LNET_PID;
+	peer->pid = LNET_PID_LUSTRE;
 
 	/* Choose the matching UUID that's closest */
 	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
@@ -466,14 +477,6 @@
 			best_dist = dist;
 			best_order = order;
 
-			if (portals_compatibility > 1) {
-				/* Strong portals compatibility: Zero the nid's
-				 * NET, so if I'm reading new config logs, or
-				 * getting configured by (new) lconf I can
-				 * still talk to old servers. */
-				dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
-				src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
-			}
 			peer->nid = dst_nid;
 			*self = src_nid;
 			rc = 0;
@@ -494,7 +497,8 @@
 	/* Wait for the event queue to become idle since there may still be
 	 * messages in flight with pending events (i.e. the fire-and-forget
 	 * messages == client requests and "non-difficult" server
-	 * replies */
+	 * replies)
+	 */
 
 	for (retries = 0;; retries++) {
 		rc = LNetEQFree(ptlrpc_eq_h);
@@ -524,7 +528,7 @@
 {
 	lnet_pid_t pid;
 
-	pid = LUSTRE_SRV_LNET_PID;
+	pid = LNET_PID_LUSTRE;
 	return pid;
 }
 
@@ -544,11 +548,13 @@
 	}
 
 	/* CAVEAT EMPTOR: how we process portals events is _radically_
-	 * different depending on... */
+	 * different depending on...
+	 */
 	/* kernel LNet calls our master callback when there are new event,
 	 * because we are guaranteed to get every event via callback,
 	 * so we just set EQ size to 0 to avoid overhead of serializing
-	 * enqueue/dequeue operations in LNet. */
+	 * enqueue/dequeue operations in LNet.
+	 */
 	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
 	if (rc == 0)
 		return 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index f752c78..70fcac1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -112,7 +112,8 @@
  * CLOSED. I would rather refcount the import and free it after
  * disconnection like we do with exports. To do that, the client_obd
  * will need to save the peer info somewhere other than in the import,
- * though. */
+ * though.
+ */
 int ptlrpc_init_import(struct obd_import *imp)
 {
 	spin_lock(&imp->imp_lock);
@@ -282,11 +283,13 @@
 	/* Wait forever until inflight == 0. We really can't do it another
 	 * way because in some cases we need to wait for very long reply
 	 * unlink. We can't do anything before that because there is really
-	 * no guarantee that some rdma transfer is not in progress right now. */
+	 * no guarantee that some rdma transfer is not in progress right now.
+	 */
 	do {
 		/* Calculate max timeout for waiting on rpcs to error
 		 * out. Use obd_timeout if calculated value is smaller
-		 * than it. */
+		 * than it.
+		 */
 		if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
 			timeout = ptlrpc_inflight_timeout(imp);
 			timeout += timeout / 3;
@@ -304,7 +307,8 @@
 
 		/* Wait for all requests to error out and call completion
 		 * callbacks. Cap it at obd_timeout -- these should all
-		 * have been locally cancelled by ptlrpc_abort_inflight. */
+		 * have been locally cancelled by ptlrpc_abort_inflight.
+		 */
 		lwi = LWI_TIMEOUT_INTERVAL(
 			cfs_timeout_cap(cfs_time_seconds(timeout)),
 			(timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
@@ -328,13 +332,15 @@
 				 * maybe waiting for long reply unlink in
 				 * sluggish nets). Let's check this. If there
 				 * is no inflight and unregistering != 0, this
-				 * is bug. */
+				 * is a bug.
+				 */
 				LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
 					 count);
 
 				/* Let's save one loop as soon as inflight have
 				 * dropped to zero. No new inflights possible at
-				 * this point. */
+				 * this point.
+				 */
 				rc = 0;
 			} else {
 				list_for_each_safe(tmp, n,
@@ -501,7 +507,8 @@
 		       conn->oic_last_attempt);
 
 		/* If we have not tried this connection since
-		   the last successful attempt, go with this one */
+		 * the last successful attempt, go with this one
+		 */
 		if ((conn->oic_last_attempt == 0) ||
 		    cfs_time_beforeq_64(conn->oic_last_attempt,
 				       imp->imp_last_success_conn)) {
@@ -511,8 +518,9 @@
 		}
 
 		/* If all of the connections have already been tried
-		   since the last successful connection; just choose the
-		   least recently used */
+		 * since the last successful connection; just choose the
+		 * least recently used
+		 */
 		if (!imp_conn)
 			imp_conn = conn;
 		else if (cfs_time_before_64(conn->oic_last_attempt,
@@ -529,10 +537,11 @@
 	LASSERT(imp_conn->oic_conn);
 
 	/* If we've tried everything, and we're back to the beginning of the
-	   list, increase our timeout and try again. It will be reset when
-	   we do finally connect. (FIXME: really we should wait for all network
-	   state associated with the last connection attempt to drain before
-	   trying to reconnect on it.) */
+	 * list, increase our timeout and try again. It will be reset when
+	 * we do finally connect. (FIXME: really we should wait for all network
+	 * state associated with the last connection attempt to drain before
+	 * trying to reconnect on it.)
+	 */
 	if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
 		struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
 
@@ -553,7 +562,6 @@
 	imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
 
 	dlmexp = class_conn2export(&imp->imp_dlm_handle);
-	LASSERT(dlmexp != NULL);
 	ptlrpc_connection_put(dlmexp->exp_connection);
 	dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
 	class_export_put(dlmexp);
@@ -590,7 +598,8 @@
 	struct list_head *tmp;
 
 	/* The requests in committed_list always have smaller transnos than
-	 * the requests in replay_list */
+	 * the requests in replay_list
+	 */
 	if (!list_empty(&imp->imp_committed_list)) {
 		tmp = imp->imp_committed_list.next;
 		req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
@@ -674,7 +683,8 @@
 		goto out;
 
 	/* Reset connect flags to the originally requested flags, in case
-	 * the server is updated on-the-fly we will get the new features. */
+	 * the server is updated on-the-fly, we will get the new features.
+	 */
 	imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
 	/* Reset ocd_version each time so the server knows the exact versions */
 	imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
@@ -687,7 +697,7 @@
 		goto out;
 
 	request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
-	if (request == NULL) {
+	if (!request) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -700,7 +710,8 @@
 	}
 
 	/* Report the rpc service time to the server so that it knows how long
-	 * to wait for clients to join recovery */
+	 * to wait for clients to join recovery
+	 */
 	lustre_msg_set_service_time(request->rq_reqmsg,
 				    at_timeout2est(request->rq_timeout));
 
@@ -708,7 +719,8 @@
 	 * import_select_connection will increase the net latency on
 	 * repeated reconnect attempts to cover slow networks.
 	 * We override/ignore the server rpc completion estimate here,
-	 * which may be large if this is a reconnect attempt */
+	 * which may be large if this is a reconnect attempt
+	 */
 	request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
 	lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
 
@@ -799,7 +811,8 @@
 
 	if (rc) {
 		/* if this reconnect to busy export - not need select new target
-		 * for connecting*/
+		 * for connecting
+		 */
 		imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
 		spin_unlock(&imp->imp_lock);
 		ptlrpc_maybe_ping_import_soon(imp);
@@ -817,7 +830,7 @@
 	ocd = req_capsule_server_sized_get(&request->rq_pill,
 					   &RMF_CONNECT_DATA, ret);
 
-	if (ocd == NULL) {
+	if (!ocd) {
 		CERROR("%s: no connect data from server\n",
 		       imp->imp_obd->obd_name);
 		rc = -EPROTO;
@@ -851,7 +864,8 @@
 
 	if (!exp) {
 		/* This could happen if export is cleaned during the
-		   connect attempt */
+		 * connect attempt
+		 */
 		CERROR("%s: missing export after connect\n",
 		       imp->imp_obd->obd_name);
 		rc = -ENODEV;
@@ -877,14 +891,16 @@
 		}
 
 		/* if applies, adjust the imp->imp_msg_magic here
-		 * according to reply flags */
+		 * according to reply flags
+		 */
 
 		imp->imp_remote_handle =
 				*lustre_msg_get_handle(request->rq_repmsg);
 
 		/* Initial connects are allowed for clients with non-random
 		 * uuids when servers are in recovery.  Simply signal the
-		 * servers replay is complete and wait in REPLAY_WAIT. */
+		 * servers replay is complete and wait in REPLAY_WAIT.
+		 */
 		if (msg_flags & MSG_CONNECT_RECOVERING) {
 			CDEBUG(D_HA, "connect to %s during recovery\n",
 			       obd2cli_tgt(imp->imp_obd));
@@ -923,7 +939,8 @@
 			 * already erased all of our state because of previous
 			 * eviction. If it is in recovery - we are safe to
 			 * participate since we can reestablish all of our state
-			 * with server again */
+			 * with server again
+			 */
 			if ((msg_flags & MSG_CONNECT_RECOVERING)) {
 				CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n",
 				       obd2cli_tgt(imp->imp_obd),
@@ -1039,7 +1056,8 @@
 		     ocd->ocd_version < LUSTRE_VERSION_CODE -
 					LUSTRE_VERSION_OFFSET_WARN)) {
 			/* Sigh, some compilers do not like #ifdef in the middle
-			   of macro arguments */
+			 * of macro arguments
+			 */
 			const char *older = "older. Consider upgrading server or downgrading client"
 				;
 			const char *newer = "newer than client version. Consider upgrading client"
@@ -1061,7 +1079,8 @@
 		 * fixup is version-limited, because we don't want to carry the
 		 * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
 		 * need interop with unpatched 2.2 servers.  For newer servers,
-		 * the client will do MNE swabbing only as needed.  LU-1644 */
+		 * the client will do MNE swabbing only as needed.  LU-1644
+		 */
 		if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
 			     !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
 			     OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
@@ -1079,7 +1098,8 @@
 		if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
 			/* We sent to the server ocd_cksum_types with bits set
 			 * for algorithms we understand. The server masked off
-			 * the checksum types it doesn't support */
+			 * the checksum types it doesn't support
+			 */
 			if ((ocd->ocd_cksum_types &
 			     cksum_types_supported_client()) == 0) {
 				LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
@@ -1093,7 +1113,8 @@
 			}
 		} else {
 			/* The server does not support OBD_CONNECT_CKSUM.
-			 * Enforce ADLER for backward compatibility*/
+			 * Enforce ADLER for backward compatibility
+			 */
 			cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
 		}
 		cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
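
For the checksum negotiation in this hunk, a hedged sketch of the bitmask handshake may help. The flag values and the selection order below are hypothetical (the real OBD_CKSUM_* bits and cksum_type_select() live in the Lustre headers), but the shape is the same: the client advertises a mask, the server returns the subset it supports, and the client picks one bit or falls back to ADLER.

#include <stdint.h>

#define EX_CKSUM_CRC32	0x1	/* hypothetical flag values */
#define EX_CKSUM_ADLER	0x2
#define EX_CKSUM_CRC32C	0x4

static uint32_t ex_cksum_select(uint32_t client_types, uint32_t server_types)
{
	uint32_t common = client_types & server_types;

	if (!common)
		return EX_CKSUM_ADLER;	/* nothing in common: fall back */

	/* prefer the strongest/cheapest algorithm both sides support */
	if (common & EX_CKSUM_CRC32C)
		return EX_CKSUM_CRC32C;
	if (common & EX_CKSUM_ADLER)
		return EX_CKSUM_ADLER;
	return EX_CKSUM_CRC32;
}
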
@@ -1109,7 +1130,8 @@
 		/* Reset ns_connect_flags only for initial connect. It might be
 		 * changed in while using FS and if we reset it in reconnect
 		 * this leads to losing user settings done before such as
-		 * disable lru_resize, etc. */
+		 * disable lru_resize, etc.
+		 */
 		if (old_connect_flags != exp_connect_flags(exp) ||
 		    aa->pcaa_initial_connect) {
 			CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
@@ -1123,13 +1145,14 @@
 		if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
 		    (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
 			/* We need a per-message support flag, because
-			   a. we don't know if the incoming connect reply
-			      supports AT or not (in reply_in_callback)
-			      until we unpack it.
-			   b. failovered server means export and flags are gone
-			      (in ptlrpc_send_reply).
-			   Can only be set when we know AT is supported at
-			   both ends */
+			 * a. we don't know if the incoming connect reply
+			 *    supports AT or not (in reply_in_callback)
+			 *    until we unpack it.
+			 * b. failovered server means export and flags are gone
+			 *    (in ptlrpc_send_reply).
+			 * Can only be set when we know AT is supported at
+			 * both ends
+			 */
 			imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
 		else
 			imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
@@ -1162,7 +1185,7 @@
 			struct obd_connect_data *ocd;
 
 			/* reply message might not be ready */
-			if (request->rq_repmsg == NULL)
+			if (!request->rq_repmsg)
 				return -EPROTO;
 
 			ocd = req_capsule_server_get(&request->rq_pill,
@@ -1243,7 +1266,7 @@
 
 	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
 					OBD_PING);
-	if (req == NULL) {
+	if (!req) {
 		atomic_dec(&imp->imp_replay_inflight);
 		return -ENOMEM;
 	}
@@ -1339,7 +1362,8 @@
 		/* bug 17802:  XXX client_disconnect_export vs connect request
 		 * race. if client will evicted at this time, we start
 		 * invalidate thread without reference to import and import can
-		 * be freed at same time. */
+		 * be freed at the same time.
+		 */
 		class_import_get(imp);
 		task = kthread_run(ptlrpc_invalidate_import_thread, imp,
 				     "ll_imp_inval");
@@ -1471,11 +1495,13 @@
 	if (req) {
 		/* We are disconnecting, do not retry a failed DISCONNECT rpc if
 		 * it fails.  We can get through the above with a down server
-		 * if the client doesn't know the server is gone yet. */
+		 * if the client doesn't know the server is gone yet.
+		 */
 		req->rq_no_resend = 1;
 
 		/* We want client umounts to happen quickly, no matter the
-		   server state... */
+		 * server state...
+		 */
 		req->rq_timeout = min_t(int, req->rq_timeout,
 					INITIAL_CONNECT_TIMEOUT);
 
@@ -1507,9 +1533,10 @@
 extern unsigned int at_min, at_max, at_history;
 
 /* Bin into timeslices using AT_BINS bins.
-   This gives us a max of the last binlimit*AT_BINS secs without the storage,
-   but still smoothing out a return to normalcy from a slow response.
-   (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
+ * This gives us a max of the last binlimit*AT_BINS secs without the storage,
+ * but still smoothing out a return to normalcy from a slow response.
+ * (E.g. remember the maximum latency in each minute of the last 4 minutes.)
+ */
 int at_measured(struct adaptive_timeout *at, unsigned int val)
 {
 	unsigned int old = at->at_current;
@@ -1523,7 +1550,8 @@
 
 	if (val == 0)
 		/* 0's don't count, because we never want our timeout to
-		   drop to 0, and because 0 could mean an error */
+		 * drop to 0, and because 0 could mean an error
+		 */
 		return 0;
 
 	spin_lock(&at->at_lock);
@@ -1565,7 +1593,8 @@
 
 	if (at->at_flags & AT_FLG_NOHIST)
 		/* Only keep last reported val; keeping the rest of the history
-		   for proc only */
+		 * for debugfs only
+		 */
 		at->at_current = val;
 
 	if (at_max > 0)
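
The at_measured() comment above describes the adaptive-timeout binning: keep a per-window maximum in AT_BINS slots so a single slow reply decays away after AT_BINS windows. A self-contained sketch of that idea, with hypothetical names and a fixed four-bin history (the real struct adaptive_timeout also handles locking, at_min/at_max clamping and the AT_FLG_NOHIST case):

#include <string.h>

#define EX_AT_BINS	4

struct ex_adaptive_timeout {
	unsigned long	ex_binstart;		/* start time of the newest bin */
	unsigned int	ex_hist[EX_AT_BINS];	/* per-bin maxima, [0] is newest */
	unsigned int	ex_binlimit;		/* seconds covered by one bin (> 0) */
};

static unsigned int ex_at_measured(struct ex_adaptive_timeout *at,
				   unsigned long now, unsigned int val)
{
	unsigned int i, max = 0;

	if (now - at->ex_binstart >= at->ex_binlimit) {
		/* time moved on: shift the history towards the old end */
		unsigned int shift = (now - at->ex_binstart) / at->ex_binlimit;

		if (shift >= EX_AT_BINS) {
			memset(at->ex_hist, 0, sizeof(at->ex_hist));
		} else {
			for (i = EX_AT_BINS - 1; i >= shift; i--)
				at->ex_hist[i] = at->ex_hist[i - shift];
			memset(at->ex_hist, 0, shift * sizeof(at->ex_hist[0]));
		}
		at->ex_binstart += (unsigned long)shift * at->ex_binlimit;
	}

	if (val > at->ex_hist[0])
		at->ex_hist[0] = val;		/* newest bin keeps its maximum */

	for (i = 0; i < EX_AT_BINS; i++)	/* report the max over all bins */
		if (at->ex_hist[i] > max)
			max = at->ex_hist[i];
	return max;
}
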
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index c0e613c..0e60264 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -118,25 +118,6 @@
 	&RMF_OBD_QUOTACTL
 };
 
-static const struct req_msg_field *quota_body_only[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *ldlm_intent_quota_client[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_DLM_REQ,
-	&RMF_LDLM_INTENT,
-	&RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *ldlm_intent_quota_server[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_DLM_REP,
-	&RMF_DLM_LVB,
-	&RMF_QUOTA_BODY
-};
-
 static const struct req_msg_field *mdt_close_client[] = {
 	&RMF_PTLRPC_BODY,
 	&RMF_MDT_EPOCH,
@@ -514,16 +495,6 @@
 	&RMF_CAPA2
 };
 
-static const struct req_msg_field *mds_update_client[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_UPDATE,
-};
-
-static const struct req_msg_field *mds_update_server[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_UPDATE_REPLY,
-};
-
 static const struct req_msg_field *llog_origin_handle_create_client[] = {
 	&RMF_PTLRPC_BODY,
 	&RMF_LLOGD_BODY,
@@ -551,16 +522,6 @@
 	&RMF_EADATA
 };
 
-static const struct req_msg_field *obd_idx_read_client[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_IDX_INFO
-};
-
-static const struct req_msg_field *obd_idx_read_server[] = {
-	&RMF_PTLRPC_BODY,
-	&RMF_IDX_INFO
-};
-
 static const struct req_msg_field *ost_body_only[] = {
 	&RMF_PTLRPC_BODY,
 	&RMF_OST_BODY
@@ -676,7 +637,6 @@
 static struct req_format *req_formats[] = {
 	&RQF_OBD_PING,
 	&RQF_OBD_SET_INFO,
-	&RQF_OBD_IDX_READ,
 	&RQF_SEC_CTX,
 	&RQF_MGS_TARGET_REG,
 	&RQF_MGS_SET_INFO,
@@ -721,7 +681,6 @@
 	&RQF_MDS_HSM_ACTION,
 	&RQF_MDS_HSM_REQUEST,
 	&RQF_MDS_SWAP_LAYOUTS,
-	&RQF_UPDATE_OBJ,
 	&RQF_QC_CALLBACK,
 	&RQF_OST_CONNECT,
 	&RQF_OST_DISCONNECT,
@@ -759,8 +718,6 @@
 	&RQF_LDLM_INTENT_CREATE,
 	&RQF_LDLM_INTENT_UNLINK,
 	&RQF_LDLM_INTENT_GETXATTR,
-	&RQF_LDLM_INTENT_QUOTA,
-	&RQF_QUOTA_DQACQ,
 	&RQF_LOG_CANCEL,
 	&RQF_LLOG_ORIGIN_HANDLE_CREATE,
 	&RQF_LLOG_ORIGIN_HANDLE_DESTROY,
@@ -899,11 +856,6 @@
 		    lustre_swab_obd_quotactl, NULL);
 EXPORT_SYMBOL(RMF_OBD_QUOTACTL);
 
-struct req_msg_field RMF_QUOTA_BODY =
-	DEFINE_MSGF("quota_body", 0,
-		    sizeof(struct quota_body), lustre_swab_quota_body, NULL);
-EXPORT_SYMBOL(RMF_QUOTA_BODY);
-
 struct req_msg_field RMF_MDT_EPOCH =
 	DEFINE_MSGF("mdt_ioepoch", 0,
 		    sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL);
@@ -1105,10 +1057,6 @@
 	DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL);
 EXPORT_SYMBOL(RMF_FIEMAP_VAL);
 
-struct req_msg_field RMF_IDX_INFO =
-	DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info),
-		    lustre_swab_idx_info, NULL);
-EXPORT_SYMBOL(RMF_IDX_INFO);
 struct req_msg_field RMF_HSM_USER_STATE =
 	DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state),
 		    lustre_swab_hsm_user_state, NULL);
@@ -1145,15 +1093,6 @@
 		    lustre_swab_hsm_request, NULL);
 EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST);
 
-struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1,
-					      lustre_swab_update_buf, NULL);
-EXPORT_SYMBOL(RMF_UPDATE);
-
-struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1,
-						lustre_swab_update_reply_buf,
-						    NULL);
-EXPORT_SYMBOL(RMF_UPDATE_REPLY);
-
 struct req_msg_field RMF_SWAP_LAYOUTS =
 	DEFINE_MSGF("swap_layouts", 0, sizeof(struct  mdc_swap_layouts),
 		    lustre_swab_swap_layouts, NULL);
@@ -1196,12 +1135,6 @@
 	DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty);
 EXPORT_SYMBOL(RQF_OBD_SET_INFO);
 
-/* Read index file through the network */
-struct req_format RQF_OBD_IDX_READ =
-	DEFINE_REQ_FMT0("OBD_IDX_READ",
-			obd_idx_read_client, obd_idx_read_server);
-EXPORT_SYMBOL(RQF_OBD_IDX_READ);
-
 struct req_format RQF_SEC_CTX =
 	DEFINE_REQ_FMT0("SEC_CTX", empty, empty);
 EXPORT_SYMBOL(RQF_SEC_CTX);
@@ -1253,16 +1186,6 @@
 	DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
 EXPORT_SYMBOL(RQF_QC_CALLBACK);
 
-struct req_format RQF_QUOTA_DQACQ =
-	DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only);
-EXPORT_SYMBOL(RQF_QUOTA_DQACQ);
-
-struct req_format RQF_LDLM_INTENT_QUOTA =
-	DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA",
-			ldlm_intent_quota_client,
-			ldlm_intent_quota_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA);
-
 struct req_format RQF_MDS_GETSTATUS =
 	DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
 EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
@@ -1357,11 +1280,6 @@
 			mds_getinfo_server);
 EXPORT_SYMBOL(RQF_MDS_GET_INFO);
 
-struct req_format RQF_UPDATE_OBJ =
-	DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client,
-			mds_update_server);
-EXPORT_SYMBOL(RQF_UPDATE_OBJ);
-
 struct req_format RQF_LDLM_ENQUEUE =
 	DEFINE_REQ_FMT0("LDLM_ENQUEUE",
 			ldlm_enqueue_client, ldlm_enqueue_lvb_server);
@@ -1712,7 +1630,7 @@
 	 * high-priority RPC queue getting peeked at before ost_handle()
 	 * handles an OST RPC.
 	 */
-	if (req != NULL && pill == &req->rq_pill && req->rq_pill_init)
+	if (req && pill == &req->rq_pill && req->rq_pill_init)
 		return;
 
 	memset(pill, 0, sizeof(*pill));
@@ -1720,7 +1638,7 @@
 	pill->rc_loc = location;
 	req_capsule_init_area(pill);
 
-	if (req != NULL && pill == &req->rq_pill)
+	if (req && pill == &req->rq_pill)
 		req->rq_pill_init = 1;
 }
 EXPORT_SYMBOL(req_capsule_init);
@@ -1752,7 +1670,7 @@
  */
 void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt)
 {
-	LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt);
+	LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt);
 	LASSERT(__req_format_is_sane(fmt));
 
 	pill->rc_fmt = fmt;
@@ -1773,8 +1691,6 @@
 	const struct req_format *fmt = pill->rc_fmt;
 	int i;
 
-	LASSERT(fmt != NULL);
-
 	for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
 		if (pill->rc_area[loc][i] == -1) {
 			pill->rc_area[loc][i] =
@@ -1810,7 +1726,7 @@
 
 	LASSERT(pill->rc_loc == RCL_SERVER);
 	fmt = pill->rc_fmt;
-	LASSERT(fmt != NULL);
+	LASSERT(fmt);
 
 	count = req_capsule_filled_sizes(pill, RCL_SERVER);
 	rc = lustre_pack_reply(pill->rc_req, count,
@@ -1865,7 +1781,7 @@
 	swabber = swabber ?: field->rmf_swabber;
 
 	if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) &&
-	    swabber != NULL && value != NULL)
+	    swabber && value)
 		do_swab = 1;
 	else
 		do_swab = 0;
@@ -1947,17 +1863,15 @@
 		[RCL_SERVER] = "server"
 	};
 
-	LASSERT(pill != NULL);
-	LASSERT(pill != LP_POISON);
 	fmt = pill->rc_fmt;
-	LASSERT(fmt != NULL);
+	LASSERT(fmt);
 	LASSERT(fmt != LP_POISON);
 	LASSERT(__req_format_is_sane(fmt));
 
 	offset = __req_capsule_offset(pill, field, loc);
 
 	msg = __req_msg(pill, loc);
-	LASSERT(msg != NULL);
+	LASSERT(msg);
 
 	getter = (field->rmf_flags & RMF_F_STRING) ?
 		(typeof(getter))lustre_msg_string : lustre_msg_buf;
@@ -1980,7 +1894,7 @@
 	}
 	value = getter(msg, offset, len);
 
-	if (value == NULL) {
+	if (!value) {
 		DEBUG_REQ(D_ERROR, pill->rc_req,
 			  "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. %d (%s)\n",
 			  field->rmf_name, offset, lustre_msg_bufcount(msg),
@@ -2209,7 +2123,7 @@
 
 	const struct req_format *old;
 
-	LASSERT(pill->rc_fmt != NULL);
+	LASSERT(pill->rc_fmt);
 	LASSERT(__req_format_is_sane(fmt));
 
 	old = pill->rc_fmt;
@@ -2222,7 +2136,7 @@
 			const struct req_msg_field *ofield = FMT_FIELD(old, i, j);
 
 			/* "opaque" fields can be transmogrified */
-			if (ofield->rmf_swabber == NULL &&
+			if (!ofield->rmf_swabber &&
 			    (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 &&
 			    (ofield->rmf_size == -1 ||
 			    ofield->rmf_flags == RMF_F_NO_SIZE_CHECK))
@@ -2289,7 +2203,7 @@
 	int offset;
 
 	fmt = pill->rc_fmt;
-	LASSERT(fmt != NULL);
+	LASSERT(fmt);
 	LASSERT(__req_format_is_sane(fmt));
 	LASSERT(req_capsule_has_field(pill, field, loc));
 	LASSERT(req_capsule_field_present(pill, field, loc));
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index e877020..a23ac5f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -75,7 +75,8 @@
 } while (0)
 
 /* This is a callback from the llog_* functions.
- * Assumes caller has already pushed us into the kernel context. */
+ * Assumes caller has already pushed us into the kernel context.
+ */
 static int llog_client_open(const struct lu_env *env,
 			    struct llog_handle *lgh, struct llog_logid *logid,
 			    char *name, enum llog_open_param open_param)
@@ -93,7 +94,7 @@
 	LASSERT(lgh);
 
 	req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -130,7 +131,7 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -158,7 +159,7 @@
 	req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
 					LUSTRE_LOG_VERSION,
 					LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto err_exit;
 	}
@@ -179,14 +180,14 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EFAULT;
 		goto out;
 	}
 
 	/* The log records are swabbed as they are processed */
 	ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
-	if (ptr == NULL) {
+	if (!ptr) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -216,7 +217,7 @@
 	req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
 					LUSTRE_LOG_VERSION,
 					LLOG_ORIGIN_HANDLE_PREV_BLOCK);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto err_exit;
 	}
@@ -236,13 +237,13 @@
 		goto out;
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
-	if (body == NULL) {
+	if (!body) {
 		rc = -EFAULT;
 		goto out;
 	}
 
 	ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
-	if (ptr == NULL) {
+	if (!ptr) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -269,7 +270,7 @@
 	req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
 					LUSTRE_LOG_VERSION,
 					LLOG_ORIGIN_HANDLE_READ_HEADER);
-	if (req == NULL) {
+	if (!req) {
 		rc = -ENOMEM;
 		goto err_exit;
 	}
@@ -285,7 +286,7 @@
 		goto out;
 
 	hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
-	if (hdr == NULL) {
+	if (!hdr) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -316,8 +317,9 @@
 			     struct llog_handle *handle)
 {
 	/* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because
-	   the servers all close the file at the end of every
-	   other LLOG_ RPC. */
+	 * the servers all close the file at the end of every
+	 * other LLOG_ RPC.
+	 */
 	return 0;
 }
 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index dac66f5..fbccb62 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -58,7 +58,7 @@
 
 	LASSERT(ctxt);
 	new_imp = ctxt->loc_obd->u.cli.cl_import;
-	LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
+	LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp,
 		 "%p - %p\n", ctxt->loc_imp, new_imp);
 	mutex_lock(&ctxt->loc_mutex);
 	if (ctxt->loc_imp != new_imp) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cc55b79..fcaa289 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -131,7 +131,6 @@
 	{ SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
 	{ SEC_CTX_FINI,     "sec_ctx_fini" },
 	{ FLD_QUERY,	"fld_query" },
-	{ UPDATE_OBJ,	    "update_obj" },
 };
 
 static struct ll_eopcode {
@@ -192,15 +191,15 @@
 	unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
 					  LPROCFS_CNTR_STDDEV;
 
-	LASSERT(*debugfs_root_ret == NULL);
-	LASSERT(*stats_ret == NULL);
+	LASSERT(!*debugfs_root_ret);
+	LASSERT(!*stats_ret);
 
 	svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,
 					0);
-	if (svc_stats == NULL)
+	if (!svc_stats)
 		return;
 
-	if (dir != NULL) {
+	if (dir) {
 		svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL);
 		if (IS_ERR(svc_debugfs_entry)) {
 			lprocfs_free_stats(&svc_stats);
@@ -246,11 +245,11 @@
 
 	rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats);
 	if (rc < 0) {
-		if (dir != NULL)
+		if (dir)
 			ldebugfs_remove(&svc_debugfs_entry);
 		lprocfs_free_stats(&svc_stats);
 	} else {
-		if (dir != NULL)
+		if (dir)
 			*debugfs_root_ret = svc_debugfs_entry;
 		*stats_ret = svc_stats;
 	}
@@ -307,7 +306,8 @@
 
 	/* This sanity check is more of an insanity check; we can still
 	 * hose a kernel by allowing the request history to grow too
-	 * far. */
+	 * far.
+	 */
 	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (val > totalram_pages / (2 * bufpages))
 		return -ERANGE;
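
As a hedged worked example of the bound in this hunk (all numbers hypothetical): with 4 KiB pages and 16 KiB request buffers, bufpages rounds up to 4, so a 1 GiB machine (262144 pages) allows at most 262144 / (2 * 4) = 32768 history buffers, i.e. the request history can never pin more than half of RAM.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, page_shift = 12;	/* hypothetical */
	unsigned long srv_buf_size = 16384;			/* 16 KiB request buffers */
	unsigned long totalram_pages = 262144;			/* 1 GiB of RAM */
	unsigned long bufpages = (srv_buf_size + page_size - 1) >> page_shift;

	/* same shape as the check above: cap history at half of RAM */
	printf("max history buffers: %lu\n", totalram_pages / (2 * bufpages));
	return 0;
}
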
@@ -456,8 +456,6 @@
 static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
 				struct ptlrpc_nrs_pol_info *info)
 {
-	LASSERT(policy != NULL);
-	LASSERT(info != NULL);
 	assert_spin_locked(&policy->pol_nrs->nrs_lock);
 
 	memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
@@ -508,7 +506,7 @@
 	spin_unlock(&nrs->nrs_lock);
 
 	infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS);
-	if (infos == NULL) {
+	if (!infos) {
 		rc = -ENOMEM;
 		goto unlock;
 	}
@@ -676,7 +674,7 @@
 	/**
 	 * No [reg|hp] token has been specified
 	 */
-	if (cmd == NULL)
+	if (!cmd)
 		goto default_queue;
 
 	/**
@@ -733,15 +731,15 @@
 	struct list_head *e;
 	struct ptlrpc_request *req;
 
-	if (srhi->srhi_req != NULL &&
-	    srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
+	if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
 	    srhi->srhi_seq <= seq) {
 		/* If srhi_req was set previously, hasn't been culled and
 		 * we're searching for a seq on or after it (i.e. more
 		 * recent), search from it onwards.
 		 * Since the service history is LRU (i.e. culled reqs will
 		 * be near the head), we shouldn't have to do long
-		 * re-scans */
+		 * re-scans
+		 */
 		LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
 			 "%s:%d: seek seq %llu, request seq %llu\n",
 			 svcpt->scp_service->srv_name, svcpt->scp_cpt,
@@ -919,7 +917,8 @@
 		 * here.  The request could contain any old crap, so you
 		 * must be just as careful as the service's request
 		 * parser. Currently I only print stuff here I know is OK
-		 * to look at coz it was set up in request_in_callback()!!! */
+		 * to look at coz it was set up in request_in_callback()!!!
+		 */
 		seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
 			   req->rq_history_seq, nidstr,
 			   libcfs_id2str(req->rq_peer), req->rq_xid,
@@ -927,7 +926,7 @@
 			   (s64)req->rq_arrival_time.tv_sec,
 			   (long)(req->rq_sent - req->rq_arrival_time.tv_sec),
 			   (long)(req->rq_sent - req->rq_deadline));
-		if (svc->srv_ops.so_req_printer == NULL)
+		if (!svc->srv_ops.so_req_printer)
 			seq_putc(s, '\n');
 		else
 			svc->srv_ops.so_req_printer(s, srhi->srhi_req);
@@ -1103,7 +1102,7 @@
 				 "stats", &svc->srv_debugfs_entry,
 				 &svc->srv_stats);
 
-	if (svc->srv_debugfs_entry == NULL)
+	if (IS_ERR_OR_NULL(svc->srv_debugfs_entry))
 		return;
 
 	ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL);
@@ -1129,7 +1128,7 @@
 	int opc = opcode_offset(op);
 
 	svc_stats = req->rq_import->imp_obd->obd_svc_stats;
-	if (svc_stats == NULL || opc <= 0)
+	if (!svc_stats || opc <= 0)
 		return;
 	LASSERT(opc < LUSTRE_MAX_OPCODES);
 	if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
@@ -1166,7 +1165,7 @@
 
 void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
 {
-	if (svc->srv_debugfs_entry != NULL)
+	if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry))
 		ldebugfs_remove(&svc->srv_debugfs_entry);
 
 	if (svc->srv_stats)
@@ -1198,7 +1197,7 @@
 
 	req = ptlrpc_prep_ping(obd->u.cli.cl_import);
 	up_read(&obd->u.cli.cl_sem);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req->rq_send_state = LUSTRE_IMP_FULL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index c5d7ff5..c5bbf7b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -56,7 +56,6 @@
 	lnet_md_t md;
 
 	LASSERT(portal != 0);
-	LASSERT(conn != NULL);
 	CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
 	md.start = base;
 	md.length = len;
@@ -88,7 +87,8 @@
 		int rc2;
 		/* We're going to get an UNLINK event when I unlink below,
 		 * which will complete just like any other failed send, so
-		 * I fall through and return success here! */
+		 * I fall through and return success here!
+		 */
 		CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
 		       libcfs_id2str(conn->c_peer), portal, xid, rc);
 		rc2 = LNetMDUnlink(*mdh);
@@ -130,7 +130,7 @@
 	LASSERT(desc->bd_md_count == 0);
 	LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
 	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
-	LASSERT(desc->bd_req != NULL);
+	LASSERT(desc->bd_req);
 	LASSERT(desc->bd_type == BULK_PUT_SINK ||
 		desc->bd_type == BULK_GET_SOURCE);
 
@@ -153,7 +153,8 @@
 	 * using the same RDMA match bits after an error.
 	 *
 	 * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
-	 * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
+	 * first bulk XID is power-of-two aligned before rq_xid. LU-1431
+	 */
 	xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
 	LASSERTF(!(desc->bd_registered &&
 		   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
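
The comment above notes that the first bulk XID is power-of-two aligned before rq_xid. A hedged worked example (variable names are local to the example) shows what the mask does when bd_md_max_brw is 4 and rq_xid happens to be 0x1007:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rq_xid = 0x1007;	/* XID of the request itself */
	uint64_t max_brw = 4;		/* bulk descriptors per RPC, power of 2 */
	uint64_t first = rq_xid & ~(max_brw - 1);

	/* bulks use match bits first .. first + max_brw - 1 (here the last
	 * one equals rq_xid, matching the "rq_xid is the last XID" comment)
	 */
	printf("first bulk XID %#llx, last %#llx\n",
	       (unsigned long long)first,
	       (unsigned long long)(first + max_brw - 1));
	return 0;
}
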
@@ -209,7 +210,8 @@
 	}
 
 	/* Set rq_xid to matchbits of the final bulk so that server can
-	 * infer the number of bulks that were prepared */
+	 * infer the number of bulks that were prepared
+	 */
 	req->rq_xid = --xid;
 	LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
 		 "bd_last_xid = x%llu, rq_xid = x%llu\n",
@@ -260,7 +262,8 @@
 	/* the unlink ensures the callback happens ASAP and is the last
 	 * one.  If it fails, it must be because completion just happened,
 	 * but we must still l_wait_event() in this case to give liblustre
-	 * a chance to run client_bulk_callback() */
+	 * a chance to run client_bulk_callback()
+	 */
 	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
 
 	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
@@ -273,14 +276,15 @@
 	if (async)
 		return 0;
 
-	if (req->rq_set != NULL)
+	if (req->rq_set)
 		wq = &req->rq_set->set_waitq;
 	else
 		wq = &req->rq_reply_waitq;
 
 	for (;;) {
 		/* Network access will complete in finite time but the HUGE
-		 * timeout lets us CWARN for visibility of sluggish NALs */
+		 * timeout lets us CWARN for visibility of sluggish LNDs
+		 */
 		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
 					   cfs_time_seconds(1), NULL, NULL);
 		rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
@@ -305,13 +309,13 @@
 				 req->rq_arrival_time.tv_sec, 1);
 
 	if (!(flags & PTLRPC_REPLY_EARLY) &&
-	    (req->rq_type != PTL_RPC_MSG_ERR) &&
-	    (req->rq_reqmsg != NULL) &&
+	    (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg &&
 	    !(lustre_msg_get_flags(req->rq_reqmsg) &
 	      (MSG_RESENT | MSG_REPLAY |
 	       MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
 		/* early replies, errors and recovery requests don't count
-		 * toward our service time estimate */
+		 * toward our service time estimate
+		 */
 		int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
 
 		if (oldse != 0) {
@@ -325,7 +329,8 @@
 	lustre_msg_set_service_time(req->rq_repmsg, service_time);
 	/* Report service time estimate for future client reqs, but report 0
 	 * (to be ignored by client) if it's a error reply during recovery.
-	 * (bz15815) */
+	 * (bz15815)
+	 */
 	if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export)
 		lustre_msg_set_timeout(req->rq_repmsg, 0);
 	else
@@ -360,10 +365,10 @@
 	 * target_queue_final_reply().
 	 */
 	LASSERT(req->rq_no_reply == 0);
-	LASSERT(req->rq_reqbuf != NULL);
-	LASSERT(rs != NULL);
+	LASSERT(req->rq_reqbuf);
+	LASSERT(rs);
 	LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
-	LASSERT(req->rq_repmsg != NULL);
+	LASSERT(req->rq_repmsg);
 	LASSERT(req->rq_repmsg == rs->rs_msg);
 	LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
 	LASSERT(rs->rs_cb_id.cbid_arg == rs);
@@ -403,12 +408,12 @@
 
 	ptlrpc_at_set_reply(req, flags);
 
-	if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
+	if (!req->rq_export || !req->rq_export->exp_connection)
 		conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
 	else
 		conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
 
-	if (unlikely(conn == NULL)) {
+	if (unlikely(!conn)) {
 		CERROR("not replying on NULL connection\n"); /* bug 9635 */
 		return -ENOTCONN;
 	}
@@ -498,12 +503,13 @@
 	LASSERT(request->rq_wait_ctx == 0);
 
 	/* If this is a re-transmit, we're required to have disengaged
-	 * cleanly from the previous attempt */
+	 * cleanly from the previous attempt
+	 */
 	LASSERT(!request->rq_receiving_reply);
 	LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
 		(request->rq_import->imp_state == LUSTRE_IMP_FULL)));
 
-	if (unlikely(obd != NULL && obd->obd_fail)) {
+	if (unlikely(obd && obd->obd_fail)) {
 		CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
 			obd->obd_name);
 		/* this prevents us from waiting in ptlrpc_queue_wait */
@@ -535,7 +541,7 @@
 		goto out;
 
 	/* bulk register should be done after wrap_request() */
-	if (request->rq_bulk != NULL) {
+	if (request->rq_bulk) {
 		rc = ptlrpc_register_bulk(request);
 		if (rc != 0)
 			goto out;
@@ -543,14 +549,15 @@
 
 	if (!noreply) {
 		LASSERT(request->rq_replen != 0);
-		if (request->rq_repbuf == NULL) {
-			LASSERT(request->rq_repdata == NULL);
-			LASSERT(request->rq_repmsg == NULL);
+		if (!request->rq_repbuf) {
+			LASSERT(!request->rq_repdata);
+			LASSERT(!request->rq_repmsg);
 			rc = sptlrpc_cli_alloc_repbuf(request,
 						      request->rq_replen);
 			if (rc) {
 				/* this prevents us from looping in
-				 * ptlrpc_queue_wait */
+				 * ptlrpc_queue_wait
+				 */
 				spin_lock(&request->rq_lock);
 				request->rq_err = 1;
 				spin_unlock(&request->rq_lock);
@@ -602,7 +609,8 @@
 		reply_md.eq_handle = ptlrpc_eq_h;
 
 		/* We must see the unlink callback to unset rq_reply_unlink,
-		   so we can't auto-unlink */
+		 * so we can't auto-unlink
+		 */
 		rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
 				  &request->rq_reply_md_h);
 		if (rc != 0) {
@@ -623,7 +631,7 @@
 
 	/* add references on request for request_out_callback */
 	ptlrpc_request_addref(request);
-	if (obd != NULL && obd->obd_svc_stats != NULL)
+	if (obd && obd->obd_svc_stats)
 		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
 			atomic_read(&request->rq_import->imp_inflight));
 
@@ -632,7 +640,8 @@
 	ktime_get_real_ts64(&request->rq_arrival_time);
 	request->rq_sent = ktime_get_real_seconds();
 	/* We give the server rq_timeout secs to process the req, and
-	   add the network latency for our local timeout. */
+	 * add the network latency for our local timeout.
+	 */
 	request->rq_deadline = request->rq_sent + request->rq_timeout +
 		ptlrpc_at_get_net_latency(request);
 
@@ -656,7 +665,8 @@
  cleanup_me:
 	/* MEUnlink is safe; the PUT didn't even get off the ground, and
 	 * nobody apart from the PUT's target has the right nid+XID to
-	 * access the reply buffer. */
+	 * access the reply buffer.
+	 */
 	rc2 = LNetMEUnlink(reply_me_h);
 	LASSERT(rc2 == 0);
 	/* UNLINKED callback called synchronously */
@@ -664,7 +674,8 @@
 
  cleanup_bulk:
 	/* We do sync unlink here as there was no real transfer here so
-	 * the chance to have long unlink to sluggish net is smaller here. */
+	 * the chance of a long unlink due to a sluggish net is smaller here.
+	 */
 	ptlrpc_unregister_bulk(request, 0);
  out:
 	if (request->rq_memalloc)
@@ -692,7 +703,8 @@
 
 	/* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
 	 * which means buffer can only be attached on local CPT, and LND
-	 * threads can find it by grabbing a local lock */
+	 * threads can find it by grabbing a local lock
+	 */
 	rc = LNetMEAttach(service->srv_req_portal,
 			  match_id, 0, ~0, LNET_UNLINK,
 			  rqbd->rqbd_svcpt->scp_cpt >= 0 ?
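
Nearly every hunk in this ptlrpc series applies the same two checkpatch-driven conventions: pointer tests drop the explicit == NULL / != NULL comparison, and multi-line comments put the closing */ on a line of its own. A minimal sketch of the target style, using made-up names rather than anything from this patch:

#include <linux/errno.h>

struct demo_child { int count; };
struct demo_obj { struct demo_child *child; };

static int demo_style(struct demo_obj *obj)
{
	/* Preferred style: test the pointer directly instead of
	 * comparing against NULL, and close block comments with
	 * the terminator on its own line.
	 */
	if (!obj || !obj->child)
		return -EINVAL;

	obj->child->count++;
	return 0;
}
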
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 7044e1f..58e5d86 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -13,10 +13,6 @@
  * GNU General Public License version 2 for more details.  A copy is
  * included in the COPYING file that accompanied this code.
 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  * GPL HEADER END
  */
 /*
@@ -47,9 +43,6 @@
 #include "../../include/linux/libcfs/libcfs.h"
 #include "ptlrpc_internal.h"
 
-/* XXX: This is just for liblustre. Remove the #if defined directive when the
- * "cfs_" prefix is dropped from cfs_list_head. */
-
 /**
  * NRS core object.
  */
@@ -57,7 +50,7 @@
 
 static int nrs_policy_init(struct ptlrpc_nrs_policy *policy)
 {
-	return policy->pol_desc->pd_ops->op_policy_init != NULL ?
+	return policy->pol_desc->pd_ops->op_policy_init ?
 	       policy->pol_desc->pd_ops->op_policy_init(policy) : 0;
 }
 
@@ -66,7 +59,7 @@
 	LASSERT(policy->pol_ref == 0);
 	LASSERT(policy->pol_req_queued == 0);
 
-	if (policy->pol_desc->pd_ops->op_policy_fini != NULL)
+	if (policy->pol_desc->pd_ops->op_policy_fini)
 		policy->pol_desc->pd_ops->op_policy_fini(policy);
 }
 
@@ -82,7 +75,7 @@
 	if (policy->pol_state == NRS_POL_STATE_STOPPED)
 		return -ENODEV;
 
-	return policy->pol_desc->pd_ops->op_policy_ctl != NULL ?
+	return policy->pol_desc->pd_ops->op_policy_ctl ?
 	       policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) :
 	       -ENOSYS;
 }
@@ -91,7 +84,7 @@
 {
 	struct ptlrpc_nrs *nrs = policy->pol_nrs;
 
-	if (policy->pol_desc->pd_ops->op_policy_stop != NULL) {
+	if (policy->pol_desc->pd_ops->op_policy_stop) {
 		spin_unlock(&nrs->nrs_lock);
 
 		policy->pol_desc->pd_ops->op_policy_stop(policy);
@@ -154,7 +147,7 @@
 {
 	struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
 
-	if (tmp == NULL)
+	if (!tmp)
 		return;
 
 	nrs->nrs_policy_primary = NULL;
@@ -220,12 +213,12 @@
 		 * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can
 		 * register with NRS core.
 		 */
-		LASSERT(nrs->nrs_policy_fallback == NULL);
+		LASSERT(!nrs->nrs_policy_fallback);
 	} else {
 		/**
 		 * Shouldn't start primary policy if w/o fallback policy.
 		 */
-		if (nrs->nrs_policy_fallback == NULL)
+		if (!nrs->nrs_policy_fallback)
 			return -EPERM;
 
 		if (policy->pol_state == NRS_POL_STATE_STARTED)
@@ -348,10 +341,10 @@
 {
 	struct ptlrpc_nrs_policy *policy = res->res_policy;
 
-	if (policy->pol_desc->pd_ops->op_res_put != NULL) {
+	if (policy->pol_desc->pd_ops->op_res_put) {
 		struct ptlrpc_nrs_resource *parent;
 
-		for (; res != NULL; res = parent) {
+		for (; res; res = parent) {
 			parent = res->res_parent;
 			policy->pol_desc->pd_ops->op_res_put(policy, res);
 		}
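
nrs_resource_put() above walks a child-to-parent chain of per-policy resources and hands each level back to the policy's op_res_put() hook. A hedged, stand-alone sketch of that walk-and-release pattern (the types and the release function are invented for illustration):

#include <stddef.h>

struct demo_res {
	struct demo_res *parent;
};

/* Stand-in for the policy's op_res_put() hook. */
static void demo_res_release(struct demo_res *res)
{
	/* drop whatever this level of the hierarchy pins */
}

/* Release a resource and every ancestor above it, child first. */
static void demo_res_put_chain(struct demo_res *res)
{
	struct demo_res *parent;

	for (; res; res = parent) {
		parent = res->parent;	/* save before releasing res */
		demo_res_release(res);
	}
}
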
@@ -390,12 +383,11 @@
 		rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res,
 							  &tmp, moving_req);
 		if (rc < 0) {
-			if (res != NULL)
+			if (res)
 				nrs_resource_put(res);
 			return NULL;
 		}
 
-		LASSERT(tmp != NULL);
 		tmp->res_parent = res;
 		tmp->res_policy = policy;
 		res = tmp;
@@ -445,7 +437,7 @@
 	nrs_policy_get_locked(fallback);
 
 	primary = nrs->nrs_policy_primary;
-	if (primary != NULL)
+	if (primary)
 		nrs_policy_get_locked(primary);
 
 	spin_unlock(&nrs->nrs_lock);
@@ -454,9 +446,9 @@
 	 * Obtain resource hierarchy references.
 	 */
 	resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req);
-	LASSERT(resp[NRS_RES_FALLBACK] != NULL);
+	LASSERT(resp[NRS_RES_FALLBACK]);
 
-	if (primary != NULL) {
+	if (primary) {
 		resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq,
 							 moving_req);
 		/**
@@ -465,7 +457,7 @@
 		 * reference on the policy as it will not be used for this
 		 * request.
 		 */
-		if (resp[NRS_RES_PRIMARY] == NULL)
+		if (!resp[NRS_RES_PRIMARY])
 			nrs_policy_put(primary);
 	}
 }
@@ -482,11 +474,10 @@
 static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp)
 {
 	struct ptlrpc_nrs_policy *pols[NRS_RES_MAX];
-	struct ptlrpc_nrs *nrs = NULL;
 	int i;
 
 	for (i = 0; i < NRS_RES_MAX; i++) {
-		if (resp[i] != NULL) {
+		if (resp[i]) {
 			pols[i] = resp[i]->res_policy;
 			nrs_resource_put(resp[i]);
 			resp[i] = NULL;
@@ -496,18 +487,9 @@
 	}
 
 	for (i = 0; i < NRS_RES_MAX; i++) {
-		if (pols[i] == NULL)
-			continue;
-
-		if (nrs == NULL) {
-			nrs = pols[i]->pol_nrs;
-			spin_lock(&nrs->nrs_lock);
-		}
-		nrs_policy_put_locked(pols[i]);
+		if (pols[i])
+			nrs_policy_put(pols[i]);
 	}
-
-	if (nrs != NULL)
-		spin_unlock(&nrs->nrs_lock);
 }
 
 /**
@@ -536,7 +518,7 @@
 
 	nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force);
 
-	LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy));
+	LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy));
 
 	return nrq;
 }
@@ -562,7 +544,7 @@
 	 * the preferred choice.
 	 */
 	for (i = NRS_RES_MAX - 1; i >= 0; i--) {
-		if (nrq->nr_res_ptrs[i] == NULL)
+		if (!nrq->nr_res_ptrs[i])
 			continue;
 
 		nrq->nr_res_idx = i;
@@ -632,7 +614,7 @@
 	spin_lock(&nrs->nrs_lock);
 
 	policy = nrs_policy_find_locked(nrs, name);
-	if (policy == NULL) {
+	if (!policy) {
 		rc = -ENOENT;
 		goto out;
 	}
@@ -654,7 +636,7 @@
 		break;
 	}
 out:
-	if (policy != NULL)
+	if (policy)
 		nrs_policy_put_locked(policy);
 
 	spin_unlock(&nrs->nrs_lock);
@@ -679,7 +661,7 @@
 	spin_lock(&nrs->nrs_lock);
 
 	policy = nrs_policy_find_locked(nrs, name);
-	if (policy == NULL) {
+	if (!policy) {
 		spin_unlock(&nrs->nrs_lock);
 
 		CERROR("Can't find NRS policy %s\n", name);
@@ -712,7 +694,7 @@
 
 	nrs_policy_fini(policy);
 
-	LASSERT(policy->pol_private == NULL);
+	LASSERT(!policy->pol_private);
 	kfree(policy);
 
 	return 0;
@@ -736,18 +718,16 @@
 	struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
 	int rc;
 
-	LASSERT(svcpt != NULL);
-	LASSERT(desc->pd_ops != NULL);
-	LASSERT(desc->pd_ops->op_res_get != NULL);
-	LASSERT(desc->pd_ops->op_req_get != NULL);
-	LASSERT(desc->pd_ops->op_req_enqueue != NULL);
-	LASSERT(desc->pd_ops->op_req_dequeue != NULL);
-	LASSERT(desc->pd_compat != NULL);
+	LASSERT(desc->pd_ops->op_res_get);
+	LASSERT(desc->pd_ops->op_req_get);
+	LASSERT(desc->pd_ops->op_req_enqueue);
+	LASSERT(desc->pd_ops->op_req_dequeue);
+	LASSERT(desc->pd_compat);
 
 	policy = kzalloc_node(sizeof(*policy), GFP_NOFS,
 			cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
 					    svcpt->scp_cpt));
-	if (policy == NULL)
+	if (!policy)
 		return -ENOMEM;
 
 	policy->pol_nrs = nrs;
@@ -767,7 +747,7 @@
 	spin_lock(&nrs->nrs_lock);
 
 	tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
-	if (tmp != NULL) {
+	if (tmp) {
 		CERROR("NRS policy %s has been registered, can't register it for %s\n",
 		       policy->pol_desc->pd_name,
 		       svcpt->scp_service->srv_name);
@@ -957,14 +937,14 @@
 	/**
 	 * Optionally allocate a high-priority NRS head.
 	 */
-	if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL)
+	if (!svcpt->scp_service->srv_ops.so_hpreq_handler)
 		goto out;
 
 	svcpt->scp_nrs_hp =
 		kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS,
 			cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
 					    svcpt->scp_cpt));
-	if (svcpt->scp_nrs_hp == NULL) {
+	if (!svcpt->scp_nrs_hp) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1089,7 +1069,7 @@
 			}
 		}
 
-		if (desc->pd_ops->op_lprocfs_fini != NULL)
+		if (desc->pd_ops->op_lprocfs_fini)
 			desc->pd_ops->op_lprocfs_fini(svc);
 	}
 
@@ -1117,13 +1097,12 @@
 	struct ptlrpc_nrs_pol_desc *desc;
 	int rc = 0;
 
-	LASSERT(conf != NULL);
-	LASSERT(conf->nc_ops != NULL);
-	LASSERT(conf->nc_compat != NULL);
+	LASSERT(conf->nc_ops);
+	LASSERT(conf->nc_compat);
 	LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one,
-		conf->nc_compat_svc_name != NULL));
+		conf->nc_compat_svc_name));
 	LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0,
-		     conf->nc_owner != NULL));
+		     conf->nc_owner));
 
 	conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
 
@@ -1146,7 +1125,7 @@
 
 	mutex_lock(&nrs_core.nrs_mutex);
 
-	if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) {
+	if (nrs_policy_find_desc_locked(conf->nc_name)) {
 		CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n",
 		       conf->nc_name);
 		rc = -EEXIST;
@@ -1224,7 +1203,7 @@
 		 * No need to take a reference to other modules here, as we
 		 * will be calling from the module's init() function.
 		 */
-		if (desc->pd_ops->op_lprocfs_init != NULL) {
+		if (desc->pd_ops->op_lprocfs_init) {
 			rc = desc->pd_ops->op_lprocfs_init(svc);
 			if (rc != 0) {
 				rc2 = nrs_policy_unregister_locked(desc);
@@ -1288,7 +1267,7 @@
 		if (!nrs_policy_compatible(svc, desc))
 			continue;
 
-		if (desc->pd_ops->op_lprocfs_init != NULL) {
+		if (desc->pd_ops->op_lprocfs_init) {
 			rc = desc->pd_ops->op_lprocfs_init(svc);
 			if (rc != 0)
 				goto failed;
@@ -1329,7 +1308,7 @@
 		if (!nrs_policy_compatible(svc, desc))
 			continue;
 
-		if (desc->pd_ops->op_lprocfs_fini != NULL)
+		if (desc->pd_ops->op_lprocfs_fini)
 			desc->pd_ops->op_lprocfs_fini(svc);
 	}
 
@@ -1376,7 +1355,8 @@
 	if (req->rq_nrq.nr_initialized) {
 		nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs);
 		/* no protection on bit nr_initialized because no
-		 * contention at this late stage */
+		 * contention at this late stage
+		 */
 		req->rq_nrq.nr_finalized = 1;
 	}
 }
@@ -1469,7 +1449,7 @@
 	list_for_each_entry(policy, &nrs->nrs_policy_queued,
 				pol_list_queued) {
 		nrq = nrs_request_get(policy, peek, force);
-		if (nrq != NULL) {
+		if (nrq) {
 			if (likely(!peek)) {
 				nrq->nr_started = 1;
 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
index 8e21f0c..0d8da35 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
@@ -13,10 +13,6 @@
  * GNU General Public License version 2 for more details.  A copy is
  * included in the COPYING file that accompanied this code.
 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  * GPL HEADER END
  */
 /*
@@ -83,7 +79,7 @@
 	head = kzalloc_node(sizeof(*head), GFP_NOFS,
 			    cfs_cpt_spread_node(nrs_pol2cptab(policy),
 						nrs_pol2cptid(policy)));
-	if (head == NULL)
+	if (!head)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&head->fh_list);
@@ -104,7 +100,7 @@
 {
 	struct nrs_fifo_head *head = policy->pol_private;
 
-	LASSERT(head != NULL);
+	LASSERT(head);
 	LASSERT(list_empty(&head->fh_list));
 
 	kfree(head);
@@ -169,7 +165,7 @@
 	      list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
 			     nr_u.fifo.fr_list);
 
-	if (likely(!peek && nrq != NULL)) {
+	if (likely(!peek && nrq)) {
 		struct ptlrpc_request *req = container_of(nrq,
 							  struct ptlrpc_request,
 							  rq_nrq);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index f3cb518..f3ac810 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -133,7 +133,8 @@
  * NOTE: this should only be used for NEW requests, and should always be
  *       in the form of a v2 request.  If this is a connection to a v1
  *       target then the first buffer will be stripped because the ptlrpc
- *       data is part of the lustre_msg_v1 header. b=14043 */
+ *       data is part of the lustre_msg_v1 header. b=14043
+ */
 int lustre_msg_size(__u32 magic, int count, __u32 *lens)
 {
 	__u32 size[] = { sizeof(struct ptlrpc_body) };
@@ -157,7 +158,8 @@
 EXPORT_SYMBOL(lustre_msg_size);
 
 /* This is used to determine the size of a buffer that was already packed
- * and will correctly handle the different message formats. */
+ * and will correctly handle the different message formats.
+ */
 int lustre_packed_msg_size(struct lustre_msg *msg)
 {
 	switch (msg->lm_magic) {
@@ -183,7 +185,7 @@
 	for (i = 0; i < count; i++)
 		msg->lm_buflens[i] = lens[i];
 
-	if (bufs == NULL)
+	if (!bufs)
 		return;
 
 	ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
@@ -267,7 +269,8 @@
 
 		spin_unlock(&svcpt->scp_rep_lock);
 		/* If we cannot get anything for some long time, we better
-		 * bail out instead of waiting infinitely */
+		 * bail out instead of waiting infinitely
+		 */
 		lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
 		rc = l_wait_event(svcpt->scp_rep_waitq,
 				  !list_empty(&svcpt->scp_rep_idle), &lwi);
@@ -306,7 +309,7 @@
 	struct ptlrpc_reply_state *rs;
 	int msg_len, rc;
 
-	LASSERT(req->rq_reply_state == NULL);
+	LASSERT(!req->rq_reply_state);
 
 	if ((flags & LPRFL_EARLY_REPLY) == 0) {
 		spin_lock(&req->rq_lock);
@@ -383,7 +386,6 @@
 {
 	int i, offset, buflen, bufcount;
 
-	LASSERT(m != NULL);
 	LASSERT(n >= 0);
 
 	bufcount = m->lm_bufcount;
@@ -488,7 +490,7 @@
 	LASSERT(!rs->rs_difficult || rs->rs_handled);
 	LASSERT(!rs->rs_on_net);
 	LASSERT(!rs->rs_scheduled);
-	LASSERT(rs->rs_export == NULL);
+	LASSERT(!rs->rs_export);
 	LASSERT(rs->rs_nlocks == 0);
 	LASSERT(list_empty(&rs->rs_exp_list));
 	LASSERT(list_empty(&rs->rs_obd_list));
@@ -677,7 +679,8 @@
 EXPORT_SYMBOL(lustre_msg_buflen);
 
 /* NB return the bufcount for lustre_msg_v2 format, so if message is packed
- * in V1 format, the result is one bigger. (add struct ptlrpc_body). */
+ * in V1 format, the result is one bigger (it adds struct ptlrpc_body).
+ */
 int lustre_msg_bufcount(struct lustre_msg *m)
 {
 	switch (m->lm_magic) {
@@ -705,7 +708,7 @@
 		LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
 	}
 
-	if (str == NULL) {
+	if (!str) {
 		CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
 		return NULL;
 	}
@@ -740,7 +743,6 @@
 {
 	void *ptr = NULL;
 
-	LASSERT(msg != NULL);
 	switch (msg->lm_magic) {
 	case LUSTRE_MSG_MAGIC_V2:
 		ptr = lustre_msg_buf_v2(msg, index, min_size);
@@ -799,7 +801,8 @@
 	/* no break */
 	default:
 		/* flags might be printed in debug code while message
-		 * uninitialized */
+		 * uninitialized
+		 */
 		return 0;
 	}
 }
@@ -1032,7 +1035,8 @@
 	/* no break */
 	default:
 		/* status might be printed in debug code while message
-		 * uninitialized */
+		 * uninitialized
+		 */
 		return -EINVAL;
 	}
 }
@@ -1368,7 +1372,8 @@
 		struct ptlrpc_body *pb;
 
 		/* Don't set jobid for ldlm ast RPCs, they've been shrunk.
-		 * See the comment in ptlrpc_request_pack(). */
+		 * See the comment in ptlrpc_request_pack().
+		 */
 		if (!opc || opc == LDLM_BL_CALLBACK ||
 		    opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
 			return;
@@ -1377,7 +1382,7 @@
 				       sizeof(struct ptlrpc_body));
 		LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
 
-		if (jobid != NULL)
+		if (jobid)
 			memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE);
 		else if (pb->pb_jobid[0] == '\0')
 			lustre_get_jobid(pb->pb_jobid);
@@ -1427,7 +1432,7 @@
 	int rc;
 
 	req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -1488,7 +1493,8 @@
 	 * clients and servers without ptlrpc_body_v2 (< 2.3)
 	 * do not swab any fields beyond pb_jobid, as we are
 	 * using this swab function for both ptlrpc_body
-	 * and ptlrpc_body_v2. */
+	 * and ptlrpc_body_v2.
+	 */
 	CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
 }
 EXPORT_SYMBOL(lustre_swab_ptlrpc_body);
@@ -1502,7 +1508,8 @@
 	__swab32s(&ocd->ocd_index);
 	__swab32s(&ocd->ocd_brw_size);
 	/* ocd_blocksize and ocd_inodespace don't need to be swabbed because
-	 * they are 8-byte values */
+	 * they are 8-byte values
+	 */
 	__swab16s(&ocd->ocd_grant_extent);
 	__swab32s(&ocd->ocd_unused);
 	__swab64s(&ocd->ocd_transno);
@@ -1512,7 +1519,8 @@
 	/* Fields after ocd_cksum_types are only accessible by the receiver
 	 * if the corresponding flag in ocd_connect_flags is set. Accessing
 	 * any field after ocd_maxbytes on the receiver without a valid flag
-	 * may result in out-of-bound memory access and kernel oops. */
+	 * may result in out-of-bound memory access and kernel oops.
+	 */
 	if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
 		__swab32s(&ocd->ocd_max_easize);
 	if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
@@ -1848,20 +1856,6 @@
 }
 EXPORT_SYMBOL(lustre_swab_fiemap);
 
-void lustre_swab_idx_info(struct idx_info *ii)
-{
-	__swab32s(&ii->ii_magic);
-	__swab32s(&ii->ii_flags);
-	__swab16s(&ii->ii_count);
-	__swab32s(&ii->ii_attrs);
-	lustre_swab_lu_fid(&ii->ii_fid);
-	__swab64s(&ii->ii_version);
-	__swab64s(&ii->ii_hash_start);
-	__swab64s(&ii->ii_hash_end);
-	__swab16s(&ii->ii_keysize);
-	__swab16s(&ii->ii_recsize);
-}
-
 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
 {
 	__swab32s(&rr->rr_opcode);
@@ -1986,7 +1980,8 @@
 {
 	/* the lock data is a union and the first two fields are always an
 	 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
-	 * data the same way. */
+	 * data the same way.
+	 */
 	__swab64s(&d->l_extent.start);
 	__swab64s(&d->l_extent.end);
 	__swab64s(&d->l_extent.gid);
@@ -2035,16 +2030,6 @@
 }
 EXPORT_SYMBOL(lustre_swab_ldlm_reply);
 
-void lustre_swab_quota_body(struct quota_body *b)
-{
-	lustre_swab_lu_fid(&b->qb_fid);
-	lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
-	__swab32s(&b->qb_flags);
-	__swab64s(&b->qb_count);
-	__swab64s(&b->qb_usage);
-	__swab64s(&b->qb_slv_ver);
-}
-
 /* Dump functions */
 void dump_ioo(struct obd_ioobj *ioo)
 {
@@ -2288,24 +2273,6 @@
 }
 EXPORT_SYMBOL(lustre_swab_hsm_request);
 
-void lustre_swab_update_buf(struct update_buf *ub)
-{
-	__swab32s(&ub->ub_magic);
-	__swab32s(&ub->ub_count);
-}
-EXPORT_SYMBOL(lustre_swab_update_buf);
-
-void lustre_swab_update_reply_buf(struct update_reply *ur)
-{
-	int i;
-
-	__swab32s(&ur->ur_version);
-	__swab32s(&ur->ur_count);
-	for (i = 0; i < ur->ur_count; i++)
-		__swab32s(&ur->ur_lens[i]);
-}
-EXPORT_SYMBOL(lustre_swab_update_reply_buf);
-
 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
 {
 	__swab64s(&msl->msl_flags);
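
The lustre_swab_*() helpers deleted above (and the ones kept) all follow one in-place byte-swap pattern: every fixed-width integer field of a wire struct goes through __swab16s/__swab32s/__swab64s, while opaque byte arrays are left untouched. A hedged sketch of that pattern for a made-up wire record:

#include <linux/swab.h>
#include <linux/types.h>

/* Hypothetical on-the-wire record, for illustration only. */
struct demo_wire_rec {
	__u32 dwr_magic;
	__u32 dwr_flags;
	__u64 dwr_transno;
	__u8  dwr_uuid[16];	/* opaque bytes, never swabbed */
};

static void demo_swab_wire_rec(struct demo_wire_rec *r)
{
	/* swab each integer field in place; byte arrays stay as-is */
	__swab32s(&r->dwr_magic);
	__swab32s(&r->dwr_flags);
	__swab64s(&r->dwr_transno);
}
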
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index fb2d523..44bf026 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -68,7 +68,7 @@
 	struct ptlrpc_request *req;
 
 	req = ptlrpc_prep_ping(obd->u.cli.cl_import);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req->rq_send_state = LUSTRE_IMP_FULL;
@@ -86,7 +86,7 @@
 	struct ptlrpc_request *req;
 
 	req = ptlrpc_prep_ping(imp);
-	if (req == NULL) {
+	if (!req) {
 		CERROR("OOM trying to ping %s->%s\n",
 		       imp->imp_obd->obd_uuid.uuid,
 		       obd2cli_tgt(imp->imp_obd));
@@ -257,11 +257,12 @@
 		/* Wait until the next ping time, or until we're stopped. */
 		time_to_next_wake = pinger_check_timeout(this_ping);
 		/* The ping sent by ptlrpc_send_rpc may get sent out
-		   say .01 second after this.
-		   ptlrpc_pinger_sending_on_import will then set the
-		   next ping time to next_ping + .01 sec, which means
-		   we will SKIP the next ping at next_ping, and the
-		   ping will get sent 2 timeouts from now!  Beware. */
+		 * say .01 second after this.
+		 * ptlrpc_pinger_sending_on_import will then set the
+		 * next ping time to next_ping + .01 sec, which means
+		 * we will SKIP the next ping at next_ping, and the
+		 * ping will get sent 2 timeouts from now!  Beware.
+		 */
 		CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n",
 		       time_to_next_wake,
 		       cfs_time_add(this_ping,
@@ -293,6 +294,7 @@
 int ptlrpc_start_pinger(void)
 {
 	struct l_wait_info lwi = { 0 };
+	struct task_struct *task;
 	int rc;
 
 	if (!thread_is_init(&pinger_thread) &&
@@ -303,10 +305,11 @@
 
 	strcpy(pinger_thread.t_name, "ll_ping");
 
-	rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
-				 "%s", pinger_thread.t_name));
-	if (IS_ERR_VALUE(rc)) {
-		CERROR("cannot start thread: %d\n", rc);
+	task = kthread_run(ptlrpc_pinger_main, &pinger_thread,
+			   pinger_thread.t_name);
+	if (IS_ERR(task)) {
+		rc = PTR_ERR(task);
+		CERROR("cannot start pinger thread: rc = %d\n", rc);
 		return rc;
 	}
 	l_wait_event(pinger_thread.t_ctl_waitq,
@@ -489,7 +492,6 @@
 			break;
 		}
 	}
-	LASSERTF(ti != NULL, "ti is NULL !\n");
 	if (list_empty(&ti->ti_obd_list)) {
 		list_del(&ti->ti_chain);
 		kfree(ti);
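
The ptlrpc_start_pinger() hunk above replaces the old PTR_ERR(kthread_run(...)) / IS_ERR_VALUE() combination with the usual idiom: keep the returned task_struct pointer, test it with IS_ERR(), and only then extract the errno. A minimal sketch of that idiom with an invented thread name and body:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_thread_fn(void *data)
{
	/* thread body would go here */
	return 0;
}

static int demo_start_thread(void *data)
{
	struct task_struct *task;

	task = kthread_run(demo_thread_fn, data, "demo_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* negative errno from kthread_run() */

	return 0;
}
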
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index 8f67e05..6ca26c9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -101,8 +101,6 @@
 	 * registration/unregistration, and NRS core lprocfs operations.
 	 */
 	struct mutex nrs_mutex;
-	/* XXX: This is just for liblustre. Remove the #if defined directive
-	 * when the * "cfs_" prefix is dropped from cfs_list_head. */
 	/**
 	 * List of all policy descriptors registered with NRS core; protected
 	 * by nrs_core::nrs_mutex.
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 60fb0ce..0c36663 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -163,8 +163,6 @@
 {
 	struct ptlrpc_request_set *rq_set = req->rq_set;
 
-	LASSERT(rq_set != NULL);
-
 	wake_up(&rq_set->set_waitq);
 }
 EXPORT_SYMBOL(ptlrpcd_wake);
@@ -176,7 +174,7 @@
 	int		cpt;
 	int		idx;
 
-	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
+	if (req && req->rq_send_state != LUSTRE_IMP_FULL)
 		return &ptlrpcd_rcv;
 
 	cpt = cfs_cpt_current(cfs_cpt_table, 1);
@@ -240,10 +238,11 @@
 
 		req->rq_invalid_rqset = 0;
 		spin_unlock(&req->rq_lock);
-		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+		l_wait_event(req->rq_set_waitq, !req->rq_set, &lwi);
 	} else if (req->rq_set) {
 		/* If we have a valid "rq_set", just reuse it to avoid double
-		 * linked. */
+		 * linking.
+		 */
 		LASSERT(req->rq_phase == RQ_PHASE_NEW);
 		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
 
@@ -321,7 +320,8 @@
 		rc |= ptlrpc_check_set(env, set);
 
 	/* NB: ptlrpc_check_set has already moved completed request at the
-	 * head of seq::set_requests */
+	 * head of seq::set_requests
+	 */
 	list_for_each_safe(pos, tmp, &set->set_requests) {
 		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
 		if (req->rq_phase != RQ_PHASE_COMPLETE)
@@ -339,7 +339,8 @@
 		rc = atomic_read(&set->set_new_count);
 
 		/* If we have nothing to do, check whether we can take some
-		 * work from our partner threads. */
+		 * work from our partner threads.
+		 */
 		if (rc == 0 && pc->pc_npartners > 0) {
 			struct ptlrpcd_ctl *partner;
 			struct ptlrpc_request_set *ps;
@@ -349,12 +350,12 @@
 				partner = pc->pc_partners[pc->pc_cursor++];
 				if (pc->pc_cursor >= pc->pc_npartners)
 					pc->pc_cursor = 0;
-				if (partner == NULL)
+				if (!partner)
 					continue;
 
 				spin_lock(&partner->pc_lock);
 				ps = partner->pc_set;
-				if (ps == NULL) {
+				if (!ps) {
 					spin_unlock(&partner->pc_lock);
 					continue;
 				}
@@ -580,7 +581,7 @@
 	return 0;
 
 out_set:
-	if (pc->pc_set != NULL) {
+	if (pc->pc_set) {
 		struct ptlrpc_request_set *set = pc->pc_set;
 
 		spin_lock(&pc->pc_lock);
@@ -631,7 +632,7 @@
 
 out:
 	if (pc->pc_npartners > 0) {
-		LASSERT(pc->pc_partners != NULL);
+		LASSERT(pc->pc_partners);
 
 		kfree(pc->pc_partners);
 		pc->pc_partners = NULL;
@@ -645,7 +646,7 @@
 	int i;
 	int j;
 
-	if (ptlrpcds != NULL) {
+	if (ptlrpcds) {
 		for (i = 0; i < ptlrpcds_num; i++) {
 			if (!ptlrpcds[i])
 				break;
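
ptlrpcd's idle path above steals work from its partner threads by scanning pc_partners round-robin from pc_cursor, skipping partners whose request set has gone away, which is why those checks become plain boolean tests. A hedged sketch of that cursor-based scan over a partner array, with invented types:

#include <stddef.h>

struct demo_partner {
	int has_work;		/* stands in for a non-empty request set */
};

struct demo_worker {
	struct demo_partner **partners;
	int npartners;
	int cursor;
};

/* Round-robin scan: return the first partner with work, or NULL. */
static struct demo_partner *demo_pick_partner(struct demo_worker *w)
{
	int i;

	for (i = 0; i < w->npartners; i++) {
		struct demo_partner *p = w->partners[w->cursor++];

		if (w->cursor >= w->npartners)
			w->cursor = 0;
		if (p && p->has_work)
			return p;
	}
	return NULL;
}
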
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index db6626c..e5d344d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -114,7 +114,8 @@
 		if (req->rq_transno > last_transno) {
 			/* Since the imp_committed_list is immutable before
 			 * all of its requests being replayed, it's safe to
-			 * use a cursor to accelerate the search */
+			 * use a cursor to accelerate the search
+			 */
 			imp->imp_replay_cursor = imp->imp_replay_cursor->next;
 
 			while (imp->imp_replay_cursor !=
@@ -137,8 +138,9 @@
 	}
 
 	/* All the requests in committed list have been replayed, let's replay
-	 * the imp_replay_list */
-	if (req == NULL) {
+	 * the imp_replay_list
+	 */
+	if (!req) {
 		list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
 			req = list_entry(tmp, struct ptlrpc_request,
 					 rq_replay_list);
@@ -152,15 +154,16 @@
 	/* If need to resend the last sent transno (because a reconnect
 	 * has occurred), then stop on the matching req and send it again.
 	 * If, however, the last sent transno has been committed then we
-	 * continue replay from the next request. */
-	if (req != NULL && imp->imp_resend_replay)
+	 * continue replay from the next request.
+	 */
+	if (req && imp->imp_resend_replay)
 		lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
 
 	spin_lock(&imp->imp_lock);
 	imp->imp_resend_replay = 0;
 	spin_unlock(&imp->imp_lock);
 
-	if (req != NULL) {
+	if (req) {
 		rc = ptlrpc_replay_req(req);
 		if (rc) {
 			CERROR("recovery replay error %d for req %llu\n",
@@ -249,7 +252,8 @@
 	}
 
 	/* Wait for recovery to complete and resend. If evicted, then
-	   this request will be errored out later.*/
+	 * this request will be errored out later.
+	 */
 	spin_lock(&failed_req->rq_lock);
 	if (!failed_req->rq_no_resend)
 		failed_req->rq_resend = 1;
@@ -260,7 +264,7 @@
  * Administratively active/deactive a client.
  * This should only be called by the ioctl interface, currently
  *  - the lctl deactivate and activate commands
- *  - echo 0/1 >> /proc/osc/XXX/active
+ *  - echo 0/1 >> /sys/fs/lustre/osc/XXX/active
  *  - client umount -f (ll_umount_begin)
  */
 int ptlrpc_set_import_active(struct obd_import *imp, int active)
@@ -271,13 +275,15 @@
 	LASSERT(obd);
 
 	/* When deactivating, mark import invalid, and abort in-flight
-	 * requests. */
+	 * requests.
+	 */
 	if (!active) {
 		LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
 			      obd2cli_tgt(imp->imp_obd));
 
 		/* set before invalidate to avoid messages about imp_inval
-		 * set without imp_deactive in ptlrpc_import_delay_req */
+		 * set without imp_deactive in ptlrpc_import_delay_req
+		 */
 		spin_lock(&imp->imp_lock);
 		imp->imp_deactive = 1;
 		spin_unlock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 39f5261..85f8f7c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -94,7 +94,7 @@
 	LASSERT(number < SPTLRPC_POLICY_MAX);
 
 	write_lock(&policy_lock);
-	if (unlikely(policies[number] == NULL)) {
+	if (unlikely(!policies[number])) {
 		write_unlock(&policy_lock);
 		CERROR("%s: already unregistered\n", policy->sp_name);
 		return -EINVAL;
@@ -126,11 +126,11 @@
 		policy = policies[number];
 		if (policy && !try_module_get(policy->sp_owner))
 			policy = NULL;
-		if (policy == NULL)
+		if (!policy)
 			flag = atomic_read(&loaded);
 		read_unlock(&policy_lock);
 
-		if (policy != NULL || flag != 0 ||
+		if (policy || flag != 0 ||
 		    number != SPTLRPC_POLICY_GSS)
 			break;
 
@@ -327,7 +327,7 @@
 	}
 
 	*sec = sptlrpc_import_sec_ref(imp);
-	if (*sec == NULL) {
+	if (!*sec) {
 		CERROR("import %p (%s) with no sec\n",
 		       imp, ptlrpc_import_state_name(imp->imp_state));
 		return -EACCES;
@@ -429,7 +429,7 @@
 	reqmsg_size = req->rq_reqlen;
 	if (reqmsg_size != 0) {
 		reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS);
-		if (reqmsg == NULL)
+		if (!reqmsg)
 			return -ENOMEM;
 		memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
 	}
@@ -445,7 +445,8 @@
 
 	/* alloc new request buffer
 	 * we don't need to alloc reply buffer here, leave it to the
-	 * rest procedure of ptlrpc */
+	 * rest of the ptlrpc procedure
+	 */
 	if (reqmsg_size != 0) {
 		rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
 		if (!rc) {
@@ -798,7 +799,8 @@
 	spin_unlock(&sec->ps_lock);
 
 	/* force SVC_NULL for context initiation rpc, SVC_INTG for context
-	 * destruction rpc */
+	 * destruction rpc
+	 */
 	if (unlikely(req->rq_ctx_init))
 		flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
 	else if (unlikely(req->rq_ctx_fini))
@@ -938,7 +940,7 @@
 	LASSERT(ctx->cc_sec);
 	LASSERT(req->rq_repbuf);
 	LASSERT(req->rq_repdata);
-	LASSERT(req->rq_repmsg == NULL);
+	LASSERT(!req->rq_repmsg);
 
 	req->rq_rep_swab_mask = 0;
 
@@ -1000,8 +1002,8 @@
 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
 {
 	LASSERT(req->rq_repbuf);
-	LASSERT(req->rq_repdata == NULL);
-	LASSERT(req->rq_repmsg == NULL);
+	LASSERT(!req->rq_repdata);
+	LASSERT(!req->rq_repmsg);
 	LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
 
 	if (req->rq_reply_off == 0 &&
@@ -1046,13 +1048,13 @@
 	int rc;
 
 	early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
-	if (early_req == NULL)
+	if (!early_req)
 		return -ENOMEM;
 
 	early_size = req->rq_nob_received;
 	early_bufsz = size_roundup_power2(early_size);
 	early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
-	if (early_buf == NULL) {
+	if (!early_buf) {
 		rc = -ENOMEM;
 		goto err_req;
 	}
@@ -1067,8 +1069,8 @@
 	}
 
 	LASSERT(req->rq_repbuf);
-	LASSERT(req->rq_repdata == NULL);
-	LASSERT(req->rq_repmsg == NULL);
+	LASSERT(!req->rq_repdata);
+	LASSERT(!req->rq_repmsg);
 
 	if (req->rq_reply_off != 0) {
 		CERROR("early reply with offset %u\n", req->rq_reply_off);
@@ -1354,12 +1356,12 @@
 
 	might_sleep();
 
-	if (imp == NULL)
+	if (!imp)
 		return 0;
 
 	conn = imp->imp_connection;
 
-	if (svc_ctx == NULL) {
+	if (!svc_ctx) {
 		struct client_obd *cliobd = &imp->imp_obd->u.cli;
 		/*
 		 * normal import, determine flavor from rule set, except
@@ -1447,11 +1449,11 @@
 {
 	struct ptlrpc_sec *sec;
 
-	if (imp == NULL)
+	if (!imp)
 		return;
 
 	sec = sptlrpc_import_sec_ref(imp);
-	if (sec == NULL)
+	if (!sec)
 		return;
 
 	sec_cop_flush_ctx_cache(sec, uid, grace, force);
@@ -1484,7 +1486,7 @@
 	LASSERT(ctx);
 	LASSERT(ctx->cc_sec);
 	LASSERT(ctx->cc_sec->ps_policy);
-	LASSERT(req->rq_reqmsg == NULL);
+	LASSERT(!req->rq_reqmsg);
 	LASSERT_ATOMIC_POS(&ctx->cc_refcount);
 
 	policy = ctx->cc_sec->ps_policy;
@@ -1515,7 +1517,7 @@
 	LASSERT(ctx->cc_sec->ps_policy);
 	LASSERT_ATOMIC_POS(&ctx->cc_refcount);
 
-	if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
+	if (!req->rq_reqbuf && !req->rq_clrbuf)
 		return;
 
 	policy = ctx->cc_sec->ps_policy;
@@ -1632,7 +1634,7 @@
 	LASSERT(ctx->cc_sec->ps_policy);
 	LASSERT_ATOMIC_POS(&ctx->cc_refcount);
 
-	if (req->rq_repbuf == NULL)
+	if (!req->rq_repbuf)
 		return;
 	LASSERT(req->rq_repbuf_len);
 
@@ -1684,12 +1686,13 @@
 {
 	struct sptlrpc_flavor flavor;
 
-	if (exp == NULL)
+	if (!exp)
 		return 0;
 
 	/* client side export has no imp_reverse, skip
-	 * FIXME maybe we should check flavor this as well??? */
-	if (exp->exp_imp_reverse == NULL)
+	 * FIXME maybe we should check this flavor as well???
+	 */
+	if (!exp->exp_imp_reverse)
 		return 0;
 
 	/* don't care about ctx fini rpc */
@@ -1702,11 +1705,13 @@
 	 * the first req with the new flavor, then treat it as current flavor,
 	 * adapt reverse sec according to it.
 	 * note the first rpc with new flavor might not be with root ctx, in
-	 * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
+	 * which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
+	 */
 	if (unlikely(exp->exp_flvr_changed) &&
 	    flavor_allowed(&exp->exp_flvr_old[1], req)) {
 		/* make the new flavor as "current", and old ones as
-		 * about-to-expire */
+		 * about-to-expire
+		 */
 		CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
 		       exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
 		flavor = exp->exp_flvr_old[1];
@@ -1742,10 +1747,12 @@
 	}
 
 	/* if it equals to the current flavor, we accept it, but need to
-	 * dealing with reverse sec/ctx */
+	 * deal with the reverse sec/ctx
+	 */
 	if (likely(flavor_allowed(&exp->exp_flvr, req))) {
 		/* most cases should return here, we only interested in
-		 * gss root ctx init */
+		 * gss root ctx init
+		 */
 		if (!req->rq_auth_gss || !req->rq_ctx_init ||
 		    (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
 		     !req->rq_auth_usr_ost)) {
@@ -1755,7 +1762,8 @@
 
 		/* if flavor just changed, we should not proceed, just leave
 		 * it and current flavor will be discovered and replaced
-		 * shortly, and let _this_ rpc pass through */
+		 * shortly, and let _this_ rpc pass through
+		 */
 		if (exp->exp_flvr_changed) {
 			LASSERT(exp->exp_flvr_adapt);
 			spin_unlock(&exp->exp_lock);
@@ -1809,7 +1817,8 @@
 	}
 
 	/* now it doesn't match the current flavor, the only chance we can
-	 * accept it is match the old flavors which is not expired. */
+	 * accept it is if it matches an old flavor which has not expired.
+	 */
 	if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
 		if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
 			if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
@@ -1915,9 +1924,9 @@
 	int rc;
 
 	LASSERT(msg);
-	LASSERT(req->rq_reqmsg == NULL);
-	LASSERT(req->rq_repmsg == NULL);
-	LASSERT(req->rq_svc_ctx == NULL);
+	LASSERT(!req->rq_reqmsg);
+	LASSERT(!req->rq_repmsg);
+	LASSERT(!req->rq_svc_ctx);
 
 	req->rq_req_swab_mask = 0;
 
@@ -1994,7 +2003,7 @@
 
 		/* failed alloc, try emergency pool */
 		rs = lustre_get_emerg_rs(svcpt);
-		if (rs == NULL)
+		if (!rs)
 			return -ENOMEM;
 
 		req->rq_reply_state = rs;
@@ -2059,7 +2068,7 @@
 {
 	struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
 
-	if (ctx != NULL)
+	if (ctx)
 		atomic_inc(&ctx->sc_refcount);
 }
 
@@ -2067,7 +2076,7 @@
 {
 	struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
 
-	if (ctx == NULL)
+	if (!ctx)
 		return;
 
 	LASSERT_ATOMIC_POS(&ctx->sc_refcount);
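
Several of the sec.c hunks touch the policy table lookup, where a policy is found under a read lock and its owning module is pinned with try_module_get() before the lock is dropped. A hedged sketch of that lookup-and-pin pattern with invented names:

#include <linux/module.h>
#include <linux/spinlock.h>

#define DEMO_POLICY_MAX 8

struct demo_policy {
	struct module *dp_owner;
};

static struct demo_policy *demo_policies[DEMO_POLICY_MAX];
static DEFINE_RWLOCK(demo_policy_lock);

/* Return a module-pinned policy, or NULL if absent or unloading. */
static struct demo_policy *demo_policy_get(unsigned int idx)
{
	struct demo_policy *policy;

	if (idx >= DEMO_POLICY_MAX)
		return NULL;

	read_lock(&demo_policy_lock);
	policy = demo_policies[idx];
	if (policy && !try_module_get(policy->dp_owner))
		policy = NULL;	/* owner module is going away */
	read_unlock(&demo_policy_lock);

	return policy;
}
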
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 6152c1b..72d5b9b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -120,7 +120,7 @@
 } page_pools;
 
 /*
- * /proc/fs/lustre/sptlrpc/encrypt_page_pools
+ * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools
  */
 int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 {
@@ -195,7 +195,7 @@
 
 	while (npages--) {
 		LASSERT(page_pools.epp_pools[p_idx]);
-		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+		LASSERT(page_pools.epp_pools[p_idx][g_idx]);
 
 		__free_page(page_pools.epp_pools[p_idx][g_idx]);
 		page_pools.epp_pools[p_idx][g_idx] = NULL;
@@ -304,7 +304,6 @@
 static inline void enc_pools_wakeup(void)
 {
 	assert_spin_locked(&page_pools.epp_lock);
-	LASSERT(page_pools.epp_waitqlen >= 0);
 
 	if (unlikely(page_pools.epp_waitqlen)) {
 		LASSERT(waitqueue_active(&page_pools.epp_waitq));
@@ -317,7 +316,7 @@
 	int p_idx, g_idx;
 	int i;
 
-	if (desc->bd_enc_iov == NULL)
+	if (!desc->bd_enc_iov)
 		return;
 
 	LASSERT(desc->bd_iov_count > 0);
@@ -332,9 +331,9 @@
 	LASSERT(page_pools.epp_pools[p_idx]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
+		LASSERT(desc->bd_enc_iov[i].kiov_page);
 		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
-		LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
+		LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
 
 		page_pools.epp_pools[p_idx][g_idx] =
 					desc->bd_enc_iov[i].kiov_page;
@@ -413,7 +412,7 @@
 	page_pools.epp_st_max_wait = 0;
 
 	enc_pools_alloc();
-	if (page_pools.epp_pools == NULL)
+	if (!page_pools.epp_pools)
 		return -ENOMEM;
 
 	register_shrinker(&pools_shrinker);
@@ -476,7 +475,7 @@
 	int			  size = msg->lm_buflens[offset];
 
 	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
-	if (bsd == NULL) {
+	if (!bsd) {
 		CERROR("Invalid bulk sec desc: size %d\n", size);
 		return -EINVAL;
 	}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 4b0b81c..93b91bf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -78,7 +78,7 @@
 
 	memset(flvr, 0, sizeof(*flvr));
 
-	if (str == NULL || str[0] == '\0') {
+	if (!str || str[0] == '\0') {
 		flvr->sf_rpc = SPTLRPC_FLVR_INVALID;
 		return 0;
 	}
@@ -103,7 +103,7 @@
 			 * format: plain-hash:<hash_alg>
 			 */
 			alg = strchr(bulk, ':');
-			if (alg == NULL)
+			if (!alg)
 				goto err_out;
 			*alg++ = '\0';
 
@@ -166,7 +166,7 @@
 	sptlrpc_rule_init(rule);
 
 	flavor = strchr(param, '=');
-	if (flavor == NULL) {
+	if (!flavor) {
 		CERROR("invalid param, no '='\n");
 		return -EINVAL;
 	}
@@ -216,7 +216,7 @@
 static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
 {
 	LASSERT(rset->srs_nslot ||
-		(rset->srs_nrule == 0 && rset->srs_rules == NULL));
+		(rset->srs_nrule == 0 && !rset->srs_rules));
 
 	if (rset->srs_nslot) {
 		kfree(rset->srs_rules);
@@ -241,7 +241,7 @@
 
 	/* better use realloc() if available */
 	rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS);
-	if (rules == NULL)
+	if (!rules)
 		return -ENOMEM;
 
 	if (rset->srs_nrule) {
@@ -450,7 +450,7 @@
 	}
 
 	/* if we didn't find the pattern, treat the whole string as fsname */
-	if (ptr == NULL)
+	if (!ptr)
 		len = strlen(tgt);
 	else
 		len = ptr - tgt;
@@ -579,13 +579,13 @@
 	int rc;
 
 	target = lustre_cfg_string(lcfg, 1);
-	if (target == NULL) {
+	if (!target) {
 		CERROR("missing target name\n");
 		return -EINVAL;
 	}
 
 	param = lustre_cfg_string(lcfg, 2);
-	if (param == NULL) {
+	if (!param) {
 		CERROR("missing parameter\n");
 		return -EINVAL;
 	}
@@ -603,12 +603,12 @@
 	if (rc)
 		return -EINVAL;
 
-	if (conf == NULL) {
+	if (!conf) {
 		target2fsname(target, fsname, sizeof(fsname));
 
 		mutex_lock(&sptlrpc_conf_lock);
 		conf = sptlrpc_conf_get(fsname, 0);
-		if (conf == NULL) {
+		if (!conf) {
 			CERROR("can't find conf\n");
 			rc = -ENOMEM;
 		} else {
@@ -638,7 +638,7 @@
 	int len;
 
 	ptr = strrchr(logname, '-');
-	if (ptr == NULL || strcmp(ptr, "-sptlrpc")) {
+	if (!ptr || strcmp(ptr, "-sptlrpc")) {
 		CERROR("%s is not a sptlrpc config log\n", logname);
 		return -EINVAL;
 	}
@@ -772,7 +772,7 @@
 	mutex_lock(&sptlrpc_conf_lock);
 
 	conf = sptlrpc_conf_get(name, 0);
-	if (conf == NULL)
+	if (!conf)
 		goto out;
 
 	/* convert uuid name (supposed end with _UUID) to target name */
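
The sec_config.c hunks above clean up the flavor/rule parsing, which splits strings of roughly the form target=flavor-bulk:alg on '=' and ':' with strchr() before interpreting the pieces. A hedged, userspace-style sketch of that split (the rule syntax here is only an approximation of the real one):

#include <stdio.h>
#include <string.h>

/* Split a "target=flavor:alg" style rule in place. */
static int demo_parse_rule(char *param)
{
	char *flavor, *alg;

	flavor = strchr(param, '=');
	if (!flavor)
		return -1;	/* no '=' separator */
	*flavor++ = '\0';

	alg = strchr(flavor, ':');
	if (alg)
		*alg++ = '\0';	/* optional hash algorithm suffix */

	printf("target=%s flavor=%s alg=%s\n",
	       param, flavor, alg ? alg : "(none)");
	return 0;
}

int main(void)
{
	char rule[] = "mdt=plain-hash:sha1";

	return demo_parse_rule(rule);
}
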
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index 6e58d5f..d5afc4f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -166,11 +166,13 @@
 		 * is not optimal. we perhaps want to use balanced binary tree
 		 * to trace each sec as order of expiry time.
 		 * another issue here is we wakeup as fixed interval instead of
-		 * according to each sec's expiry time */
+		 * according to each sec's expiry time
+		 */
 		mutex_lock(&sec_gc_mutex);
 		list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
 			/* if someone is waiting to be deleted, let it
-			 * proceed as soon as possible. */
+			 * proceed as soon as possible.
+			 */
 			if (atomic_read(&sec_gc_wait_del)) {
 				CDEBUG(D_SEC, "deletion pending, start over\n");
 				mutex_unlock(&sec_gc_mutex);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
index bda9a77..e610a8d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -82,7 +82,7 @@
 
 	if (cli->cl_import)
 		sec = sptlrpc_import_sec_ref(cli->cl_import);
-	if (sec == NULL)
+	if (!sec)
 		goto out;
 
 	sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));
@@ -121,7 +121,7 @@
 
 	if (cli->cl_import)
 		sec = sptlrpc_import_sec_ref(cli->cl_import);
-	if (sec == NULL)
+	if (!sec)
 		goto out;
 
 	if (sec->ps_policy->sp_cops->display)
@@ -178,7 +178,7 @@
 {
 	int rc;
 
-	LASSERT(sptlrpc_debugfs_dir == NULL);
+	LASSERT(!sptlrpc_debugfs_dir);
 
 	sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root,
 						sptlrpc_lprocfs_vars, NULL);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index ebfa609..40e5349 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -250,7 +250,7 @@
 		alloc_size = size_roundup_power2(newmsg_size);
 
 		newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
-		if (newbuf == NULL)
+		if (!newbuf)
 			return -ENOMEM;
 
 		/* Must lock this, so that otherwise unprotected change of
@@ -258,7 +258,8 @@
 		 * imp_replay_list traversing threads. See LU-3333
 		 * This is a bandaid at best, we really need to deal with this
 		 * in request enlarging code before unpacking that's already
-		 * there */
+		 * there
+		 */
 		if (req->rq_import)
 			spin_lock(&req->rq_import->imp_lock);
 		memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
@@ -319,7 +320,7 @@
 		LASSERT(rs->rs_size >= rs_size);
 	} else {
 		rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
-		if (rs == NULL)
+		if (!rs)
 			return -ENOMEM;
 
 		rs->rs_size = rs_size;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 905a414..6276bf5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -104,7 +104,7 @@
 		return -EPROTO;
 
 	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
-	if (bsd == NULL) {
+	if (!bsd) {
 		CERROR("bulk sec desc has short size %d\n",
 		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
 		return -EPROTO;
@@ -227,7 +227,7 @@
 	swabbed = ptlrpc_rep_need_swab(req);
 
 	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
-	if (phdr == NULL) {
+	if (!phdr) {
 		CERROR("missing plain header\n");
 		return -EPROTO;
 	}
@@ -264,7 +264,8 @@
 		}
 	} else {
 		/* whether we sent with bulk or not, we expect the same
-		 * in reply, except for early reply */
+		 * in reply, except for early reply
+		 */
 		if (!req->rq_early &&
 		    !equi(req->rq_pack_bulk == 1,
 			  phdr->ph_flags & PLAIN_FL_BULK)) {
@@ -419,7 +420,7 @@
 	LASSERT(sec->ps_import);
 	LASSERT(atomic_read(&sec->ps_refcount) == 0);
 	LASSERT(atomic_read(&sec->ps_nctx) == 0);
-	LASSERT(plsec->pls_ctx == NULL);
+	LASSERT(!plsec->pls_ctx);
 
 	class_import_put(sec->ps_import);
 
@@ -468,7 +469,7 @@
 	/* install ctx immediately if this is a reverse sec */
 	if (svc_ctx) {
 		ctx = plain_sec_install_ctx(plsec);
-		if (ctx == NULL) {
+		if (!ctx) {
 			plain_destroy_sec(sec);
 			return NULL;
 		}
@@ -492,7 +493,7 @@
 		atomic_inc(&ctx->cc_refcount);
 	read_unlock(&plsec->pls_lock);
 
-	if (unlikely(ctx == NULL))
+	if (unlikely(!ctx))
 		ctx = plain_sec_install_ctx(plsec);
 
 	return ctx;
@@ -665,7 +666,7 @@
 		newbuf_size = size_roundup_power2(newbuf_size);
 
 		newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
-		if (newbuf == NULL)
+		if (!newbuf)
 			return -ENOMEM;
 
 		/* Must lock this, so that otherwise unprotected change of
@@ -673,7 +674,8 @@
 		 * imp_replay_list traversing threads. See LU-3333
 		 * This is a bandaid at best, we really need to deal with this
 		 * in request enlarging code before unpacking that's already
-		 * there */
+		 * there
+		 */
 		if (req->rq_import)
 			spin_lock(&req->rq_import->imp_lock);
 
@@ -732,7 +734,7 @@
 	swabbed = ptlrpc_req_need_swab(req);
 
 	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
-	if (phdr == NULL) {
+	if (!phdr) {
 		CERROR("missing plain header\n");
 		return -EPROTO;
 	}
@@ -801,7 +803,7 @@
 		LASSERT(rs->rs_size >= rs_size);
 	} else {
 		rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
-		if (rs == NULL)
+		if (!rs)
 			return -ENOMEM;
 
 		rs->rs_size = rs_size;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 8598300..6f71ecc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -77,7 +77,7 @@
 	rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS,
 			    cfs_cpt_spread_node(svc->srv_cptable,
 						svcpt->scp_cpt));
-	if (rqbd == NULL)
+	if (!rqbd)
 		return NULL;
 
 	rqbd->rqbd_svcpt = svcpt;
@@ -89,7 +89,7 @@
 						svcpt->scp_cpt,
 						svc->srv_buf_size,
 						GFP_KERNEL);
-	if (rqbd->rqbd_buffer == NULL) {
+	if (!rqbd->rqbd_buffer) {
 		kfree(rqbd);
 		return NULL;
 	}
@@ -144,13 +144,14 @@
 
 	for (i = 0; i < svc->srv_nbuf_per_group; i++) {
 		/* NB: another thread might have recycled enough rqbds, we
-		 * need to make sure it wouldn't over-allocate, see LU-1212. */
+		 * need to make sure it wouldn't over-allocate, see LU-1212.
+		 */
 		if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
 			break;
 
 		rqbd = ptlrpc_alloc_rqbd(svcpt);
 
-		if (rqbd == NULL) {
+		if (!rqbd) {
 			CERROR("%s: Can't allocate request buffer\n",
 			       svc->srv_name);
 			rc = -ENOMEM;
@@ -322,7 +323,8 @@
 	list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
 
 	/* Don't complain if no request buffers are posted right now; LNET
-	 * won't drop requests because we set the portal lazy! */
+	 * won't drop requests because we set the portal lazy!
+	 */
 
 	spin_unlock(&svcpt->scp_lock);
 
@@ -363,13 +365,15 @@
 	init = max_t(int, init, tc->tc_nthrs_init);
 
 	/* NB: please see comments in lustre_lnet.h for definition
-	 * details of these members */
+	 * details of these members
+	 */
 	LASSERT(tc->tc_nthrs_max != 0);
 
 	if (tc->tc_nthrs_user != 0) {
 		/* In case there is a reason to test a service with many
 		 * threads, we give a less strict check here, it can
-		 * be up to 8 * nthrs_max */
+		 * be up to 8 * nthrs_max
+		 */
 		total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
 		nthrs = total / svc->srv_ncpts;
 		init = max(init, nthrs);
@@ -379,7 +383,8 @@
 	total = tc->tc_nthrs_max;
 	if (tc->tc_nthrs_base == 0) {
 		/* don't care about base threads number per partition,
-		 * this is most for non-affinity service */
+		 * this is mostly for non-affinity services
+		 */
 		nthrs = total / svc->srv_ncpts;
 		goto out;
 	}
@@ -390,7 +395,8 @@
 
 		/* NB: Increase the base number if it's single partition
 		 * and total number of cores/HTs is larger or equal to 4.
-		 * result will always < 2 * nthrs_base */
+		 * result will always < 2 * nthrs_base
+		 */
 		weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
 		for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
 			    (tc->tc_nthrs_base >> i) != 0; i++)
@@ -490,7 +496,7 @@
 	array->paa_reqs_array =
 		kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
 			     cfs_cpt_spread_node(svc->srv_cptable, cpt));
-	if (array->paa_reqs_array == NULL)
+	if (!array->paa_reqs_array)
 		return -ENOMEM;
 
 	for (index = 0; index < size; index++)
@@ -499,14 +505,15 @@
 	array->paa_reqs_count =
 		kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
 			     cfs_cpt_spread_node(svc->srv_cptable, cpt));
-	if (array->paa_reqs_count == NULL)
+	if (!array->paa_reqs_count)
 		goto free_reqs_array;
 
 	setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
 		    (unsigned long)svcpt);
 
 	/* At SOW, service time should be quick; 10s seems generous. If client
-	 * timeout is less than this, we'll be sending an early reply. */
+	 * timeout is less than this, we'll be sending an early reply.
+	 */
 	at_init(&svcpt->scp_at_estimate, 10, 0);
 
 	/* assign this before call ptlrpc_grow_req_bufs */
@@ -514,7 +521,8 @@
 	/* Now allocate the request buffers, but don't post them now */
 	rc = ptlrpc_grow_req_bufs(svcpt, 0);
 	/* We shouldn't be under memory pressure at startup, so
-	 * fail if we can't allocate all our buffers at this time. */
+	 * fail if we can't allocate all our buffers at this time.
+	 */
 	if (rc != 0)
 		goto free_reqs_count;
 
@@ -556,14 +564,14 @@
 	LASSERT(conf->psc_thr.tc_ctx_tags != 0);
 
 	cptable = cconf->cc_cptable;
-	if (cptable == NULL)
+	if (!cptable)
 		cptable = cfs_cpt_table;
 
 	if (!conf->psc_thr.tc_cpu_affinity) {
 		ncpts = 1;
 	} else {
 		ncpts = cfs_cpt_number(cptable);
-		if (cconf->cc_pattern != NULL) {
+		if (cconf->cc_pattern) {
 			struct cfs_expr_list *el;
 
 			rc = cfs_expr_list_parse(cconf->cc_pattern,
@@ -632,11 +640,11 @@
 		if (!conf->psc_thr.tc_cpu_affinity)
 			cpt = CFS_CPT_ANY;
 		else
-			cpt = cpts != NULL ? cpts[i] : i;
+			cpt = cpts ? cpts[i] : i;
 
 		svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS,
 				     cfs_cpt_spread_node(cptable, cpt));
-		if (svcpt == NULL) {
+		if (!svcpt) {
 			rc = -ENOMEM;
 			goto failed;
 		}
@@ -696,7 +704,8 @@
 	LASSERT(list_empty(&req->rq_timed_list));
 
 	 /* DEBUG_REQ() assumes the reply state of a request with a valid
-	  * ref will not be destroyed until that reference is dropped. */
+	  * ref will not be destroyed until that reference is dropped.
+	  */
 	ptlrpc_req_drop_rs(req);
 
 	sptlrpc_svc_ctx_decref(req);
@@ -704,7 +713,8 @@
 	if (req != &req->rq_rqbd->rqbd_req) {
 		/* NB request buffers use an embedded
 		 * req if the incoming req unlinked the
-		 * MD; this isn't one of them! */
+		 * MD; this isn't one of them!
+		 */
 		ptlrpc_request_cache_free(req);
 	}
 }
@@ -728,7 +738,8 @@
 	if (req->rq_at_linked) {
 		spin_lock(&svcpt->scp_at_lock);
 		/* recheck with lock, in case it's unlinked by
-		 * ptlrpc_at_check_timed() */
+		 * ptlrpc_at_check_timed()
+		 */
 		if (likely(req->rq_at_linked))
 			ptlrpc_at_remove_timed(req);
 		spin_unlock(&svcpt->scp_at_lock);
@@ -755,7 +766,8 @@
 		svcpt->scp_hist_nrqbds++;
 
 		/* cull some history?
-		 * I expect only about 1 or 2 rqbds need to be recycled here */
+		 * I expect only about 1 or 2 rqbds need to be recycled here
+		 */
 		while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
 			rqbd = list_entry(svcpt->scp_hist_rqbds.next,
 					      struct ptlrpc_request_buffer_desc,
@@ -765,7 +777,8 @@
 			svcpt->scp_hist_nrqbds--;
 
 			/* remove rqbd's reqs from svc's req history while
-			 * I've got the service lock */
+			 * I've got the service lock
+			 */
 			list_for_each(tmp, &rqbd->rqbd_reqs) {
 				req = list_entry(tmp, struct ptlrpc_request,
 						     rq_list);
@@ -846,7 +859,7 @@
 
 	ptlrpc_nrs_req_finalize(req);
 
-	if (req->rq_export != NULL)
+	if (req->rq_export)
 		class_export_rpc_dec(req->rq_export);
 
 	ptlrpc_server_finish_request(svcpt, req);
@@ -869,13 +882,13 @@
 			  req->rq_export->exp_conn_cnt);
 		return -EEXIST;
 	}
-	if (unlikely(obd == NULL || obd->obd_fail)) {
+	if (unlikely(!obd || obd->obd_fail)) {
 		/*
 		 * Failing over, don't handle any more reqs, send
 		 * error response instead.
 		 */
 		CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
-		       req, (obd != NULL) ? obd->obd_name : "unknown");
+		       req, obd ? obd->obd_name : "unknown");
 		rc = -ENODEV;
 	} else if (lustre_msg_get_flags(req->rq_reqmsg) &
 		   (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) {
@@ -942,7 +955,8 @@
 	div_u64_rem(req->rq_deadline, array->paa_size, &index);
 	if (array->paa_reqs_count[index] > 0) {
 		/* latest rpcs will have the latest deadlines in the list,
-		 * so search backward. */
+		 * so search backward.
+		 */
 		list_for_each_entry_reverse(rq,
 						&array->paa_reqs_array[index],
 						rq_timed_list) {
@@ -1003,7 +1017,8 @@
 	int rc;
 
 	/* deadline is when the client expects us to reply, margin is the
-	   difference between clients' and servers' expectations */
+	 * difference between clients' and servers' expectations
+	 */
 	DEBUG_REQ(D_ADAPTTO, req,
 		  "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
 		  AT_OFF ? "AT off - not " : "",
@@ -1027,12 +1042,14 @@
 	}
 
 	/* Fake our processing time into the future to ask the clients
-	 * for some extra amount of time */
+	 * for some extra amount of time
+	 */
 	at_measured(&svcpt->scp_at_estimate, at_extra +
 		    ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
 
 	/* Check to see if we've actually increased the deadline -
-	 * we may be past adaptive_max */
+	 * we may be past adaptive_max
+	 */
 	if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
 	    at_get(&svcpt->scp_at_estimate)) {
 		DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
@@ -1044,7 +1061,7 @@
 	newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate);
 
 	reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
-	if (reqcopy == NULL)
+	if (!reqcopy)
 		return -ENOMEM;
 	reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS);
 	if (!reqmsg) {
@@ -1074,7 +1091,7 @@
 	/* Connection ref */
 	reqcopy->rq_export = class_conn2export(
 				     lustre_msg_get_handle(reqcopy->rq_reqmsg));
-	if (reqcopy->rq_export == NULL) {
+	if (!reqcopy->rq_export) {
 		rc = -ENODEV;
 		goto out;
 	}
@@ -1102,7 +1119,8 @@
 	}
 
 	/* Free the (early) reply state from lustre_pack_reply.
-	   (ptlrpc_send_reply takes it's own rs ref, so this is safe here) */
+	 * (ptlrpc_send_reply takes its own rs ref, so this is safe here)
+	 */
 	ptlrpc_req_drop_rs(reqcopy);
 
 out_put:
@@ -1117,7 +1135,8 @@
 }
 
 /* Send early replies to everybody expiring within at_early_margin
-   asking for at_extra time */
+ * asking for at_extra time
+ */
 static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
 {
 	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
@@ -1152,7 +1171,8 @@
 	}
 
 	/* We're close to a timeout, and we don't know how much longer the
-	   server will take. Send early replies to everyone expiring soon. */
+	 * server will take. Send early replies to everyone expiring soon.
+	 */
 	INIT_LIST_HEAD(&work_list);
 	deadline = -1;
 	div_u64_rem(array->paa_deadline, array->paa_size, &index);
@@ -1194,7 +1214,8 @@
 	       first, at_extra, counter);
 	if (first < 0) {
 		/* We're already past request deadlines before we even get a
-		   chance to send early replies */
+		 * chance to send early replies
+		 */
 		LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
 			      svcpt->scp_service->srv_name);
 		CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
@@ -1204,7 +1225,8 @@
 	}
 
 	/* we took additional refcount so entries can't be deleted from list, no
-	 * locking is needed */
+	 * locking is needed
+	 */
 	while (!list_empty(&work_list)) {
 		rq = list_entry(work_list.next, struct ptlrpc_request,
 				    rq_timed_list);
@@ -1237,7 +1259,8 @@
 	if (req->rq_export && req->rq_ops) {
 		/* Perform request specific check. We should do this check
 		 * before the request is added into exp_hp_rpcs list otherwise
-		 * it may hit swab race at LU-1044. */
+		 * it may hit swab race at LU-1044.
+		 */
 		if (req->rq_ops->hpreq_check) {
 			rc = req->rq_ops->hpreq_check(req);
 			/**
@@ -1272,7 +1295,8 @@
 {
 	if (req->rq_export && req->rq_ops) {
 		/* refresh lock timeout again so that client has more
-		 * room to send lock cancel RPC. */
+		 * room to send lock cancel RPC.
+		 */
 		if (req->rq_ops->hpreq_fini)
 			req->rq_ops->hpreq_fini(req);
 
@@ -1316,7 +1340,7 @@
 		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
 		/* leave just 1 thread for normal RPCs */
 		running = PTLRPC_NTHRS_INIT;
-		if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
+		if (svcpt->scp_service->srv_ops.so_hpreq_handler)
 			running += 1;
 	}
 
@@ -1355,7 +1379,7 @@
 		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
 		/* leave just 1 thread for normal RPCs */
 		running = PTLRPC_NTHRS_INIT;
-		if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
+		if (svcpt->scp_service->srv_ops.so_hpreq_handler)
 			running += 1;
 	}
 
@@ -1405,7 +1429,7 @@
 
 	if (ptlrpc_server_high_pending(svcpt, force)) {
 		req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
-		if (req != NULL) {
+		if (req) {
 			svcpt->scp_hreq_count++;
 			goto got_request;
 		}
@@ -1413,7 +1437,7 @@
 
 	if (ptlrpc_server_normal_pending(svcpt, force)) {
 		req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
-		if (req != NULL) {
+		if (req) {
 			svcpt->scp_hreq_count = 0;
 			goto got_request;
 		}
@@ -1461,7 +1485,8 @@
 	list_del_init(&req->rq_list);
 	svcpt->scp_nreqs_incoming--;
 	/* Consider this still a "queued" request as far as stats are
-	 * concerned */
+	 * concerned
+	 */
 	spin_unlock(&svcpt->scp_lock);
 
 	/* go through security check/transform */
@@ -1598,7 +1623,7 @@
 	int fail_opc = 0;
 
 	request = ptlrpc_server_request_get(svcpt, false);
-	if (request == NULL)
+	if (!request)
 		return 0;
 
 	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
@@ -1620,7 +1645,7 @@
 	timediff = timespec64_sub(work_start, request->rq_arrival_time);
 	timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
 			 timediff.tv_nsec / NSEC_PER_USEC;
-	if (likely(svc->srv_stats != NULL)) {
+	if (likely(svc->srv_stats)) {
 		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
 				    timediff_usecs);
 		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
@@ -1652,7 +1677,8 @@
 	}
 
 	/* Discard requests queued for longer than the deadline.
-	   The deadline is increased if we send an early reply. */
+	 * The deadline is increased if we send an early reply.
+	 */
 	if (ktime_get_real_seconds() > request->rq_deadline) {
 		DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
 			  libcfs_id2str(request->rq_peer),
@@ -1718,7 +1744,7 @@
 	       request->rq_status,
 	       (request->rq_repmsg ?
 		lustre_msg_get_status(request->rq_repmsg) : -999));
-	if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
+	if (likely(svc->srv_stats && request->rq_reqmsg)) {
 		__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
 		int opc = opcode_offset(op);
 
@@ -1804,7 +1830,8 @@
 
 	if (nlocks == 0 && !been_handled) {
 		/* If we see this, we should already have seen the warning
-		 * in mds_steal_ack_locks()  */
+		 * in mds_steal_ack_locks()
+		 */
 		CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n",
 		       rs,
 		       rs->rs_xid, rs->rs_transno, rs->rs_opc,
@@ -1858,7 +1885,8 @@
 	/* CAVEAT EMPTOR: We might be allocating buffers here because we've
 	 * allowed the request history to grow out of control.  We could put a
 	 * sanity check on that here and cull some history if we need the
-	 * space. */
+	 * space.
+	 */
 
 	if (avail <= low_water)
 		ptlrpc_grow_req_bufs(svcpt, 1);
@@ -1992,7 +2020,8 @@
 
 	/* NB: we will call cfs_cpt_bind() for all threads, because we
 	 * might want to run lustre server only on a subset of system CPUs,
-	 * in that case ->scp_cpt is CFS_CPT_ANY */
+	 * in that case ->scp_cpt is CFS_CPT_ANY
+	 */
 	rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
 	if (rc != 0) {
 		CWARN("%s: failed to bind %s on CPT %d\n",
@@ -2008,7 +2037,7 @@
 	set_current_groups(ginfo);
 	put_group_info(ginfo);
 
-	if (svc->srv_ops.so_thr_init != NULL) {
+	if (svc->srv_ops.so_thr_init) {
 		rc = svc->srv_ops.so_thr_init(thread);
 		if (rc)
 			goto out;
@@ -2057,7 +2086,8 @@
 	/* SVC_STOPPING may already be set here if someone else is trying
 	 * to stop the service while this new thread has been dynamically
 	 * forked. We still set SVC_RUNNING to let our creator know that
-	 * we are now running, however we will exit as soon as possible */
+	 * we are now running, however we will exit as soon as possible
+	 */
 	thread_add_flags(thread, SVC_RUNNING);
 	svcpt->scp_nthrs_running++;
 	spin_unlock(&svcpt->scp_lock);
@@ -2116,7 +2146,8 @@
 		    ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
 			/* I just failed to repost request buffers.
 			 * Wait for a timeout (unless something else
-			 * happens) before I try again */
+			 * happens) before I try again
+			 */
 			svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
 			CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
 			       svcpt->scp_nrqbds_posted);
@@ -2132,10 +2163,10 @@
 	/*
 	 * deconstruct service specific state created by ptlrpc_start_thread()
 	 */
-	if (svc->srv_ops.so_thr_done != NULL)
+	if (svc->srv_ops.so_thr_done)
 		svc->srv_ops.so_thr_done(thread);
 
-	if (env != NULL) {
+	if (env) {
 		lu_context_fini(&env->le_ctx);
 		kfree(env);
 	}
@@ -2229,14 +2260,14 @@
 	ptlrpc_hr.hr_stopping = 1;
 
 	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
-		if (hrp->hrp_thrs == NULL)
+		if (!hrp->hrp_thrs)
 			continue; /* uninitialized */
 		for (j = 0; j < hrp->hrp_nthrs; j++)
 			wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
 	}
 
 	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
-		if (hrp->hrp_thrs == NULL)
+		if (!hrp->hrp_thrs)
 			continue; /* uninitialized */
 		wait_event(ptlrpc_hr.hr_waitq,
 			       atomic_read(&hrp->hrp_nstopped) ==
@@ -2255,24 +2286,27 @@
 
 		for (j = 0; j < hrp->hrp_nthrs; j++) {
 			struct	ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
+			struct task_struct *task;
 
-			rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
+			task = kthread_run(ptlrpc_hr_main,
 						 &hrp->hrp_thrs[j],
 						 "ptlrpc_hr%02d_%03d",
 						 hrp->hrp_cpt,
-						 hrt->hrt_id));
-			if (IS_ERR_VALUE(rc))
+						 hrt->hrt_id);
+			if (IS_ERR(task)) {
+				rc = PTR_ERR(task);
 				break;
+			}
 		}
 		wait_event(ptlrpc_hr.hr_waitq,
 			       atomic_read(&hrp->hrp_nstarted) == j);
-		if (!IS_ERR_VALUE(rc))
-			continue;
 
-		CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n",
-		       i, j, rc);
-		ptlrpc_stop_hr_threads();
-		return rc;
+		if (rc < 0) {
+			CERROR("cannot start reply handler thread %d:%d: rc = %d\n",
+			       i, j, rc);
+			ptlrpc_stop_hr_threads();
+			return rc;
+		}
 	}
 	return 0;
 }
@@ -2333,7 +2367,7 @@
 	int i;
 
 	ptlrpc_service_for_each_part(svcpt, i, svc) {
-		if (svcpt->scp_service != NULL)
+		if (svcpt->scp_service)
 			ptlrpc_svcpt_stop_threads(svcpt);
 	}
 }
@@ -2374,10 +2408,9 @@
 	struct l_wait_info lwi = { 0 };
 	struct ptlrpc_thread *thread;
 	struct ptlrpc_service *svc;
+	struct task_struct *task;
 	int rc;
 
-	LASSERT(svcpt != NULL);
-
 	svc = svcpt->scp_service;
 
 	CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
@@ -2396,7 +2429,7 @@
 	thread = kzalloc_node(sizeof(*thread), GFP_NOFS,
 			      cfs_cpt_spread_node(svc->srv_cptable,
 						  svcpt->scp_cpt));
-	if (thread == NULL)
+	if (!thread)
 		return -ENOMEM;
 	init_waitqueue_head(&thread->t_ctl_waitq);
 
@@ -2409,7 +2442,8 @@
 
 	if (svcpt->scp_nthrs_starting != 0) {
 		/* serialize starting because some modules (obdfilter)
-		 * might require unique and contiguous t_id */
+		 * might require unique and contiguous t_id
+		 */
 		LASSERT(svcpt->scp_nthrs_starting == 1);
 		spin_unlock(&svcpt->scp_lock);
 		kfree(thread);
@@ -2442,9 +2476,10 @@
 	}
 
 	CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
-	rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
-	if (IS_ERR_VALUE(rc)) {
-		CERROR("cannot start thread '%s': rc %d\n",
+	task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
+	if (IS_ERR(task)) {
+		rc = PTR_ERR(task);
+		CERROR("cannot start thread '%s': rc = %d\n",
 		       thread->t_name, rc);
 		spin_lock(&svcpt->scp_lock);
 		--svcpt->scp_nthrs_starting;
@@ -2488,7 +2523,7 @@
 
 	ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
 						   sizeof(*hrp));
-	if (ptlrpc_hr.hr_partitions == NULL)
+	if (!ptlrpc_hr.hr_partitions)
 		return -ENOMEM;
 
 	init_waitqueue_head(&ptlrpc_hr.hr_waitq);
@@ -2509,7 +2544,7 @@
 			kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
 				cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
 						    i));
-		if (hrp->hrp_thrs == NULL) {
+		if (!hrp->hrp_thrs) {
 			rc = -ENOMEM;
 			goto out;
 		}
@@ -2537,7 +2572,7 @@
 	struct ptlrpc_hr_partition *hrp;
 	int i;
 
-	if (ptlrpc_hr.hr_partitions == NULL)
+	if (!ptlrpc_hr.hr_partitions)
 		return;
 
 	ptlrpc_stop_hr_threads();
@@ -2577,7 +2612,7 @@
 
 	/* early disarm AT timer... */
 	ptlrpc_service_for_each_part(svcpt, i, svc) {
-		if (svcpt->scp_service != NULL)
+		if (svcpt->scp_service)
 			del_timer(&svcpt->scp_at_timer);
 	}
 }
@@ -2592,18 +2627,20 @@
 	int i;
 
 	/* All history will be culled when the next request buffer is
-	 * freed in ptlrpc_service_purge_all() */
+	 * freed in ptlrpc_service_purge_all()
+	 */
 	svc->srv_hist_nrqbds_cpt_max = 0;
 
 	rc = LNetClearLazyPortal(svc->srv_req_portal);
 	LASSERT(rc == 0);
 
 	ptlrpc_service_for_each_part(svcpt, i, svc) {
-		if (svcpt->scp_service == NULL)
+		if (!svcpt->scp_service)
 			break;
 
 		/* Unlink all the request buffers.  This forces a 'final'
-		 * event with its 'unlink' flag set for each posted rqbd */
+		 * event with its 'unlink' flag set for each posted rqbd
+		 */
 		list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
 					rqbd_list) {
 			rc = LNetMDUnlink(rqbd->rqbd_md_h);
@@ -2612,17 +2649,19 @@
 	}
 
 	ptlrpc_service_for_each_part(svcpt, i, svc) {
-		if (svcpt->scp_service == NULL)
+		if (!svcpt->scp_service)
 			break;
 
 		/* Wait for the network to release any buffers
-		 * it's currently filling */
+		 * it's currently filling
+		 */
 		spin_lock(&svcpt->scp_lock);
 		while (svcpt->scp_nrqbds_posted != 0) {
 			spin_unlock(&svcpt->scp_lock);
 			/* Network access will complete in finite time but
 			 * the HUGE timeout lets us CWARN for visibility
-			 * of sluggish NALs */
+			 * of sluggish LNDs
+			 */
 			lwi = LWI_TIMEOUT_INTERVAL(
 					cfs_time_seconds(LONG_UNLINK),
 					cfs_time_seconds(1), NULL, NULL);
@@ -2648,7 +2687,7 @@
 	int i;
 
 	ptlrpc_service_for_each_part(svcpt, i, svc) {
-		if (svcpt->scp_service == NULL)
+		if (!svcpt->scp_service)
 			break;
 
 		spin_lock(&svcpt->scp_rep_lock);
@@ -2663,7 +2702,8 @@
 
 		/* purge the request queue.  NB No new replies (rqbds
 		 * all unlinked) and no service threads, so I'm the only
-		 * thread noodling the request queue now */
+		 * thread noodling the request queue now
+		 */
 		while (!list_empty(&svcpt->scp_req_incoming)) {
 			req = list_entry(svcpt->scp_req_incoming.next,
 					     struct ptlrpc_request, rq_list);
@@ -2682,11 +2722,13 @@
 		LASSERT(svcpt->scp_nreqs_incoming == 0);
 		LASSERT(svcpt->scp_nreqs_active == 0);
 		/* history should have been culled by
-		 * ptlrpc_server_finish_request */
+		 * ptlrpc_server_finish_request
+		 */
 		LASSERT(svcpt->scp_hist_nrqbds == 0);
 
 		/* Now free all the request buffers since nothing
-		 * references them any more... */
+		 * references them any more...
+		 */
 
 		while (!list_empty(&svcpt->scp_rqbd_idle)) {
 			rqbd = list_entry(svcpt->scp_rqbd_idle.next,
@@ -2714,7 +2756,7 @@
 	int i;
 
 	ptlrpc_service_for_each_part(svcpt, i, svc) {
-		if (svcpt->scp_service == NULL)
+		if (!svcpt->scp_service)
 			break;
 
 		/* In case somebody rearmed this in the meantime */
@@ -2730,7 +2772,7 @@
 	ptlrpc_service_for_each_part(svcpt, i, svc)
 		kfree(svcpt);
 
-	if (svc->srv_cpts != NULL)
+	if (svc->srv_cpts)
 		cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
 
 	kfree(svc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 61d9ca9..3ffd2d9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -333,17 +333,9 @@
 	CLASSERT(LDLM_MAX_TYPE == 14);
 	CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0);
 	CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1);
-	LASSERTF(UPDATE_OBJ == 1000, "found %lld\n",
-		 (long long)UPDATE_OBJ);
-	LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n",
-		 (long long)UPDATE_LAST_OPC);
 	CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2);
 	CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3);
 	CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3);
-	CLASSERT(LQUOTA_TYPE_USR == 0);
-	CLASSERT(LQUOTA_TYPE_GRP == 1);
-	CLASSERT(LQUOTA_RES_MD == 1);
-	CLASSERT(LQUOTA_RES_DT == 2);
 	LASSERTF(OBD_PING == 400, "found %lld\n",
 		 (long long)OBD_PING);
 	LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n",
@@ -437,30 +429,6 @@
 		(unsigned)LMAC_NOT_IN_OI);
 	LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
 		(unsigned)LMAC_FID_ON_OST);
-	LASSERTF(OBJ_CREATE == 1, "found %lld\n",
-		 (long long)OBJ_CREATE);
-	LASSERTF(OBJ_DESTROY == 2, "found %lld\n",
-		 (long long)OBJ_DESTROY);
-	LASSERTF(OBJ_REF_ADD == 3, "found %lld\n",
-		 (long long)OBJ_REF_ADD);
-	LASSERTF(OBJ_REF_DEL == 4, "found %lld\n",
-		 (long long)OBJ_REF_DEL);
-	LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n",
-		 (long long)OBJ_ATTR_SET);
-	LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n",
-		 (long long)OBJ_ATTR_GET);
-	LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n",
-		 (long long)OBJ_XATTR_SET);
-	LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n",
-		 (long long)OBJ_XATTR_GET);
-	LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
-		 (long long)OBJ_INDEX_LOOKUP);
-	LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
-		 (long long)OBJ_INDEX_LOOKUP);
-	LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n",
-		 (long long)OBJ_INDEX_INSERT);
-	LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n",
-		 (long long)OBJ_INDEX_DELETE);
 
 	/* Checks for struct ost_id */
 	LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -587,9 +555,6 @@
 		 (long long)LDF_COLLIDE);
 	LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n",
 		 (long long)LU_PAGE_SIZE);
-	/* Checks for union lu_page */
-	LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n",
-		 (long long)(int)sizeof(union lu_page));
 
 	/* Checks for struct lustre_handle */
 	LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n",
@@ -1535,11 +1500,6 @@
 	LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
 		 (long long)(int)sizeof(union lquota_id));
 
-	LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n",
-		 (long long)QUOTABLOCK_BITS);
-	LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n",
-		 (long long)QUOTABLOCK_SIZE);
-
 	/* Checks for struct obd_quotactl */
 	LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n",
 		 (long long)(int)sizeof(struct obd_quotactl));
@@ -1642,138 +1602,6 @@
 	LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n",
 		Q_FINVALIDATE);
 
-	/* Checks for struct lquota_acct_rec */
-	LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n",
-		 (long long)(int)sizeof(struct lquota_acct_rec));
-	LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_acct_rec, bspace));
-	LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace));
-	LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_acct_rec, ispace));
-	LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace));
-
-	/* Checks for struct lquota_glb_rec */
-	LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n",
-		 (long long)(int)sizeof(struct lquota_glb_rec));
-	LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit));
-	LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit));
-	LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit));
-	LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit));
-	LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_glb_rec, qbr_time));
-	LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time));
-	LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) == 24, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted));
-	LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted));
-
-	/* Checks for struct lquota_slv_rec */
-	LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n",
-		 (long long)(int)sizeof(struct lquota_slv_rec));
-	LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted));
-	LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted));
-
-	/* Checks for struct idx_info */
-	LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n",
-		 (long long)(int)sizeof(struct idx_info));
-	LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_magic));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_magic));
-	LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_flags));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_flags));
-	LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_count));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_count));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad0));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0));
-	LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_attrs));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs));
-	LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_fid));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_fid));
-	LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_version));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_version));
-	LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_hash_start));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start));
-	LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_hash_end));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end));
-	LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_keysize));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize));
-	LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_recsize));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad1));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad2));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad3));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad3));
-	CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37);
-
-	/* Checks for struct lu_idxpage */
-	LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n",
-		 (long long)(int)sizeof(struct lu_idxpage));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_magic));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_flags));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_nr));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_pad0));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0));
-	CLASSERT(LIP_MAGIC == 0x8A6D6B6C);
-	LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n",
-		 (long long)LIP_HDR_SIZE);
-	LASSERTF(II_FL_NOHASH == 1, "found %lld\n",
-		 (long long)II_FL_NOHASH);
-	LASSERTF(II_FL_VARKEY == 2, "found %lld\n",
-		 (long long)II_FL_VARKEY);
-	LASSERTF(II_FL_VARREC == 4, "found %lld\n",
-		 (long long)II_FL_VARREC);
-	LASSERTF(II_FL_NONUNQ == 8, "found %lld\n",
-		 (long long)II_FL_NONUNQ);
-
 	/* Checks for struct niobuf_remote */
 	LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
 		 (long long)(int)sizeof(struct niobuf_remote));
@@ -3753,50 +3581,6 @@
 	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
 		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
 
-	/* Checks for struct quota_body */
-	LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n",
-		 (long long)(int)sizeof(struct quota_body));
-	LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_fid));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_fid));
-	LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_id));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_id));
-	LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_flags));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_flags));
-	LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_padding));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_padding));
-	LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_count));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_count));
-	LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_usage));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_usage));
-	LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_slv_ver));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver));
-	LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_lockh));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh));
-	LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_glb_lockh));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh));
-	LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_padding1[4]));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4]));
-
 	/* Checks for struct mgs_target_info */
 	LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
 		 (long long)(int)sizeof(struct mgs_target_info));
@@ -4431,60 +4215,4 @@
 	LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4,
 		 "found %lld\n",
 	      (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
-
-	/* Checks for struct update_buf */
-	LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
-		 (long long)(int)sizeof(struct update_buf));
-	LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct update_buf, ub_magic));
-	LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_buf *)0)->ub_magic));
-	LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct update_buf, ub_count));
-	LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_buf *)0)->ub_count));
-	LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct update_buf, ub_bufs));
-	LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs));
-
-	/* Checks for struct update_reply */
-	LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n",
-		 (long long)(int)sizeof(struct update_reply));
-	LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct update_reply, ur_version));
-	LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_reply *)0)->ur_version));
-	LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct update_reply, ur_count));
-	LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_reply *)0)->ur_count));
-	LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct update_reply, ur_lens));
-	LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_reply *)0)->ur_lens));
-
-	/* Checks for struct update */
-	LASSERTF((int)sizeof(struct update) == 56, "found %lld\n",
-		 (long long)(int)sizeof(struct update));
-	LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_type));
-	LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_type));
-	LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_batchid));
-	LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_batchid));
-	LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_fid));
-	LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_fid));
-	LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_lens));
-	LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_lens));
-	LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_bufs));
-	LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_bufs));
 }
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
index 7b7e7b2..f4f35c9 100644
--- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
+++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
@@ -538,8 +538,8 @@
 };
 
 /**********************************************************************
-      IPIPE API Structures
-**********************************************************************/
+ *	IPIPE API Structures
+ **********************************************************************/
 
 /* IPIPE module configurations */
 
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index ac78ed2..ff47a8f3 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1350,21 +1350,16 @@
  */
 static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
-	int ret = 0;
-
 	switch (cmd) {
 	case VIDIOC_VPFE_IPIPE_S_CONFIG:
-		ret = ipipe_s_config(sd, arg);
-		break;
+		return ipipe_s_config(sd, arg);
 
 	case VIDIOC_VPFE_IPIPE_G_CONFIG:
-		ret = ipipe_g_config(sd, arg);
-		break;
+		return ipipe_g_config(sd, arg);
 
 	default:
-		ret = -ENOIOCTLCMD;
+		return -ENOIOCTLCMD;
 	}
-	return ret;
 }
 
 void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en)
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index b1d5e23..0792fbd 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -682,8 +682,10 @@
 	ipipe_clock_enable(base_addr);
 
 	if (id == IPIPE_RGB2RGB_2) {
-		/* For second RGB module, gain integer is 3 bits instead
-		of 4, offset has 11 bits insread of 13 */
+		/*
+		 * For second RGB module, gain integer is 3 bits instead
+		 * of 4, offset has 11 bits instead of 13
+		 */
 		offset = RGB2_MUL_BASE;
 		integ_mask = 0x7;
 		offset_mask = RGB2RGB_2_OFST_MASK;
@@ -792,8 +794,10 @@
 	/* valied table */
 	tbl = lut_3d->table;
 	for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
-		/* Each entry has 0-9 (B), 10-19 (G) and
-		20-29 R values */
+		/*
+		 * Each entry has 0-9 (B), 10-19 (G) and
+		 * 20-29 (R) values
+		 */
 		val = tbl[i].b & D3_LUT_ENTRY_MASK;
 		val |= (tbl[i].g & D3_LUT_ENTRY_MASK) <<
 			 D3_LUT_ENTRY_G_SHIFT;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
index 2bf2f7a..7ee1572 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
@@ -278,9 +278,10 @@
 /* Resizer Rescale Parameters */
 #define RSZ_EN_A		0x58
 #define RSZ_EN_B		0xe8
-/* offset of the registers to be added with base register of
-   either RSZ0 or RSZ1
-*/
+/*
+ * offset of the registers to be added to the base register of
+ * either RSZ0 or RSZ1
+ */
 #define RSZ_MODE		0x4
 #define RSZ_420			0x8
 #define RSZ_I_VPS		0xc
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index 633d645..6e4c87f 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -641,8 +641,9 @@
 }
 
 static int
-ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
-		     struct v4l2_subdev_frame_size_enum *fse)
+ipipeif_enum_frame_size(struct v4l2_subdev *sd,
+			struct v4l2_subdev_pad_config *cfg,
+			struct v4l2_subdev_frame_size_enum *fse)
 {
 	struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
 	struct v4l2_mbus_framefmt format;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 9905789..ae9202d 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -282,7 +282,8 @@
  * @fmt: pointer to v4l2 subdev format structure
  */
 static void
-isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
+isif_try_format(struct vpfe_isif_device *isif,
+		struct v4l2_subdev_pad_config *cfg,
 		struct v4l2_subdev_format *fmt)
 {
 	unsigned int width = fmt->format.width;
@@ -625,21 +626,16 @@
  */
 static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
-	int ret;
-
 	switch (cmd) {
 	case VIDIOC_VPFE_ISIF_S_RAW_PARAMS:
-		ret = isif_set_params(sd, arg);
-		break;
+		return isif_set_params(sd, arg);
 
 	case VIDIOC_VPFE_ISIF_G_RAW_PARAMS:
-		ret = isif_get_params(sd, arg);
-		break;
+		return isif_get_params(sd, arg);
 
 	default:
-		ret = -ENOIOCTLCMD;
+		return -ENOIOCTLCMD;
 	}
-	return ret;
 }
 
 static void isif_config_gain_offset(struct vpfe_isif_device *isif)
@@ -1239,7 +1235,8 @@
 	 * a lot of registers that we didn't touch
 	 */
 	/* start with all bits zero */
-	ccdcfg = modeset = 0;
+	ccdcfg = 0;
+	modeset = 0;
 	pix_fmt = isif_get_pix_fmt(format->code);
 	if (pix_fmt < 0) {
 		pr_debug("Invalid pix_fmt(input mode)\n");
@@ -1398,8 +1395,9 @@
  * @which: wanted subdev format.
  */
 static struct v4l2_mbus_framefmt *
-__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
-		  unsigned int pad, enum v4l2_subdev_format_whence which)
+__isif_get_format(struct vpfe_isif_device *isif,
+		  struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+		  enum v4l2_subdev_format_whence which)
 {
 	if (which == V4L2_SUBDEV_FORMAT_TRY) {
 		struct v4l2_subdev_format fmt;
@@ -1570,7 +1568,7 @@
 		sel->r.height = format->height;
 	}
 	/* adjust the width to 16 pixel boundary */
-	sel->r.width = ((sel->r.width + 15) & ~0xf);
+	sel->r.width = (sel->r.width + 15) & ~0xf;
 	vpfe_isif->crop = sel->r;
 	if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
 		isif_set_image_window(vpfe_isif);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index a91395c..3cd56cc 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -404,7 +404,7 @@
 	param->f_div.pass[0].src_hsz = upper_h1 + o;
 	param->f_div.pass[1].o_hsz = h2 - 1;
 	param->f_div.pass[1].i_hps = 10 + (val1 * two_power);
-	param->f_div.pass[1].h_phs = (val - (val1 << 8));
+	param->f_div.pass[1].h_phs = val - (val1 << 8);
 	param->f_div.pass[1].src_hps = upper_h1 - o;
 	param->f_div.pass[1].src_hsz = upper_h2 + o;
 
@@ -425,8 +425,8 @@
 	param->rsz_common.hps = param->user_config.hst;
 
 	if (vpfe_ipipeif_decimation_enabled(vpfe_dev))
-		param->rsz_common.hsz = (((informat->width - 1) *
-			IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev));
+		param->rsz_common.hsz = ((informat->width - 1) *
+			IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev);
 	else
 		param->rsz_common.hsz = informat->width - 1;
 
@@ -650,7 +650,7 @@
 	param->f_div.pass[0].src_hsz = (input_width >> 2) + o;
 	param->f_div.pass[1].o_hsz = h2 - 1;
 	param->f_div.pass[1].i_hps = val1;
-	param->f_div.pass[1].h_phs = (val - (val1 << 8));
+	param->f_div.pass[1].h_phs = val - (val1 << 8);
 	param->f_div.pass[1].src_hps = (input_width >> 2) - o;
 	param->f_div.pass[1].src_hsz = (input_width >> 2) + o;
 
@@ -1387,8 +1387,9 @@
  * @fmt: pointer to v4l2 subdev format structure
  * return -EINVAL or zero on success
  */
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
-			   struct v4l2_subdev_format *fmt)
+static int resizer_set_format(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
 {
 	struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
 	struct v4l2_mbus_framefmt *format;
@@ -1447,8 +1448,9 @@
  * @fmt: pointer to v4l2 subdev format structure
  * return -EINVAL or zero on success
  */
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
-			   struct v4l2_subdev_format *fmt)
+static int resizer_get_format(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
 {
 	struct v4l2_mbus_framefmt *format;
 
@@ -1670,7 +1672,7 @@
 				resizer->crop_resizer.input =
 						RESIZER_CROP_INPUT_IPIPEIF;
 			else if (ipipe_source == IPIPE_OUTPUT_RESIZER)
-					resizer->crop_resizer.input =
+				resizer->crop_resizer.input =
 						RESIZER_CROP_INPUT_IPIPE;
 			else
 				return -EINVAL;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index ec46f36..bf077f8 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -442,8 +442,10 @@
 
 	/* create links now, starting with external(i2c) entities */
 	for (i = 0; i < vpfe_dev->num_ext_subdevs; i++)
-		/* if entity has no pads (ex: amplifier),
-		   cant establish link */
+		/*
+		 * if entity has no pads (ex: amplifier),
+		 * can't establish link
+		 */
 		if (vpfe_dev->sd[i]->entity.num_pads) {
 			ret = media_create_pad_link(&vpfe_dev->sd[i]->entity,
 				0, &vpfe_dev->vpfe_isif.subdev.entity,
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 3ec7e65..0a65405 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,21 +172,19 @@
 static int vpfe_update_pipe_state(struct vpfe_video_device *video)
 {
 	struct vpfe_pipeline *pipe = &video->pipe;
-	int ret;
 
-	ret = vpfe_prepare_pipeline(video);
-	if (ret)
-		return ret;
+	if (vpfe_prepare_pipeline(video))
+		return vpfe_prepare_pipeline(video);
 
-	/* Find out if there is any input video
-	  if yes, it is single shot.
-	*/
+	/*
+	 * Find out if there is any input video;
+	 * if yes, it is single shot.
+	 */
 	if (pipe->input_num == 0) {
 		pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
-		ret = vpfe_update_current_ext_subdev(video);
-		if (ret) {
+		if (vpfe_update_current_ext_subdev(video)) {
 			pr_err("Invalid external subdev\n");
-			return ret;
+			return vpfe_update_current_ext_subdev(video);
 		}
 	} else {
 		pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -460,7 +458,7 @@
 	video->next_frm = list_entry(video->dma_queue.next,
 					struct vpfe_cap_buffer, list);
 
-	if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state)
+	if (video->pipe.state == VPFE_PIPELINE_STREAM_SINGLESHOT)
 		video->cur_frm = video->next_frm;
 
 	list_del(&video->next_frm->list);
@@ -529,10 +527,11 @@
 	if (fh->io_allowed) {
 		if (video->started) {
 			vpfe_stop_capture(video);
-			/* mark pipe state as stopped in vpfe_release(),
-			   as app might call streamon() after streamoff()
-			   in which case driver has to start streaming.
-			*/
+			/*
+			 * mark pipe state as stopped in vpfe_release(),
+			 * as app might call streamon() after streamoff()
+			 * in which case driver has to start streaming.
+			 */
 			video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
 			vb2_streamoff(&video->buffer_queue,
 				      video->buffer_queue.type);
@@ -668,12 +667,13 @@
 	struct v4l2_subdev *subdev;
 	struct v4l2_format format;
 	struct media_pad *remote;
-	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
 
-	/* since already subdev pad format is set,
-	only one pixel format is available */
+	/*
+	 * since the subdev pad format is already set,
+	 * only one pixel format is available
+	 */
 	if (fmt->index > 0) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
 		return -EINVAL;
@@ -695,11 +695,10 @@
 	sd_fmt.pad = remote->index;
 	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 	/* get output format of remote subdev */
-	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
-	if (ret) {
+	if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
 		v4l2_err(&vpfe_dev->v4l2_dev,
 			 "invalid remote subdev for video node\n");
-		return ret;
+		return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
 	}
 	/* convert to pix format */
 	mbus.code = sd_fmt.format.code;
@@ -726,7 +725,6 @@
 	struct vpfe_video_device *video = video_drvdata(file);
 	struct vpfe_device *vpfe_dev = video->vpfe_dev;
 	struct v4l2_format format;
-	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
 	/* If streaming is started, return error */
@@ -735,9 +733,8 @@
 		return -EBUSY;
 	}
 	/* get adjacent subdev's output pad format */
-	ret = __vpfe_video_get_format(video, &format);
-	if (ret)
-		return ret;
+	if (__vpfe_video_get_format(video, &format))
+		return __vpfe_video_get_format(video, &format);
 	*fmt = format;
 	video->fmt = *fmt;
 	return 0;
@@ -760,13 +757,11 @@
 	struct vpfe_video_device *video = video_drvdata(file);
 	struct vpfe_device *vpfe_dev = video->vpfe_dev;
 	struct v4l2_format format;
-	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
 	/* get adjacent subdev's output pad format */
-	ret = __vpfe_video_get_format(video, &format);
-	if (ret)
-		return ret;
+	if (__vpfe_video_get_format(video, &format))
+		return __vpfe_video_get_format(video, &format);
 
 	*fmt = format;
 	return 0;
@@ -843,9 +838,8 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
 
-	ret = mutex_lock_interruptible(&video->lock);
-	if (ret)
-		return ret;
+	if (mutex_lock_interruptible(&video->lock))
+		return mutex_lock_interruptible(&video->lock);
 	/*
 	 * If streaming is started return device busy
 	 * error
@@ -946,9 +940,8 @@
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
 
 	/* Call decoder driver function to set the standard */
-	ret = mutex_lock_interruptible(&video->lock);
-	if (ret)
-		return ret;
+	if (mutex_lock_interruptible(&video->lock))
+		return mutex_lock_interruptible(&video->lock);
 	sdinfo = video->current_ext_subdev;
 	/* If streaming is started, return device busy error */
 	if (video->started) {
@@ -1328,15 +1321,14 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
 
-	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type &&
-	    V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+	if (req_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    req_buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
 		return -EINVAL;
 	}
 
-	ret = mutex_lock_interruptible(&video->lock);
-	if (ret)
-		return ret;
+	if (mutex_lock_interruptible(&video->lock))
+		return mutex_lock_interruptible(&video->lock);
 
 	if (video->io_usrs != 0) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1362,11 +1354,10 @@
 	q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 
-	ret = vb2_queue_init(q);
-	if (ret) {
+	if (vb2_queue_init(q)) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
 		vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
-		return ret;
+		return vb2_queue_init(q);
 	}
 
 	fh->io_allowed = 1;
@@ -1390,8 +1381,8 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
 
-	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
-	    V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+	if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
 		return  -EINVAL;
 	}
@@ -1417,8 +1408,8 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
 
-	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type &&
-	    V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    p->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
 		return -EINVAL;
 	}
@@ -1445,8 +1436,8 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
 
-	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
-	    V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+	if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
 		return -EINVAL;
 	}
@@ -1478,8 +1469,8 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
 
-	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type &&
-	    V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+	if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
 		return ret;
 	}
@@ -1495,7 +1486,7 @@
 		return -EIO;
 	}
 	/* Validate the pipeline */
-	if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) {
+	if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
 		ret = vpfe_video_validate_pipeline(pipe);
 		if (ret < 0)
 			return ret;
@@ -1542,9 +1533,8 @@
 		return -EINVAL;
 	}
 
-	ret = mutex_lock_interruptible(&video->lock);
-	if (ret)
-		return ret;
+	if (mutex_lock_interruptible(&video->lock))
+		return mutex_lock_interruptible(&video->lock);
 
 	vpfe_stop_capture(video);
 	ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
index cf2e96b..c2f2a63 100644
--- a/drivers/staging/media/mn88472/mn88472.c
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -96,9 +96,9 @@
 	/* Calculate IF registers ( (1<<24)*IF / Xtal ) */
 	tmp =  div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
 				   dev->xtal);
-	if_val[0] = ((tmp >> 16) & 0xff);
-	if_val[1] = ((tmp >>  8) & 0xff);
-	if_val[2] = ((tmp >>  0) & 0xff);
+	if_val[0] = (tmp >> 16) & 0xff;
+	if_val[1] = (tmp >>  8) & 0xff;
+	if_val[2] = (tmp >>  0) & 0xff;
 
 	ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
 	ret = regmap_write(dev->regmap[2], 0xef, 0x13);
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
index a222e99..68a614b 100644
--- a/drivers/staging/media/mn88473/mn88473.c
+++ b/drivers/staging/media/mn88473/mn88473.c
@@ -94,9 +94,9 @@
 	/* Calculate IF registers ( (1<<24)*IF / Xtal ) */
 	tmp =  div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
 				   dev->xtal);
-	if_val[0] = ((tmp >> 16) & 0xff);
-	if_val[1] = ((tmp >>  8) & 0xff);
-	if_val[2] = ((tmp >>  0) & 0xff);
+	if_val[0] = (tmp >> 16) & 0xff;
+	if_val[1] = (tmp >>  8) & 0xff;
+	if_val[2] = (tmp >>  0) & 0xff;
 
 	ret = regmap_write(dev->regmap[2], 0x05, 0x00);
 	ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index dc3fb25..de4f76a 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -32,6 +32,7 @@
 
 struct aim_channel {
 	wait_queue_head_t wq;
+	spinlock_t unlink;	/* synchronization lock to unlink channels */
 	struct cdev cdev;
 	struct device *dev;
 	struct mutex io_mutex;
@@ -39,11 +40,9 @@
 	struct most_channel_config *cfg;
 	unsigned int channel_id;
 	dev_t devno;
-	bool keep_mbo;
-	unsigned int mbo_offs;
-	struct mbo *stacked_mbo;
+	size_t mbo_offs;
 	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
-	atomic_t access_ref;
+	int access_ref;
 	struct list_head list;
 };
 
@@ -51,15 +50,26 @@
 static struct list_head channel_list;
 static spinlock_t ch_list_lock;
 
+static inline bool ch_has_mbo(struct aim_channel *c)
+{
+	return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
+}
+
+static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
+{
+	*mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+	return *mbo;
+}
+
 static struct aim_channel *get_channel(struct most_interface *iface, int id)
 {
-	struct aim_channel *channel, *tmp;
+	struct aim_channel *c, *tmp;
 	unsigned long flags;
 	int found_channel = 0;
 
 	spin_lock_irqsave(&ch_list_lock, flags);
-	list_for_each_entry_safe(channel, tmp, &channel_list, list) {
-		if ((channel->iface == iface) && (channel->channel_id == id)) {
+	list_for_each_entry_safe(c, tmp, &channel_list, list) {
+		if ((c->iface == iface) && (c->channel_id == id)) {
 			found_channel = 1;
 			break;
 		}
@@ -67,7 +77,29 @@
 	spin_unlock_irqrestore(&ch_list_lock, flags);
 	if (!found_channel)
 		return NULL;
-	return channel;
+	return c;
+}
+
+static void stop_channel(struct aim_channel *c)
+{
+	struct mbo *mbo;
+
+	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
+		most_put_mbo(mbo);
+	most_stop_channel(c->iface, c->channel_id, &cdev_aim);
+}
+
+static void destroy_cdev(struct aim_channel *c)
+{
+	unsigned long flags;
+
+	device_destroy(aim_class, c->devno);
+	cdev_del(&c->cdev);
+	kfifo_free(&c->fifo);
+	spin_lock_irqsave(&ch_list_lock, flags);
+	list_del(&c->list);
+	spin_unlock_irqrestore(&ch_list_lock, flags);
+	ida_simple_remove(&minor_id, MINOR(c->devno));
 }
 
 /**
@@ -80,29 +112,38 @@
  */
 static int aim_open(struct inode *inode, struct file *filp)
 {
-	struct aim_channel *channel;
+	struct aim_channel *c;
 	int ret;
 
-	channel = to_channel(inode->i_cdev);
-	filp->private_data = channel;
+	c = to_channel(inode->i_cdev);
+	filp->private_data = c;
 
-	if (((channel->cfg->direction == MOST_CH_RX) &&
+	if (((c->cfg->direction == MOST_CH_RX) &&
 	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
-	     ((channel->cfg->direction == MOST_CH_TX) &&
+	     ((c->cfg->direction == MOST_CH_TX) &&
 		((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
 		pr_info("WARN: Access flags mismatch\n");
 		return -EACCES;
 	}
-	if (!atomic_inc_and_test(&channel->access_ref)) {
-		pr_info("WARN: Device is busy\n");
-		atomic_dec(&channel->access_ref);
+
+	mutex_lock(&c->io_mutex);
+	if (!c->dev) {
+		pr_info("WARN: Device is destroyed\n");
+		mutex_unlock(&c->io_mutex);
 		return -EBUSY;
 	}
 
-	ret = most_start_channel(channel->iface, channel->channel_id,
-				 &cdev_aim);
-	if (ret)
-		atomic_dec(&channel->access_ref);
+	if (c->access_ref) {
+		pr_info("WARN: Device is busy\n");
+		mutex_unlock(&c->io_mutex);
+		return -EBUSY;
+	}
+
+	c->mbo_offs = 0;
+	ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
+	if (!ret)
+		c->access_ref = 1;
+	mutex_unlock(&c->io_mutex);
 	return ret;
 }
 
@@ -115,33 +156,21 @@
  */
 static int aim_close(struct inode *inode, struct file *filp)
 {
-	int ret;
-	struct mbo *mbo;
-	struct aim_channel *channel = to_channel(inode->i_cdev);
+	struct aim_channel *c = to_channel(inode->i_cdev);
 
-	mutex_lock(&channel->io_mutex);
-	if (!channel->dev) {
-		mutex_unlock(&channel->io_mutex);
-		atomic_dec(&channel->access_ref);
-		device_destroy(aim_class, channel->devno);
-		cdev_del(&channel->cdev);
-		kfifo_free(&channel->fifo);
-		list_del(&channel->list);
-		ida_simple_remove(&minor_id, MINOR(channel->devno));
-		wake_up_interruptible(&channel->wq);
-		kfree(channel);
-		return 0;
+	mutex_lock(&c->io_mutex);
+	spin_lock(&c->unlink);
+	c->access_ref = 0;
+	spin_unlock(&c->unlink);
+	if (c->dev) {
+		stop_channel(c);
+		mutex_unlock(&c->io_mutex);
+	} else {
+		destroy_cdev(c);
+		mutex_unlock(&c->io_mutex);
+		kfree(c);
 	}
-	mutex_unlock(&channel->io_mutex);
-
-	while (kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
-		most_put_mbo(mbo);
-	if (channel->keep_mbo)
-		most_put_mbo(channel->stacked_mbo);
-	ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
-	atomic_dec(&channel->access_ref);
-	wake_up_interruptible(&channel->wq);
-	return ret;
+	return 0;
 }
 
 /**
@@ -154,62 +183,48 @@
 static ssize_t aim_write(struct file *filp, const char __user *buf,
 			 size_t count, loff_t *offset)
 {
-	int ret, err;
-	size_t actual_len = 0;
-	size_t max_len = 0;
-	ssize_t retval;
-	struct mbo *mbo;
-	struct aim_channel *channel = filp->private_data;
+	int ret;
+	size_t actual_len;
+	size_t max_len;
+	struct mbo *mbo = NULL;
+	struct aim_channel *c = filp->private_data;
 
-	mutex_lock(&channel->io_mutex);
-	if (unlikely(!channel->dev)) {
-		mutex_unlock(&channel->io_mutex);
-		return -EPIPE;
-	}
-	mutex_unlock(&channel->io_mutex);
+	mutex_lock(&c->io_mutex);
+	while (c->dev && !ch_get_mbo(c, &mbo)) {
+		mutex_unlock(&c->io_mutex);
 
-	mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
-
-	if (!mbo) {
 		if ((filp->f_flags & O_NONBLOCK))
 			return -EAGAIN;
-		if (wait_event_interruptible(
-			    channel->wq,
-			    (mbo = most_get_mbo(channel->iface,
-						channel->channel_id,
-						&cdev_aim)) ||
-			    (!channel->dev)))
+		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
 			return -ERESTARTSYS;
+		mutex_lock(&c->io_mutex);
 	}
 
-	mutex_lock(&channel->io_mutex);
-	if (unlikely(!channel->dev)) {
-		mutex_unlock(&channel->io_mutex);
-		err = -EPIPE;
-		goto error;
+	if (unlikely(!c->dev)) {
+		ret = -EPIPE;
+		goto unlock;
 	}
-	mutex_unlock(&channel->io_mutex);
 
-	max_len = channel->cfg->buffer_size;
+	max_len = c->cfg->buffer_size;
 	actual_len = min(count, max_len);
 	mbo->buffer_length = actual_len;
 
-	retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
-	if (retval) {
-		err = -EIO;
-		goto error;
+	if (copy_from_user(mbo->virt_address, buf, mbo->buffer_length)) {
+		ret = -EFAULT;
+		goto put_mbo;
 	}
 
 	ret = most_submit_mbo(mbo);
-	if (ret) {
-		pr_info("submitting MBO to core failed\n");
-		err = ret;
-		goto error;
-	}
-	return actual_len - retval;
-error:
+	if (ret)
+		goto put_mbo;
+
+	mutex_unlock(&c->io_mutex);
+	return actual_len;
+put_mbo:
 	most_put_mbo(mbo);
-	return err;
+unlock:
+	mutex_unlock(&c->io_mutex);
+	return ret;
 }
 
 /**
@@ -222,59 +237,46 @@
 static ssize_t
 aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
 {
-	ssize_t retval;
-	size_t not_copied, proc_len;
+	size_t to_copy, not_copied, copied;
 	struct mbo *mbo;
-	struct aim_channel *channel = filp->private_data;
+	struct aim_channel *c = filp->private_data;
 
-	if (channel->keep_mbo) {
-		mbo = channel->stacked_mbo;
-		channel->keep_mbo = false;
-		goto start_copy;
-	}
-	while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) {
+	mutex_lock(&c->io_mutex);
+	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
+		mutex_unlock(&c->io_mutex);
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		if (wait_event_interruptible(channel->wq,
-					     (!kfifo_is_empty(&channel->fifo) ||
-					      (!channel->dev))))
+		if (wait_event_interruptible(c->wq,
+					     (!kfifo_is_empty(&c->fifo) ||
+					      (!c->dev))))
 			return -ERESTARTSYS;
+		mutex_lock(&c->io_mutex);
 	}
 
-start_copy:
 	/* make sure we don't submit to gone devices */
-	mutex_lock(&channel->io_mutex);
-	if (unlikely(!channel->dev)) {
-		mutex_unlock(&channel->io_mutex);
+	if (unlikely(!c->dev)) {
+		mutex_unlock(&c->io_mutex);
 		return -EIO;
 	}
 
-	if (count < mbo->processed_length)
-		channel->keep_mbo = true;
-
-	proc_len = min((int)count,
-		       (int)(mbo->processed_length - channel->mbo_offs));
+	to_copy = min_t(size_t,
+			count,
+			mbo->processed_length - c->mbo_offs);
 
 	not_copied = copy_to_user(buf,
-				  mbo->virt_address + channel->mbo_offs,
-				  proc_len);
+				  mbo->virt_address + c->mbo_offs,
+				  to_copy);
 
-	retval = not_copied ? proc_len - not_copied : proc_len;
+	copied = to_copy - not_copied;
 
-	if (channel->keep_mbo) {
-		channel->mbo_offs = retval;
-		channel->stacked_mbo = mbo;
-	} else {
+	c->mbo_offs += copied;
+	if (c->mbo_offs >= mbo->processed_length) {
+		kfifo_skip(&c->fifo);
 		most_put_mbo(mbo);
-		channel->mbo_offs = 0;
+		c->mbo_offs = 0;
 	}
-	mutex_unlock(&channel->io_mutex);
-	return retval;
-}
-
-static inline bool __must_check IS_ERR_OR_FALSE(int x)
-{
-	return x <= 0;
+	mutex_unlock(&c->io_mutex);
+	return copied;
 }
 
 static unsigned int aim_poll(struct file *filp, poll_table *wait)
@@ -288,7 +290,7 @@
 		if (!kfifo_is_empty(&c->fifo))
 			mask |= POLLIN | POLLRDNORM;
 	} else {
-		if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id)))
+		if (ch_has_mbo(c))
 			mask |= POLLOUT | POLLWRNORM;
 	}
 	return mask;
@@ -316,33 +318,29 @@
  */
 static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
 {
-	struct aim_channel *channel;
-	unsigned long flags;
+	struct aim_channel *c;
 
 	if (!iface) {
 		pr_info("Bad interface pointer\n");
 		return -EINVAL;
 	}
 
-	channel = get_channel(iface, channel_id);
-	if (!channel)
+	c = get_channel(iface, channel_id);
+	if (!c)
 		return -ENXIO;
 
-	mutex_lock(&channel->io_mutex);
-	channel->dev = NULL;
-	mutex_unlock(&channel->io_mutex);
-
-	if (atomic_read(&channel->access_ref)) {
-		device_destroy(aim_class, channel->devno);
-		cdev_del(&channel->cdev);
-		kfifo_free(&channel->fifo);
-		ida_simple_remove(&minor_id, MINOR(channel->devno));
-		spin_lock_irqsave(&ch_list_lock, flags);
-		list_del(&channel->list);
-		spin_unlock_irqrestore(&ch_list_lock, flags);
-		kfree(channel);
+	mutex_lock(&c->io_mutex);
+	spin_lock(&c->unlink);
+	c->dev = NULL;
+	spin_unlock(&c->unlink);
+	if (c->access_ref) {
+		stop_channel(c);
+		wake_up_interruptible(&c->wq);
+		mutex_unlock(&c->io_mutex);
 	} else {
-		wake_up_interruptible(&channel->wq);
+		destroy_cdev(c);
+		mutex_unlock(&c->io_mutex);
+		kfree(c);
 	}
 	return 0;
 }
@@ -356,21 +354,27 @@
  */
 static int aim_rx_completion(struct mbo *mbo)
 {
-	struct aim_channel *channel;
+	struct aim_channel *c;
 
 	if (!mbo)
 		return -EINVAL;
 
-	channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
-	if (!channel)
+	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+	if (!c)
 		return -ENXIO;
 
-	kfifo_in(&channel->fifo, &mbo, 1);
+	spin_lock(&c->unlink);
+	if (!c->access_ref || !c->dev) {
+		spin_unlock(&c->unlink);
+		return -EFAULT;
+	}
+	kfifo_in(&c->fifo, &mbo, 1);
+	spin_unlock(&c->unlink);
 #ifdef DEBUG_MESG
-	if (kfifo_is_full(&channel->fifo))
+	if (kfifo_is_full(&c->fifo))
 		pr_info("WARN: Fifo is full\n");
 #endif
-	wake_up_interruptible(&channel->wq);
+	wake_up_interruptible(&c->wq);
 	return 0;
 }
 
@@ -383,7 +387,7 @@
  */
 static int aim_tx_completion(struct most_interface *iface, int channel_id)
 {
-	struct aim_channel *channel;
+	struct aim_channel *c;
 
 	if (!iface) {
 		pr_info("Bad interface pointer\n");
@@ -394,15 +398,13 @@
 		return -EINVAL;
 	}
 
-	channel = get_channel(iface, channel_id);
-	if (!channel)
+	c = get_channel(iface, channel_id);
+	if (!c)
 		return -ENXIO;
-	wake_up_interruptible(&channel->wq);
+	wake_up_interruptible(&c->wq);
 	return 0;
 }
 
-static struct most_aim cdev_aim;
-
 /**
  * aim_probe - probe function of the driver module
  * @iface: pointer to interface instance
@@ -419,7 +421,7 @@
 		     struct most_channel_config *cfg,
 		     struct kobject *parent, char *name)
 {
-	struct aim_channel *channel;
+	struct aim_channel *c;
 	unsigned long cl_flags;
 	int retval;
 	int current_minor;
@@ -428,60 +430,60 @@
 		pr_info("Probing AIM with bad arguments");
 		return -EINVAL;
 	}
-	channel = get_channel(iface, channel_id);
-	if (channel)
+	c = get_channel(iface, channel_id);
+	if (c)
 		return -EEXIST;
 
 	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
 	if (current_minor < 0)
 		return current_minor;
 
-	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
-	if (!channel) {
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c) {
 		retval = -ENOMEM;
 		goto error_alloc_channel;
 	}
 
-	channel->devno = MKDEV(major, current_minor);
-	cdev_init(&channel->cdev, &channel_fops);
-	channel->cdev.owner = THIS_MODULE;
-	cdev_add(&channel->cdev, channel->devno, 1);
-	channel->iface = iface;
-	channel->cfg = cfg;
-	channel->channel_id = channel_id;
-	channel->mbo_offs = 0;
-	atomic_set(&channel->access_ref, -1);
-	INIT_KFIFO(channel->fifo);
-	retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
+	c->devno = MKDEV(major, current_minor);
+	cdev_init(&c->cdev, &channel_fops);
+	c->cdev.owner = THIS_MODULE;
+	cdev_add(&c->cdev, c->devno, 1);
+	c->iface = iface;
+	c->cfg = cfg;
+	c->channel_id = channel_id;
+	c->access_ref = 0;
+	spin_lock_init(&c->unlink);
+	INIT_KFIFO(c->fifo);
+	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
 	if (retval) {
 		pr_info("failed to alloc channel kfifo");
 		goto error_alloc_kfifo;
 	}
-	init_waitqueue_head(&channel->wq);
-	mutex_init(&channel->io_mutex);
+	init_waitqueue_head(&c->wq);
+	mutex_init(&c->io_mutex);
 	spin_lock_irqsave(&ch_list_lock, cl_flags);
-	list_add_tail(&channel->list, &channel_list);
+	list_add_tail(&c->list, &channel_list);
 	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
-	channel->dev = device_create(aim_class,
+	c->dev = device_create(aim_class,
 				     NULL,
-				     channel->devno,
+				     c->devno,
 				     NULL,
 				     "%s", name);
 
-	retval = IS_ERR(channel->dev);
-	if (retval) {
+	if (IS_ERR(c->dev)) {
+		retval = PTR_ERR(c->dev);
 		pr_info("failed to create new device node %s\n", name);
 		goto error_create_device;
 	}
-	kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
+	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
 	return 0;
 
 error_create_device:
-	kfifo_free(&channel->fifo);
-	list_del(&channel->list);
+	kfifo_free(&c->fifo);
+	list_del(&c->list);
 error_alloc_kfifo:
-	cdev_del(&channel->cdev);
-	kfree(channel);
+	cdev_del(&c->cdev);
+	kfree(c);
 error_alloc_channel:
 	ida_simple_remove(&minor_id, current_minor);
 	return retval;
@@ -526,19 +528,15 @@
 
 static void __exit mod_exit(void)
 {
-	struct aim_channel *channel, *tmp;
+	struct aim_channel *c, *tmp;
 
 	pr_info("exit module\n");
 
 	most_deregister_aim(&cdev_aim);
 
-	list_for_each_entry_safe(channel, tmp, &channel_list, list) {
-		device_destroy(aim_class, channel->devno);
-		cdev_del(&channel->cdev);
-		kfifo_free(&channel->fifo);
-		list_del(&channel->list);
-		ida_simple_remove(&minor_id, MINOR(channel->devno));
-		kfree(channel);
+	list_for_each_entry_safe(c, tmp, &channel_list, list) {
+		destroy_cdev(c);
+		kfree(c);
 	}
 	class_destroy(aim_class);
 	unregister_chrdev_region(aim_devno, 1);
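
The aim_read() rework above drops the keep_mbo/stacked_mbo bookkeeping in
favour of kfifo_peek() plus a running mbo_offs, and only skips the fifo entry
once it has been fully consumed.  What follows is a minimal userspace sketch of
that peek/partial-copy/skip pattern, not the driver's code: struct item,
struct reader and partial_read() are hypothetical names standing in for the
MBO, the channel and aim_read().

/*
 * Userspace sketch (hypothetical names, not kernel code) of the partial-read
 * pattern: peek at the head element, copy up to the caller's count, remember
 * an offset, and drop the element only once it is fully consumed.
 */
#include <stdio.h>
#include <string.h>

struct item {
	const char *data;
	size_t len;			/* plays the role of mbo->processed_length */
};

struct reader {
	struct item *queue;		/* stands in for the kfifo */
	size_t head, count;
	size_t offs;			/* plays the role of c->mbo_offs */
};

/* Copy at most 'want' bytes of the head item into 'buf'; return bytes copied. */
static size_t partial_read(struct reader *r, char *buf, size_t want)
{
	struct item *it;
	size_t to_copy;

	if (!r->count)
		return 0;		/* queue empty ("kfifo_is_empty") */

	it = &r->queue[r->head];	/* "kfifo_peek": look, do not remove yet */
	to_copy = it->len - r->offs;
	if (to_copy > want)
		to_copy = want;

	memcpy(buf, it->data + r->offs, to_copy);
	r->offs += to_copy;

	if (r->offs >= it->len) {	/* fully consumed: "kfifo_skip" */
		r->head++;
		r->count--;
		r->offs = 0;
	}
	return to_copy;
}

int main(void)
{
	struct item items[] = { { "hello, world", 12 } };
	struct reader r = { .queue = items, .count = 1 };
	char buf[8];
	size_t n;

	/* two 5-byte reads and one 2-byte read drain the single 12-byte item */
	while ((n = partial_read(&r, buf, 5)) > 0)
		printf("read %zu bytes: %.*s\n", n, (int)n, buf);
	return 0;
}
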
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 3c7beb0..2f42de4 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -431,6 +431,7 @@
 	u32 len = mbo->processed_length;
 	struct sk_buff *skb;
 	struct net_device *dev;
+	unsigned int skb_len;
 
 	nd = get_net_dev_context(mbo->ifp);
 	if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
@@ -482,9 +483,13 @@
 
 	memcpy(skb_put(skb, len), buf, len);
 	skb->protocol = eth_type_trans(skb, dev);
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
-	netif_rx(skb);
+	skb_len = skb->len;
+	if (netif_rx(skb) == NET_RX_SUCCESS) {
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb_len;
+	} else {
+		dev->stats.rx_dropped++;
+	}
 
 out:
 	most_put_mbo(mbo);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
index 17225759..3c52450 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -84,7 +84,7 @@
 struct lld_global_vars_t {
 	bool dim_is_initialized;
 	bool mcm_is_initialized;
-	struct dim2_regs *dim2; /* DIM2 core base address */
+	struct dim2_regs __iomem *dim2; /* DIM2 core base address */
 	u32 dbr_map[DBR_MAP_SIZE];
 };
 
@@ -650,7 +650,7 @@
 /* -------------------------------------------------------------------------- */
 /* API */
 
-u8 dim_startup(void *dim_base_address, u32 mlb_clock)
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock)
 {
 	g.dim_is_initialized = false;
 
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index 48cdd9c..fc73d4f 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -16,6 +16,7 @@
 #define _DIM2_HAL_H
 
 #include <linux/types.h>
+#include "dim2_reg.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -65,7 +66,7 @@
 	u16 done_sw_buffers_number; /*< Done software buffers number. */
 };
 
-u8 dim_startup(void *dim_base_address, u32 mlb_clock);
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock);
 
 void dim_shutdown(void);
 
@@ -103,9 +104,9 @@
 
 bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
 
-u32 dimcb_io_read(u32 *ptr32);
+u32 dimcb_io_read(u32 __iomem *ptr32);
 
-void dimcb_io_write(u32 *ptr32, u32 value);
+void dimcb_io_write(u32 __iomem *ptr32, u32 value);
 
 void dimcb_on_error(u8 error_id, const char *error_message);
 
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 327d738..c736310 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -99,7 +99,7 @@
 	struct most_channel_capability capabilities[DMA_CHANNELS];
 	struct most_interface most_iface;
 	char name[16 + sizeof "dim2-"];
-	void *io_base;
+	void __iomem *io_base;
 	unsigned int irq_ahb0;
 	int clk_speed;
 	struct task_struct *netinfo_task;
@@ -138,9 +138,9 @@
  * dimcb_io_read - callback from HAL to read an I/O register
  * @ptr32: register address
  */
-u32 dimcb_io_read(u32 *ptr32)
+u32 dimcb_io_read(u32 __iomem *ptr32)
 {
-	return __raw_readl(ptr32);
+	return readl(ptr32);
 }
 
 /**
@@ -148,9 +148,9 @@
  * @ptr32: register address
  * @value: value to write
  */
-void dimcb_io_write(u32 *ptr32, u32 value)
+void dimcb_io_write(u32 __iomem *ptr32, u32 value)
 {
-	__raw_writel(value, ptr32);
+	writel(value, ptr32);
 }
 
 /**
@@ -736,7 +736,7 @@
 	int ret, i;
 	struct kobject *kobj;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 
@@ -747,47 +747,31 @@
 	test_dev = dev;
 #else
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		pr_err("no memory region defined\n");
-		ret = -ENOENT;
-		goto err_free_dev;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-		pr_err("failed to request mem region\n");
-		ret = -EBUSY;
-		goto err_free_dev;
-	}
-
-	dev->io_base = ioremap(res->start, resource_size(res));
-	if (!dev->io_base) {
-		pr_err("failed to ioremap\n");
-		ret = -ENOMEM;
-		goto err_release_mem;
-	}
+	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dev->io_base))
+		return PTR_ERR(dev->io_base);
 
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0) {
-		pr_err("failed to get irq\n");
-		goto err_unmap_io;
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return -ENODEV;
 	}
 	dev->irq_ahb0 = ret;
 
-	ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev);
+	ret = devm_request_irq(&pdev->dev, dev->irq_ahb0, dim2_ahb_isr, 0,
+			       "mlb_ahb0", dev);
 	if (ret) {
-		pr_err("failed to request IRQ: %d, err: %d\n",
-		       dev->irq_ahb0, ret);
-		goto err_unmap_io;
+		dev_err(&pdev->dev, "failed to request IRQ: %d, err: %d\n",
+			dev->irq_ahb0, ret);
+		return ret;
 	}
 #endif
 	init_waitqueue_head(&dev->netinfo_waitq);
 	dev->deliver_netinfo = 0;
 	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
 					"dim2_netinfo");
-	if (IS_ERR(dev->netinfo_task)) {
+	if (IS_ERR(dev->netinfo_task))
 		ret = PTR_ERR(dev->netinfo_task);
-		goto err_free_irq;
-	}
 
 	for (i = 0; i < DMA_CHANNELS; i++) {
 		struct most_channel_capability *cap = dev->capabilities + i;
@@ -855,16 +839,6 @@
 	most_deregister_interface(&dev->most_iface);
 err_stop_thread:
 	kthread_stop(dev->netinfo_task);
-err_free_irq:
-#if !defined(ENABLE_HDM_TEST)
-	free_irq(dev->irq_ahb0, dev);
-err_unmap_io:
-	iounmap(dev->io_base);
-err_release_mem:
-	release_mem_region(res->start, resource_size(res));
-err_free_dev:
-#endif
-	kfree(dev);
 
 	return ret;
 }
@@ -878,7 +852,6 @@
 static int dim2_remove(struct platform_device *pdev)
 {
 	struct dim2_hdm *dev = platform_get_drvdata(pdev);
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	struct dim2_platform_data *pdata = pdev->dev.platform_data;
 	unsigned long flags;
 
@@ -892,13 +865,6 @@
 	dim2_sysfs_destroy(&dev->bus);
 	most_deregister_interface(&dev->most_iface);
 	kthread_stop(dev->netinfo_task);
-#if !defined(ENABLE_HDM_TEST)
-	free_irq(dev->irq_ahb0, dev);
-	iounmap(dev->io_base);
-	release_mem_region(res->start, resource_size(res));
-#endif
-	kfree(dev);
-	platform_set_drvdata(pdev, NULL);
 
 	/*
 	 * break link to local platform_device_id struct
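
The dim2_probe()/dim2_remove() changes above move the driver to managed
(devm_*) resources, which is what lets the err_free_irq/err_unmap_io/
err_release_mem unwinding and the matching remove-path code disappear.  A
hedged sketch of that general probe shape follows; the "foo" driver, foo_isr()
and foo_dev are hypothetical and only illustrate the pattern, they are not the
DIM2 code.

/* Sketch only: a minimal devm_*-based platform probe (hypothetical "foo"). */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo_dev {
	void __iomem *io_base;
	int irq;
};

static irqreturn_t foo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *fd;
	struct resource *res;
	int ret;

	/* devm_kzalloc(): freed automatically on probe failure or detach */
	fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	/* request_mem_region() + ioremap() + unwinding folded into one call */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fd->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fd->io_base))
		return PTR_ERR(fd->io_base);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	fd->irq = ret;

	/* free_irq() happens automatically when the device is unbound */
	ret = devm_request_irq(&pdev->dev, fd->irq, foo_isr, 0,
			       dev_name(&pdev->dev), fd);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, fd);
	return 0;	/* no goto-based cleanup ladder needed */
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = { .name = "foo" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");
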
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
index 1c94e33..4050e7c 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.h
@@ -18,7 +18,7 @@
 
 /* platform dependent data for dim2 interface */
 struct dim2_platform_data {
-	int (*init)(struct dim2_platform_data *pd, void *io_base,
+	int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
 		    int clk_speed);
 	void (*destroy)(struct dim2_platform_data *pd);
 	void *priv;
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
index c5b10c7..2b28e4a 100644
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
@@ -63,7 +63,6 @@
 static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
 				   const char *buf, size_t count)
 {
-	ssize_t ret;
 	struct medialb_bus *bus =
 		container_of(kobj, struct medialb_bus, kobj_group);
 	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
@@ -71,8 +70,7 @@
 	if (!xattr->store)
 		return -EIO;
 
-	ret = xattr->store(bus, buf, count);
-	return ret;
+	return xattr->store(bus, buf, count);
 }
 
 static struct sysfs_ops const bus_kobj_sysfs_ops = {
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 41690f8..9d5555d 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -40,7 +40,6 @@
 #define MAX_SUFFIX_LEN		10
 #define MAX_STRING_LEN		80
 #define MAX_BUF_SIZE		0xFFFF
-#define CEILING(x, y)		(((x) + (y) - 1) / (y))
 
 #define USB_VENDOR_ID_SMSC	0x0424  /* VID: SMSC */
 #define USB_DEV_ID_BRDG		0xC001  /* PID: USB Bridge */
@@ -137,7 +136,6 @@
 #define to_mdev(d) container_of(d, struct most_dev, iface)
 #define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
 
-static struct workqueue_struct *schedule_usb_work;
 static void wq_clear_halt(struct work_struct *wq_obj);
 static void wq_netinfo(struct work_struct *wq_obj);
 
@@ -226,6 +224,8 @@
 		kfree(anchor);
 	}
 	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+
+	cancel_work_sync(&anchor->clear_work_obj);
 }
 
 /**
@@ -411,7 +411,7 @@
 			mbo->status = MBO_E_INVAL;
 			usb_unlink_urb(urb);
 			INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
-			queue_work(schedule_usb_work, &anchor->clear_work_obj);
+			schedule_work(&anchor->clear_work_obj);
 			return;
 		case -ENODEV:
 		case -EPROTO:
@@ -575,7 +575,7 @@
 			mbo->status = MBO_E_INVAL;
 			usb_unlink_urb(urb);
 			INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
-			queue_work(schedule_usb_work, &anchor->clear_work_obj);
+			schedule_work(&anchor->clear_work_obj);
 			return;
 		case -ENODEV:
 		case -EPROTO:
@@ -785,7 +785,7 @@
 	temp_size += tail_space;
 
 	/* calculate extra length to comply w/ HW padding */
-	conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
+	conf->extra_len = (DIV_ROUND_UP(temp_size, USB_MTU) * USB_MTU)
 			  - conf->buffer_size;
 exit:
 	mdev->conf[channel] = *conf;
@@ -872,7 +872,7 @@
 {
 	struct most_dev *mdev = (struct most_dev *)data;
 
-	queue_work(schedule_usb_work, &mdev->poll_work_obj);
+	schedule_work(&mdev->poll_work_obj);
 	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
 	add_timer(&mdev->link_stat_timer);
 }
@@ -1415,19 +1415,13 @@
 		pr_err("could not register hdm_usb driver\n");
 		return -EIO;
 	}
-	schedule_usb_work = create_workqueue("hdmu_work");
-	if (!schedule_usb_work) {
-		pr_err("could not create workqueue\n");
-		usb_deregister(&hdm_usb);
-		return -ENOMEM;
-	}
+
 	return 0;
 }
 
 static void __exit hdm_usb_exit(void)
 {
 	pr_info("hdm_usb_exit()\n");
-	destroy_workqueue(schedule_usb_work);
 	usb_deregister(&hdm_usb);
 }
 
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index ed1ed25..7c619fe 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -35,7 +35,6 @@
 static struct class *most_class;
 static struct device *class_glue_dir;
 static struct ida mdev_id;
-static int modref;
 static int dummy_num_buffers;
 
 struct most_c_aim_obj {
@@ -66,7 +65,6 @@
 	struct most_c_aim_obj aim1;
 	struct list_head trash_fifo;
 	struct task_struct *hdm_enqueue_task;
-	struct mutex stop_task_mutex;
 	wait_queue_head_t hdm_fifo_wq;
 };
 
@@ -74,7 +72,6 @@
 
 struct most_inst_obj {
 	int dev_id;
-	atomic_t tainted;
 	struct most_interface *iface;
 	struct list_head channel_list;
 	struct most_c_obj *channel[MAX_CHANNELS];
@@ -82,6 +79,14 @@
 	struct list_head list;
 };
 
+static const struct {
+	int most_ch_data_type;
+	char *name;
+} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" },
+	{ MOST_CH_ASYNC, "async\n" },
+	{ MOST_CH_SYNC, "sync\n" },
+	{ MOST_CH_ISOC_AVP, "isoc_avp\n"} };
+
 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
 
 /**
@@ -95,8 +100,6 @@
 	_mbo;								\
 })
 
-static struct mutex deregister_mutex;
-
 /*		     ___	     ___
  *		     ___C H A N N E L___
  */
@@ -414,14 +417,12 @@
 				 struct most_c_attr *attr,
 				 char *buf)
 {
-	if (c->cfg.data_type & MOST_CH_CONTROL)
-		return snprintf(buf, PAGE_SIZE, "control\n");
-	else if (c->cfg.data_type & MOST_CH_ASYNC)
-		return snprintf(buf, PAGE_SIZE, "async\n");
-	else if (c->cfg.data_type & MOST_CH_SYNC)
-		return snprintf(buf, PAGE_SIZE, "sync\n");
-	else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
-		return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
+			return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+	}
 	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
 }
 
@@ -430,15 +431,16 @@
 				  const char *buf,
 				  size_t count)
 {
-	if (!strcmp(buf, "control\n")) {
-		c->cfg.data_type = MOST_CH_CONTROL;
-	} else if (!strcmp(buf, "async\n")) {
-		c->cfg.data_type = MOST_CH_ASYNC;
-	} else if (!strcmp(buf, "sync\n")) {
-		c->cfg.data_type = MOST_CH_SYNC;
-	} else if (!strcmp(buf, "isoc_avp\n")) {
-		c->cfg.data_type = MOST_CH_ISOC_AVP;
-	} else {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+		if (!strcmp(buf, ch_data_type[i].name)) {
+			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(ch_data_type)) {
 		pr_info("WARN: invalid attribute settings\n");
 		return -EINVAL;
 	}
@@ -551,29 +553,6 @@
 	return c;
 }
 
-/**
- * destroy_most_c_obj - channel release function
- * @c: pointer to channel object
- *
- * This decrements the reference counter of the channel object.
- * If the reference count turns zero, its release function is called.
- */
-static void destroy_most_c_obj(struct most_c_obj *c)
-{
-	if (c->aim0.ptr)
-		c->aim0.ptr->disconnect_channel(c->iface, c->channel_id);
-	if (c->aim1.ptr)
-		c->aim1.ptr->disconnect_channel(c->iface, c->channel_id);
-	c->aim0.ptr = NULL;
-	c->aim1.ptr = NULL;
-
-	mutex_lock(&deregister_mutex);
-	flush_trash_fifo(c);
-	flush_channel_fifos(c);
-	mutex_unlock(&deregister_mutex);
-	kobject_put(&c->kobj);
-}
-
 /*		     ___	       ___
  *		     ___I N S T A N C E___
  */
@@ -761,12 +740,10 @@
 {
 	struct most_c_obj *c, *tmp;
 
-	/* need to destroy channels first, since
-	 * each channel incremented the
-	 * reference count of the inst->kobj
-	 */
 	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
-		destroy_most_c_obj(c);
+		flush_trash_fifo(c);
+		flush_channel_fifos(c);
+		kobject_put(&c->kobj);
 	}
 	kobject_put(&inst->kobj);
 }
@@ -1006,11 +983,14 @@
 	else
 		return -ENOSPC;
 
+	*aim_ptr = aim_obj->driver;
 	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
 					     &c->cfg, &c->kobj, mdev_devnod);
-	if (ret)
+	if (ret) {
+		*aim_ptr = NULL;
 		return ret;
-	*aim_ptr = aim_obj->driver;
+	}
+
 	return len;
 }
 
@@ -1056,12 +1036,12 @@
 	if (IS_ERR(c))
 		return -ENODEV;
 
+	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+		return -EIO;
 	if (c->aim0.ptr == aim_obj->driver)
 		c->aim0.ptr = NULL;
 	if (c->aim1.ptr == aim_obj->driver)
 		c->aim1.ptr = NULL;
-	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
-		return -EIO;
 	return len;
 }
 
@@ -1279,7 +1259,6 @@
 	for (i = 0; i < c->cfg.num_buffers; i++) {
 		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 		if (!mbo) {
-			pr_info("WARN: Allocation of MBO failed.\n");
 			retval = i;
 			goto _exit;
 		}
@@ -1319,18 +1298,10 @@
  */
 int most_submit_mbo(struct mbo *mbo)
 {
-	struct most_c_obj *c;
-	struct most_inst_obj *i;
-
 	if (unlikely((!mbo) || (!mbo->context))) {
 		pr_err("Bad MBO or missing channel reference\n");
 		return -EINVAL;
 	}
-	c = mbo->context;
-	i = c->inst;
-
-	if (unlikely(atomic_read(&i->tainted)))
-		return -ENODEV;
 
 	nq_hdm_mbo(mbo);
 	return 0;
@@ -1387,7 +1358,7 @@
 	return i->channel[id];
 }
 
-int channel_has_mbo(struct most_interface *iface, int id)
+int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
 {
 	struct most_c_obj *c = get_channel_by_iface(iface, id);
 	unsigned long flags;
@@ -1396,6 +1367,11 @@
 	if (unlikely(!c))
 		return -EINVAL;
 
+	if (c->aim0.refs && c->aim1.refs &&
+	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+		return 0;
+
 	spin_lock_irqsave(&c->fifo_lock, flags);
 	empty = list_empty(&c->fifo);
 	spin_unlock_irqrestore(&c->fifo_lock, flags);
@@ -1456,17 +1432,8 @@
  */
 void most_put_mbo(struct mbo *mbo)
 {
-	struct most_c_obj *c;
-	struct most_inst_obj *i;
+	struct most_c_obj *c = mbo->context;
 
-	c = mbo->context;
-	i = c->inst;
-
-	if (unlikely(atomic_read(&i->tainted))) {
-		mbo->status = MBO_E_CLOSE;
-		trash_mbo(mbo);
-		return;
-	}
 	if (c->cfg.direction == MOST_CH_TX) {
 		arm_mbo(mbo);
 		return;
@@ -1546,7 +1513,6 @@
 		mutex_unlock(&c->start_mutex);
 		return -ENOLCK;
 	}
-	modref++;
 
 	c->cfg.extra_len = 0;
 	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
@@ -1588,7 +1554,6 @@
 
 error:
 	module_put(iface->mod);
-	modref--;
 	mutex_unlock(&c->start_mutex);
 	return ret;
 }
@@ -1616,24 +1581,12 @@
 	if (c->aim0.refs + c->aim1.refs >= 2)
 		goto out;
 
-	mutex_lock(&c->stop_task_mutex);
 	if (c->hdm_enqueue_task)
 		kthread_stop(c->hdm_enqueue_task);
 	c->hdm_enqueue_task = NULL;
-	mutex_unlock(&c->stop_task_mutex);
 
-	mutex_lock(&deregister_mutex);
-	if (atomic_read(&c->inst->tainted)) {
-		mutex_unlock(&deregister_mutex);
-		mutex_unlock(&c->start_mutex);
-		return -ENODEV;
-	}
-	mutex_unlock(&deregister_mutex);
-
-	if (iface->mod && modref) {
+	if (iface->mod)
 		module_put(iface->mod);
-		modref--;
-	}
 
 	c->is_poisoned = true;
 	if (c->iface->poison_channel(c->iface, c->channel_id)) {
@@ -1762,6 +1715,7 @@
 	inst = create_most_inst_obj(name);
 	if (!inst) {
 		pr_info("Failed to allocate interface instance\n");
+		ida_simple_remove(&mdev_id, id);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1769,7 +1723,6 @@
 	INIT_LIST_HEAD(&inst->channel_list);
 	inst->iface = iface;
 	inst->dev_id = id;
-	atomic_set(&inst->tainted, 0);
 	list_add_tail(&inst->list, &instance_list);
 
 	for (i = 0; i < iface->num_channels; i++) {
@@ -1808,7 +1761,6 @@
 		init_completion(&c->cleanup);
 		atomic_set(&c->mbo_ref, 0);
 		mutex_init(&c->start_mutex);
-		mutex_init(&c->stop_task_mutex);
 		list_add_tail(&c->list, &inst->channel_list);
 	}
 	pr_info("registered new MOST device mdev%d (%s)\n",
@@ -1818,6 +1770,7 @@
 free_instance:
 	pr_info("Failed allocate channel(s)\n");
 	list_del(&inst->list);
+	ida_simple_remove(&mdev_id, id);
 	destroy_most_inst_obj(inst);
 	return ERR_PTR(-ENOMEM);
 }
@@ -1835,37 +1788,24 @@
 	struct most_inst_obj *i = iface->priv;
 	struct most_c_obj *c;
 
-	mutex_lock(&deregister_mutex);
 	if (unlikely(!i)) {
 		pr_info("Bad Interface\n");
-		mutex_unlock(&deregister_mutex);
 		return;
 	}
 	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
 		iface->description);
 
-	atomic_set(&i->tainted, 1);
-	mutex_unlock(&deregister_mutex);
-
-	while (modref) {
-		if (iface->mod && modref)
-			module_put(iface->mod);
-		modref--;
-	}
-
 	list_for_each_entry(c, &i->channel_list, list) {
-		if (c->aim0.refs + c->aim1.refs <= 0)
-			continue;
-
-		mutex_lock(&c->stop_task_mutex);
-		if (c->hdm_enqueue_task)
-			kthread_stop(c->hdm_enqueue_task);
-		c->hdm_enqueue_task = NULL;
-		mutex_unlock(&c->stop_task_mutex);
-
-		if (iface->poison_channel(iface, c->channel_id))
-			pr_err("Can't poison channel %d\n", c->channel_id);
+		if (c->aim0.ptr)
+			c->aim0.ptr->disconnect_channel(c->iface,
+							c->channel_id);
+		if (c->aim1.ptr)
+			c->aim1.ptr->disconnect_channel(c->iface,
+							c->channel_id);
+		c->aim0.ptr = NULL;
+		c->aim1.ptr = NULL;
 	}
+
 	ida_simple_remove(&mdev_id, i->dev_id);
 	list_del(&i->list);
 	destroy_most_inst_obj(i);
@@ -1913,41 +1853,52 @@
 
 static int __init most_init(void)
 {
+	int err;
+
 	pr_info("init()\n");
 	INIT_LIST_HEAD(&instance_list);
 	INIT_LIST_HEAD(&aim_list);
-	mutex_init(&deregister_mutex);
 	ida_init(&mdev_id);
 
-	if (bus_register(&most_bus)) {
+	err = bus_register(&most_bus);
+	if (err) {
 		pr_info("Cannot register most bus\n");
-		goto exit;
+		return err;
 	}
 
 	most_class = class_create(THIS_MODULE, "most");
 	if (IS_ERR(most_class)) {
 		pr_info("No udev support.\n");
+		err = PTR_ERR(most_class);
 		goto exit_bus;
 	}
-	if (driver_register(&mostcore)) {
+
+	err = driver_register(&mostcore);
+	if (err) {
 		pr_info("Cannot register core driver\n");
 		goto exit_class;
 	}
 
 	class_glue_dir =
 		device_create(most_class, NULL, 0, NULL, "mostcore");
-	if (!class_glue_dir)
+	if (IS_ERR(class_glue_dir)) {
+		err = PTR_ERR(class_glue_dir);
 		goto exit_driver;
+	}
 
 	most_aim_kset =
 		kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
-	if (!most_aim_kset)
+	if (!most_aim_kset) {
+		err = -ENOMEM;
 		goto exit_class_container;
+	}
 
 	most_inst_kset =
 		kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
-	if (!most_inst_kset)
+	if (!most_inst_kset) {
+		err = -ENOMEM;
 		goto exit_driver_kset;
+	}
 
 	return 0;
 
@@ -1961,8 +1912,7 @@
 	class_destroy(most_class);
 exit_bus:
 	bus_unregister(&most_bus);
-exit:
-	return -ENOMEM;
+	return err;
 }
 
 static void __exit most_exit(void)
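
Earlier in this core.c diff the duplicated if/else chains in
show_set_datatype() and store_set_datatype() are replaced by a single
ch_data_type[] lookup table.  A small standalone C sketch of that table-driven
mapping follows; the enum values and function names are hypothetical, only the
pattern mirrors the hunk above.

/* Sketch of a table-driven name<->flag mapping (hypothetical names). */
#include <stdio.h>
#include <string.h>

enum ch_type {
	CH_CONTROL = 1 << 0,
	CH_ASYNC   = 1 << 1,
	CH_SYNC    = 1 << 2,
	CH_ISOC    = 1 << 3,
};

static const struct {
	enum ch_type type;
	const char *name;
} ch_data_type[] = {
	{ CH_CONTROL, "control\n"  },
	{ CH_ASYNC,   "async\n"    },
	{ CH_SYNC,    "sync\n"     },
	{ CH_ISOC,    "isoc_avp\n" },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* "show" direction: configured flag -> sysfs string */
static const char *type_to_name(enum ch_type t)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++)
		if (t & ch_data_type[i].type)
			return ch_data_type[i].name;
	return "unconfigured\n";
}

/* "store" direction: written string -> flag; returns 0 on success */
static int name_to_type(const char *buf, enum ch_type *out)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			*out = ch_data_type[i].type;
			return 0;
		}
	}
	return -1;	/* mirrors the -EINVAL path in the hunk */
}

int main(void)
{
	enum ch_type t;

	if (!name_to_type("sync\n", &t))
		printf("stored %s", type_to_name(t));
	return 0;
}
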
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
index bda3850..60e018e 100644
--- a/drivers/staging/most/mostcore/mostcore.h
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -310,7 +310,8 @@
 struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
 			 struct most_aim *);
 void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+		    struct most_aim *aim);
 int most_start_channel(struct most_interface *iface, int channel_idx,
 		       struct most_aim *);
 int most_stop_channel(struct most_interface *iface, int channel_idx,
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 197d112..75fe61c 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -63,8 +63,8 @@
 };
 #endif
 
-/*
- * spinand_cmd - to process a command to send to the SPI Nand
+/**
+ * spinand_cmd - process a command to send to the SPI Nand
  * Description:
  *    Set up the command buffer to send to the SPI controller.
  *    The command buffer has to initialized to 0.
@@ -110,10 +110,10 @@
 	return spi_sync(spi, &message);
 }
 
-/*
- * spinand_read_id- Read SPI Nand ID
+/**
+ * spinand_read_id - Read SPI Nand ID
  * Description:
- *    Read ID: read two ID bytes from the SPI Nand device
+ *    read two ID bytes from the SPI Nand device
  */
 static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
 {
@@ -135,8 +135,8 @@
 	return retval;
 }
 
-/*
- * spinand_read_status- send command 0xf to the SPI Nand status register
+/**
+ * spinand_read_status - send command 0xf to the SPI Nand status register
  * Description:
  *    After read, write, or erase, the Nand device is expected to set the
  *    busy status.
@@ -175,7 +175,7 @@
 		retval = spinand_read_status(spi_nand, &stat);
 		if (retval < 0)
 			return -1;
-		else if (!(stat & 0x1))
+		if (!(stat & 0x1))
 			break;
 
 		cond_resched();
@@ -188,7 +188,7 @@
 }
 
 /**
- * spinand_get_otp- send command 0xf to read the SPI Nand OTP register
+ * spinand_get_otp - send command 0xf to read the SPI Nand OTP register
  * Description:
  *   There is one bit( bit 0x10 ) to set or to clear the internal ECC.
  *   Enable chip internal ECC, set the bit to 1
@@ -212,7 +212,7 @@
 }
 
 /**
- * spinand_set_otp- send command 0x1f to write the SPI Nand OTP register
+ * spinand_set_otp - send command 0x1f to write the SPI Nand OTP register
  * Description:
  *   There is one bit( bit 0x10 ) to set or to clear the internal ECC.
  *   Enable chip internal ECC, set the bit to 1
@@ -223,11 +223,11 @@
 	int retval;
 	struct spinand_cmd cmd = {0};
 
-	cmd.cmd = CMD_WRITE_REG,
-	cmd.n_addr = 1,
-	cmd.addr[0] = REG_OTP,
-	cmd.n_tx = 1,
-	cmd.tx_buf = otp,
+	cmd.cmd = CMD_WRITE_REG;
+	cmd.n_addr = 1;
+	cmd.addr[0] = REG_OTP;
+	cmd.n_tx = 1;
+	cmd.tx_buf = otp;
 
 	retval = spinand_cmd(spi_nand, &cmd);
 	if (retval < 0)
@@ -238,7 +238,7 @@
 
 #ifdef CONFIG_MTD_SPINAND_ONDIEECC
 /**
- * spinand_enable_ecc- send command 0x1f to write the SPI Nand OTP register
+ * spinand_enable_ecc - send command 0x1f to write the SPI Nand OTP register
  * Description:
  *   There is one bit( bit 0x10 ) to set or to clear the internal ECC.
  *   Enable chip internal ECC, set the bit to 1
@@ -283,7 +283,7 @@
 }
 
 /**
- * spinand_write_enable- send command 0x06 to enable write or erase the
+ * spinand_write_enable - send command 0x06 to enable write or erase the
  * Nand cells
  * Description:
  *   Before write and erase the Nand cells, the write enable has to be set.
@@ -313,9 +313,9 @@
 	return spinand_cmd(spi_nand, &cmd);
 }
 
-/*
- * spinand_read_from_cache- send command 0x03 to read out the data from the
- * cache register(2112 bytes max)
+/**
+ * spinand_read_from_cache - send command 0x03 to read out the data from the
+ * cache register (2112 bytes max)
  * Description:
  *   The read can specify 1 to 2112 bytes of data read at the corresponding
  *   locations.
@@ -341,15 +341,15 @@
 	return spinand_cmd(spi_nand, &cmd);
 }
 
-/*
- * spinand_read_page-to read a page with:
+/**
+ * spinand_read_page - read a page
  * @page_id: the physical page number
  * @offset:  the location from 0 to 2111
  * @len:     number of bytes to read
  * @rbuf:    read buffer to hold @len bytes
  *
  * Description:
- *   The read includes two commands to the Nand: 0x13 and 0x03 commands
+ *   The read includes two commands to the Nand - 0x13 and 0x03 commands
  *   Poll to read status to wait for tRD time.
  */
 static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
@@ -408,11 +408,11 @@
 	return ret;
 }
 
-/*
- * spinand_program_data_to_cache--to write a page to cache with:
+/**
+ * spinand_program_data_to_cache - write a page to cache
  * @byte_id: the location to write to the cache
  * @len:     number of bytes to write
- * @rbuf:    read buffer to hold @len bytes
+ * @wbuf:    write buffer holding @len bytes
  *
  * Description:
  *   The write command used here is 0x84--indicating that the cache is
@@ -439,7 +439,7 @@
 }
 
 /**
- * spinand_program_execute--to write a page from cache to the Nand array with
+ * spinand_program_execute - write a page from cache to the Nand array
  * @page_id: the physical page location to write the page.
  *
  * Description:
@@ -462,11 +462,11 @@
 }
 
 /**
- * spinand_program_page--to write a page with:
+ * spinand_program_page - write a page
  * @page_id: the physical page location to write the page.
  * @offset:  the location from the cache starting from 0 to 2111
  * @len:     the number of bytes to write
- * @wbuf:    the buffer to hold the number of bytes
+ * @buf:     the buffer holding @len bytes
  *
  * Description:
  *   The commands used here are 0x06, 0x84, and 0x10--indicating that
@@ -483,8 +483,11 @@
 #ifdef CONFIG_MTD_SPINAND_ONDIEECC
 	unsigned int i, j;
 
-	enable_read_hw_ecc = 0;
 	wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
+	if (!wbuf)
+		return -ENOMEM;
+
+	enable_read_hw_ecc = 0;
 	spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
 
 	for (i = offset, j = 0; i < len; i++, j++)
@@ -547,7 +550,7 @@
 }
 
 /**
- * spinand_erase_block_erase--to erase a page with:
+ * spinand_erase_block_erase - erase a block
  * @block_id: the physical block location to erase.
  *
  * Description:
@@ -570,7 +573,7 @@
 }
 
 /**
- * spinand_erase_block--to erase a page with:
+ * spinand_erase_block - erase a block
  * @block_id: the physical block location to erase.
  *
  * Description:
@@ -810,7 +813,7 @@
 }
 
 /**
- * spinand_lock_block- send write register 0x1f command to the Nand device
+ * spinand_lock_block - send write register 0x1f command to the Nand device
  *
  * Description:
  *    After power up, all the Nand blocks are locked.  This function allows
@@ -837,12 +840,12 @@
 	return ret;
 }
 
-/*
+/**
  * spinand_probe - [spinand Interface]
  * @spi_nand: registered device driver.
  *
  * Description:
- *   To set up the device driver parameters to make the device available.
+ *   Set up the device driver parameters to make the device available.
  */
 static int spinand_probe(struct spi_device *spi_nand)
 {
@@ -916,12 +919,12 @@
 	return mtd_device_register(mtd, NULL, 0);
 }
 
-/*
- * spinand_remove: Remove the device driver
+/**
+ * spinand_remove - remove the device driver
  * @spi: the spi device.
  *
  * Description:
- *   To remove the device driver parameters and free up allocated memories.
+ *   Remove the device driver parameters and free up allocated memory.
  */
 static int spinand_remove(struct spi_device *spi)
 {
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
index 7806c2b..abf4c71 100644
--- a/drivers/staging/netlogic/platform_net.c
+++ b/drivers/staging/netlogic/platform_net.c
@@ -86,7 +86,8 @@
 
 	res++;
 	res->name = "gmac";
-	res->start = res->end = irq;
+	res->start = irq;
+	res->end = irq;
 	res->flags = IORESOURCE_IRQ;
 }
 
@@ -121,8 +122,8 @@
 		ndata1.phy_addr[mac] = mac + 4 + 0x10;
 
 		xlr_resource_init(&xlr_net1_res[mac * 2],
-				xlr_gmac_offsets[mac + 4],
-				xlr_gmac_irqs[mac + 4]);
+				  xlr_gmac_offsets[mac + 4],
+				  xlr_gmac_irqs[mac + 4]);
 	}
 	xlr_net_dev1.num_resources = 8;
 
@@ -169,7 +170,7 @@
 		xlr_net_dev0.num_resources = 2;
 
 		xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0],
-				xlr_gmac_irqs[0]);
+				  xlr_gmac_irqs[0]);
 		platform_device_register(&xlr_net_dev0);
 
 		/* second block is XAUI, not supported yet */
@@ -182,7 +183,7 @@
 			ndata0.phy_addr[mac] = mac + 0x10;
 
 			xlr_resource_init(&xlr_net0_res[mac * 2],
-					xlr_gmac_offsets[mac],
+					  xlr_gmac_offsets[mac],
 					xlr_gmac_irqs[mac]);
 		}
 		xlr_net_dev0.num_resources = 8;
@@ -208,7 +209,6 @@
 		.gpio_addr	= NULL,
 	};
 
-
 	static struct platform_device xlr_net_dev0 = {
 		.name		= "xlr-net",
 		.id		= 0,
@@ -223,7 +223,7 @@
 		ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
 		ndata0.phy_addr[mac] = mac;
 		xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac],
-				xlr_gmac_irqs[mac]);
+				  xlr_gmac_irqs[mac]);
 	}
 	xlr_net_dev0.num_resources = 8;
 	xlr_net_dev0.resource = xlr_net0_res;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 0b4e819..0015847 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -69,8 +69,7 @@
 	return __raw_readl(base + reg);
 }
 
-static inline void xlr_reg_update(u32 *base_addr,
-		u32 off, u32 val, u32 mask)
+static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask)
 {
 	u32 tmp;
 
@@ -122,8 +121,8 @@
 	return skb->data;
 }
 
-static void xlr_net_fmn_handler(int bkt, int src_stnid, int size,
-		int code, struct nlm_fmn_msg *msg, void *arg)
+static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
+				struct nlm_fmn_msg *msg, void *arg)
 {
 	struct sk_buff *skb;
 	void *skb_data = NULL;
@@ -131,13 +130,13 @@
 	struct xlr_net_priv *priv;
 	u32 port, length;
 	unsigned char *addr;
-	struct xlr_adapter *adapter = (struct xlr_adapter *) arg;
+	struct xlr_adapter *adapter = (struct xlr_adapter *)arg;
 
 	length = (msg->msg0 >> 40) & 0x3fff;
 	if (length == 0) {
 		addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
 		addr = addr - MAC_SKB_BACK_PTR_SIZE;
-		skb = (struct sk_buff *) *(unsigned long *)addr;
+		skb = (struct sk_buff *)(*(unsigned long *)addr);
 		dev_kfree_skb_any((struct sk_buff *)addr);
 	} else {
 		addr = (unsigned char *)
@@ -145,9 +144,9 @@
 		length = length - BYTE_OFFSET - MAC_CRC_LEN;
 		port = ((int)msg->msg0) & 0x0f;
 		addr = addr - MAC_SKB_BACK_PTR_SIZE;
-		skb = (struct sk_buff *) *(unsigned long *)addr;
+		skb = (struct sk_buff *)(*(unsigned long *)addr);
 		skb->dev = adapter->netdev[port];
-		if (skb->dev == NULL)
+		if (!skb->dev)
 			return;
 		ndev = skb->dev;
 		priv = netdev_priv(ndev);
@@ -207,7 +206,7 @@
 	struct xlr_net_priv *priv = netdev_priv(ndev);
 	int i;
 
-	for (i = 0; i < MAX_FRIN_SPILL/4; i++) {
+	for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
 		skb_data = xlr_alloc_skb();
 		if (!skb_data) {
 			pr_err("SKB allocation failed\n");
@@ -252,7 +251,7 @@
 }
 
 static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
-		struct sk_buff *skb)
+			     struct sk_buff *skb)
 {
 	unsigned long physkb = virt_to_phys(skb);
 	int cpu_core = nlm_core_id();
@@ -266,12 +265,13 @@
 		((u64)fr_stn_id << 54)	|	/* Free back id */
 		(u64)0 << 40		|	/* Set len to 0 */
 		((u64)physkb  & 0xffffffff));	/* 32bit address */
-	msg->msg2 = msg->msg3 = 0;
+	msg->msg2 = 0;
+	msg->msg3 = 0;
 }
 
 static void __maybe_unused xlr_wakeup_queue(unsigned long dev)
 {
-	struct net_device *ndev = (struct net_device *) dev;
+	struct net_device *ndev = (struct net_device *)dev;
 	struct xlr_net_priv *priv = netdev_priv(ndev);
 	struct phy_device *phydev = xlr_get_phydev(priv);
 
@@ -280,7 +280,7 @@
 }
 
 static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
-		struct net_device *ndev)
+				      struct net_device *ndev)
 {
 	struct nlm_fmn_msg msg;
 	struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -309,10 +309,10 @@
 
 	/* set mac station address */
 	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
-		((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
-		(ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
+		     ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
+		     (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
 	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
-		((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
+		     ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
 
 	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
 	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
@@ -320,12 +320,12 @@
 	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
 
 	xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
-		(1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
-		(1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
-		(1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
+		     (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
+		     (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
+		     (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
 
 	if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
-			priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
+	    priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
 		xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
 }
 
@@ -406,7 +406,8 @@
 }
 
 static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
-		struct rtnl_link_stats64 *stats)
+						 struct rtnl_link_stats64 *stats
+						 )
 {
 	xlr_stats(ndev, stats);
 	return stats;
@@ -426,7 +427,7 @@
  * Gmac init
  */
 static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
-		int reg_start_1, int reg_size, int size)
+			      int reg_start_1, int reg_size, int size)
 {
 	void *spill;
 	u32 *base;
@@ -436,13 +437,15 @@
 	base = priv->base_addr;
 	spill_size = size;
 	spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_ATOMIC);
-	if (!spill)
+	if (!spill) {
 		pr_err("Unable to allocate memory for spill area!\n");
+		return ZERO_SIZE_PTR;
+	}
 
 	spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
 	phys_addr = virt_to_phys(spill);
 	dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
-			size, phys_addr);
+		size, phys_addr);
 	xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
 	xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
 	xlr_nae_wreg(base, reg_size, spill_size);
@@ -511,19 +514,19 @@
 
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
-			((bkt_map >> 32) & 0xffffffff));
+		     ((bkt_map >> 32) & 0xffffffff));
 
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
-			((bkt_map >> 32) & 0xffffffff));
+		     ((bkt_map >> 32) & 0xffffffff));
 
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
-			((bkt_map >> 32) & 0xffffffff));
+		     ((bkt_map >> 32) & 0xffffffff));
 
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
 	xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
-			((bkt_map >> 32) & 0xffffffff));
+		     ((bkt_map >> 32) & 0xffffffff));
 }
 
 /*
@@ -541,8 +544,8 @@
 	/* Setting non-core MsgBktSize(0x321 - 0x325) */
 	for (i = start_stn_id; i <= end_stn_id; i++) {
 		xlr_nae_wreg(priv->base_addr,
-				R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
-				bucket_size[i]);
+			     R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
+			     bucket_size[i]);
 	}
 
 	/*
@@ -552,8 +555,8 @@
 	for (i = 0; i < 8; i++) {
 		for (j = 0; j < 8; j++)
 			xlr_nae_wreg(priv->base_addr,
-					(R_CC_CPU0_0 + (i * 8)) + j,
-					gmac->credit_config[(i * 8) + j]);
+				     (R_CC_CPU0_0 + (i * 8)) + j,
+				     gmac->credit_config[(i * 8) + j]);
 	}
 
 	xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
@@ -567,7 +570,7 @@
 	if (err)
 		return err;
 	nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
-			priv->adapter);
+				 priv->adapter);
 	return 0;
 }
 
@@ -583,7 +586,7 @@
 	cpu_mask = priv->nd->cpu_mask;
 
 	pr_info("Using %s-based distribution\n",
-			(use_bkt) ? "bucket" : "class");
+		(use_bkt) ? "bucket" : "class");
 	j = 0;
 	for (i = 0; i < 32; i++) {
 		if ((1 << i) & cpu_mask) {
@@ -614,7 +617,7 @@
 		val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
 				(c2 << 7) | (b2 << 1) | (use_bkt << 0));
 		dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
-				i, b1, b2, c1, c2);
+			i, b1, b2, c1, c2);
 		xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
 		c1 = c2;
 	}
@@ -629,16 +632,16 @@
 
 	/* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/
 	xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
-			((0x7f << 8) | (1 << 1)));
+		     ((0x7f << 8) | (1 << 1)));
 
 	/* configure the parser : L2 Type is configured in the bootloader */
 	/* extract IP: src, dest protocol */
 	xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
-			(9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
-			(0x0800 << 0));
+		     (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
+		     (0x0800 << 0));
 	xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
-			(9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
-			(16 << 4) | 4);
+		     (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
+		     (16 << 4) | 4);
 
 	/* Configure to extract SRC port and Dest port for TCP and UDP pkts */
 	xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
@@ -663,7 +666,7 @@
 	xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);
 
 	/* Write the data which starts the write cycle */
-	xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32) val);
+	xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val);
 
 	/* poll for the read cycle to complete */
 	while (!timedout) {
@@ -692,11 +695,11 @@
 
 	/* setup the phy reg to be used */
 	xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
-			(phy_addr << 8) | (regnum << 0));
+		     (phy_addr << 8) | (regnum << 0));
 
 	/* Issue the read command */
 	xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
-			(1 << O_MII_MGMT_COMMAND__rstat));
+		     (1 << O_MII_MGMT_COMMAND__rstat));
 
 	/* poll for the read cycle to complete */
 	while (!timedout) {
@@ -724,7 +727,7 @@
 
 	ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
 	dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
-			phy_addr, regnum, val, ret);
+		phy_addr, regnum, val, ret);
 	return ret;
 }
 
@@ -735,7 +738,7 @@
 
 	ret =  xlr_phy_read(priv->mii_addr, phy_addr, regnum);
 	dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
-			phy_addr, regnum, ret);
+		phy_addr, regnum, ret);
 	return ret;
 }
 
@@ -797,13 +800,16 @@
 		if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
 			if (speed == SPEED_10)
 				xlr_nae_wreg(priv->base_addr,
-					R_INTERFACE_CONTROL, SGMII_SPEED_10);
+					     R_INTERFACE_CONTROL,
+					     SGMII_SPEED_10);
 			if (speed == SPEED_100)
 				xlr_nae_wreg(priv->base_addr,
-					R_INTERFACE_CONTROL, SGMII_SPEED_100);
+					     R_INTERFACE_CONTROL,
+					     SGMII_SPEED_100);
 			if (speed == SPEED_1000)
 				xlr_nae_wreg(priv->base_addr,
-					R_INTERFACE_CONTROL, SGMII_SPEED_1000);
+					     R_INTERFACE_CONTROL,
+					     SGMII_SPEED_1000);
 		}
 		if (speed == SPEED_10)
 			xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
@@ -864,7 +870,7 @@
 }
 
 static int xlr_setup_mdio(struct xlr_net_priv *priv,
-		struct platform_device *pdev)
+			  struct platform_device *pdev)
 {
 	int err;
 
@@ -877,7 +883,7 @@
 	priv->mii_bus->priv = priv;
 	priv->mii_bus->name = "xlr-mdio";
 	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
-			priv->mii_bus->name, priv->port_id);
+		 priv->mii_bus->name, priv->port_id);
 	priv->mii_bus->read = xlr_mii_read;
 	priv->mii_bus->write = xlr_mii_write;
 	priv->mii_bus->parent = &pdev->dev;
@@ -910,25 +916,31 @@
 
 	/* Setup MAC_CONFIG reg if (xls & rgmii) */
 	if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
-			priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
+	    priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
 		xlr_reg_update(priv->base_addr, R_RX_CONTROL,
-			(1 << O_RX_CONTROL__RGMII), (1 << O_RX_CONTROL__RGMII));
+			       (1 << O_RX_CONTROL__RGMII),
+			       (1 << O_RX_CONTROL__RGMII));
 
 	/* Rx Tx enable */
 	xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
-		((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
-		(1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)),
-		((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
-		(1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)));
+		       ((1 << O_MAC_CONFIG_1__rxen) |
+			(1 << O_MAC_CONFIG_1__txen) |
+			(1 << O_MAC_CONFIG_1__rxfc) |
+			(1 << O_MAC_CONFIG_1__txfc)),
+		       ((1 << O_MAC_CONFIG_1__rxen) |
+			(1 << O_MAC_CONFIG_1__txen) |
+			(1 << O_MAC_CONFIG_1__rxfc) |
+			(1 << O_MAC_CONFIG_1__txfc)));
 
 	/* Setup tx control reg */
 	xlr_reg_update(priv->base_addr, R_TX_CONTROL,
-		((1 << O_TX_CONTROL__TxEnable) |
-		(512 << O_TX_CONTROL__TxThreshold)), 0x3fff);
+		       ((1 << O_TX_CONTROL__TXENABLE) |
+		       (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff);
 
 	/* Setup rx control reg */
 	xlr_reg_update(priv->base_addr, R_RX_CONTROL,
-		1 << O_RX_CONTROL__RxEnable, 1 << O_RX_CONTROL__RxEnable);
+		       1 << O_RX_CONTROL__RXENABLE,
+		       1 << O_RX_CONTROL__RXENABLE);
 }
 
 static void xlr_port_disable(struct xlr_net_priv *priv)
@@ -936,25 +948,26 @@
 	/* Setup MAC_CONFIG reg */
 	/* Rx Tx disable*/
 	xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
-		((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
-		(1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)),
-		0x0);
+		       ((1 << O_MAC_CONFIG_1__rxen) |
+			(1 << O_MAC_CONFIG_1__txen) |
+			(1 << O_MAC_CONFIG_1__rxfc) |
+			(1 << O_MAC_CONFIG_1__txfc)), 0x0);
 
 	/* Setup tx control reg */
 	xlr_reg_update(priv->base_addr, R_TX_CONTROL,
-		((1 << O_TX_CONTROL__TxEnable) |
-		(512 << O_TX_CONTROL__TxThreshold)), 0);
+		       ((1 << O_TX_CONTROL__TXENABLE) |
+		       (512 << O_TX_CONTROL__TXTHRESHOLD)), 0);
 
 	/* Setup rx control reg */
 	xlr_reg_update(priv->base_addr, R_RX_CONTROL,
-		1 << O_RX_CONTROL__RxEnable, 0);
+		       1 << O_RX_CONTROL__RXENABLE, 0);
 }
 
 /*
  * Initialization of gmac
  */
 static int xlr_gmac_init(struct xlr_net_priv *priv,
-		struct platform_device *pdev)
+			 struct platform_device *pdev)
 {
 	int ret;
 
@@ -963,9 +976,9 @@
 	xlr_port_disable(priv);
 
 	xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
-			(1 << O_DESC_PACK_CTRL__MaxEntry)
-			| (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset)
-			| (1600 << O_DESC_PACK_CTRL__RegularSize));
+		     (1 << O_DESC_PACK_CTRL__MAXENTRY) |
+		     (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) |
+		     (1600 << O_DESC_PACK_CTRL__REGULARSIZE));
 
 	ret = xlr_setup_mdio(priv, pdev);
 	if (ret)
@@ -977,21 +990,14 @@
 	/* speed 2.5Mhz */
 	xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
 	/* Setup Interrupt mask reg */
-	xlr_nae_wreg(priv->base_addr, R_INTMASK,
-		(1 << O_INTMASK__TxIllegal)	|
-		(1 << O_INTMASK__MDInt)		|
-		(1 << O_INTMASK__TxFetchError)	|
-		(1 << O_INTMASK__P2PSpillEcc)	|
-		(1 << O_INTMASK__TagFull)	|
-		(1 << O_INTMASK__Underrun)	|
-		(1 << O_INTMASK__Abort)
-		);
+	xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) |
+		     (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) |
+		     (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) |
+		     (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT));
 
 	/* Clear all stats */
-	xlr_reg_update(priv->base_addr, R_STATCTRL,
-		0, 1 << O_STATCTRL__ClrCnt);
-	xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2,
-		1 << 2);
+	xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT);
+	xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2);
 	return 0;
 }
 
@@ -1019,7 +1025,7 @@
 	 * Each controller has 4 gmac ports, mapping each controller
 	 * under one parent device, 4 gmac ports under one device.
 	 */
-	for (port = 0; port < pdev->num_resources/2; port++) {
+	for (port = 0; port < pdev->num_resources / 2; port++) {
 		ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
 		if (!ndev) {
 			pr_err("Allocation of Ethernet device failed\n");
@@ -1035,7 +1041,7 @@
 
 		if (res == NULL) {
 			pr_err("No memory resource for MAC %d\n",
-					priv->port_id);
+			       priv->port_id);
 			err = -ENODEV;
 			goto err_gmac;
 		}
@@ -1048,7 +1054,7 @@
 		adapter->netdev[port] = ndev;
 
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
-		if (res == NULL) {
+		if (!res) {
 			pr_err("No irq resource for MAC %d\n", priv->port_id);
 			err = -ENODEV;
 			goto err_gmac;
@@ -1098,7 +1104,7 @@
 		err = register_netdev(ndev);
 		if (err) {
 			pr_err("Registering netdev failed for gmac%d\n",
-					priv->port_id);
+			       priv->port_id);
 			goto err_netdev;
 		}
 		platform_set_drvdata(pdev, priv);
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index 7ae8874..f76e16c 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -277,332 +277,332 @@
 #define   O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID                      0
 #define R_HASH_TABLE_VECTOR                                         0x30
 #define R_TX_CONTROL                                                 0x0A0
-#define   O_TX_CONTROL__Tx15Halt                                     31
-#define   O_TX_CONTROL__Tx14Halt                                     30
-#define   O_TX_CONTROL__Tx13Halt                                     29
-#define   O_TX_CONTROL__Tx12Halt                                     28
-#define   O_TX_CONTROL__Tx11Halt                                     27
-#define   O_TX_CONTROL__Tx10Halt                                     26
-#define   O_TX_CONTROL__Tx9Halt                                      25
-#define   O_TX_CONTROL__Tx8Halt                                      24
-#define   O_TX_CONTROL__Tx7Halt                                      23
-#define   O_TX_CONTROL__Tx6Halt                                      22
-#define   O_TX_CONTROL__Tx5Halt                                      21
-#define   O_TX_CONTROL__Tx4Halt                                      20
-#define   O_TX_CONTROL__Tx3Halt                                      19
-#define   O_TX_CONTROL__Tx2Halt                                      18
-#define   O_TX_CONTROL__Tx1Halt                                      17
-#define   O_TX_CONTROL__Tx0Halt                                      16
-#define   O_TX_CONTROL__TxIdle                                       15
-#define   O_TX_CONTROL__TxEnable                                     14
-#define   O_TX_CONTROL__TxThreshold                                  0
-#define   W_TX_CONTROL__TxThreshold                                  14
+#define   O_TX_CONTROL__TX15HALT                                     31
+#define   O_TX_CONTROL__TX14HALT                                     30
+#define   O_TX_CONTROL__TX13HALT                                     29
+#define   O_TX_CONTROL__TX12HALT                                     28
+#define   O_TX_CONTROL__TX11HALT                                     27
+#define   O_TX_CONTROL__TX10HALT                                     26
+#define   O_TX_CONTROL__TX9HALT                                      25
+#define   O_TX_CONTROL__TX8HALT                                      24
+#define   O_TX_CONTROL__TX7HALT                                      23
+#define   O_TX_CONTROL__TX6HALT                                      22
+#define   O_TX_CONTROL__TX5HALT                                      21
+#define   O_TX_CONTROL__TX4HALT                                      20
+#define   O_TX_CONTROL__TX3HALT                                      19
+#define   O_TX_CONTROL__TX2HALT                                      18
+#define   O_TX_CONTROL__TX1HALT                                      17
+#define   O_TX_CONTROL__TX0HALT                                      16
+#define   O_TX_CONTROL__TXIDLE                                       15
+#define   O_TX_CONTROL__TXENABLE                                     14
+#define   O_TX_CONTROL__TXTHRESHOLD                                  0
+#define   W_TX_CONTROL__TXTHRESHOLD                                  14
 #define R_RX_CONTROL                                                 0x0A1
 #define   O_RX_CONTROL__RGMII                                        10
-#define   O_RX_CONTROL__SoftReset			             2
-#define   O_RX_CONTROL__RxHalt                                       1
-#define   O_RX_CONTROL__RxEnable                                     0
+#define   O_RX_CONTROL__SOFTRESET			             2
+#define   O_RX_CONTROL__RXHALT                                       1
+#define   O_RX_CONTROL__RXENABLE                                     0
 #define R_DESC_PACK_CTRL                                            0x0A2
-#define   O_DESC_PACK_CTRL__ByteOffset                              17
-#define   W_DESC_PACK_CTRL__ByteOffset                              3
-#define   O_DESC_PACK_CTRL__PrePadEnable                            16
-#define   O_DESC_PACK_CTRL__MaxEntry                                14
-#define   W_DESC_PACK_CTRL__MaxEntry                                2
-#define   O_DESC_PACK_CTRL__RegularSize                             0
-#define   W_DESC_PACK_CTRL__RegularSize                             14
+#define   O_DESC_PACK_CTRL__BYTEOFFSET                              17
+#define   W_DESC_PACK_CTRL__BYTEOFFSET                              3
+#define   O_DESC_PACK_CTRL__PREPADENABLE                            16
+#define   O_DESC_PACK_CTRL__MAXENTRY                                14
+#define   W_DESC_PACK_CTRL__MAXENTRY                                2
+#define   O_DESC_PACK_CTRL__REGULARSIZE                             0
+#define   W_DESC_PACK_CTRL__REGULARSIZE                             14
 #define R_STATCTRL                                                  0x0A3
-#define   O_STATCTRL__OverFlowEn                                    4
+#define   O_STATCTRL__OVERFLOWEN                                    4
 #define   O_STATCTRL__GIG                                           3
-#define   O_STATCTRL__Sten                                          2
-#define   O_STATCTRL__ClrCnt                                        1
-#define   O_STATCTRL__AutoZ                                         0
+#define   O_STATCTRL__STEN                                          2
+#define   O_STATCTRL__CLRCNT                                        1
+#define   O_STATCTRL__AUTOZ                                         0
 #define R_L2ALLOCCTRL                                               0x0A4
-#define   O_L2ALLOCCTRL__TxL2Allocate                               9
-#define   W_L2ALLOCCTRL__TxL2Allocate                               9
-#define   O_L2ALLOCCTRL__RxL2Allocate                               0
-#define   W_L2ALLOCCTRL__RxL2Allocate                               9
+#define   O_L2ALLOCCTRL__TXL2ALLOCATE                               9
+#define   W_L2ALLOCCTRL__TXL2ALLOCATE                               9
+#define   O_L2ALLOCCTRL__RXL2ALLOCATE                               0
+#define   W_L2ALLOCCTRL__RXL2ALLOCATE                               9
 #define R_INTMASK                                                   0x0A5
-#define   O_INTMASK__Spi4TxError                                     28
-#define   O_INTMASK__Spi4RxError                                     27
-#define   O_INTMASK__RGMIIHalfDupCollision                           27
-#define   O_INTMASK__Abort                                           26
-#define   O_INTMASK__Underrun                                        25
-#define   O_INTMASK__DiscardPacket                                   24
-#define   O_INTMASK__AsyncFifoFull                                   23
-#define   O_INTMASK__TagFull                                         22
-#define   O_INTMASK__Class3Full                                      21
-#define   O_INTMASK__C3EarlyFull                                     20
-#define   O_INTMASK__Class2Full                                      19
-#define   O_INTMASK__C2EarlyFull                                     18
-#define   O_INTMASK__Class1Full                                      17
-#define   O_INTMASK__C1EarlyFull                                     16
-#define   O_INTMASK__Class0Full                                      15
-#define   O_INTMASK__C0EarlyFull                                     14
-#define   O_INTMASK__RxDataFull                                      13
-#define   O_INTMASK__RxEarlyFull                                     12
-#define   O_INTMASK__RFreeEmpty                                      9
-#define   O_INTMASK__RFEarlyEmpty                                    8
-#define   O_INTMASK__P2PSpillEcc                                     7
-#define   O_INTMASK__FreeDescFull                                    5
-#define   O_INTMASK__FreeEarlyFull                                   4
-#define   O_INTMASK__TxFetchError                                    3
-#define   O_INTMASK__StatCarry                                       2
-#define   O_INTMASK__MDInt                                           1
-#define   O_INTMASK__TxIllegal                                       0
+#define   O_INTMASK__SPI4TXERROR                                     28
+#define   O_INTMASK__SPI4RXERROR                                     27
+#define   O_INTMASK__RGMIIHALFDUPCOLLISION                           27
+#define   O_INTMASK__ABORT                                           26
+#define   O_INTMASK__UNDERRUN                                        25
+#define   O_INTMASK__DISCARDPACKET                                   24
+#define   O_INTMASK__ASYNCFIFOFULL                                   23
+#define   O_INTMASK__TAGFULL                                         22
+#define   O_INTMASK__CLASS3FULL                                      21
+#define   O_INTMASK__C3EARLYFULL                                     20
+#define   O_INTMASK__CLASS2FULL                                      19
+#define   O_INTMASK__C2EARLYFULL                                     18
+#define   O_INTMASK__CLASS1FULL                                      17
+#define   O_INTMASK__C1EARLYFULL                                     16
+#define   O_INTMASK__CLASS0FULL                                      15
+#define   O_INTMASK__C0EARLYFULL                                     14
+#define   O_INTMASK__RXDATAFULL                                      13
+#define   O_INTMASK__RXEARLYFULL                                     12
+#define   O_INTMASK__RFREEEMPTY                                      9
+#define   O_INTMASK__RFEARLYEMPTY                                    8
+#define   O_INTMASK__P2PSPILLECC                                     7
+#define   O_INTMASK__FREEDESCFULL                                    5
+#define   O_INTMASK__FREEEARLYFULL                                   4
+#define   O_INTMASK__TXFETCHERROR                                    3
+#define   O_INTMASK__STATCARRY                                       2
+#define   O_INTMASK__MDINT                                           1
+#define   O_INTMASK__TXILLEGAL                                       0
 #define R_INTREG                                                    0x0A6
-#define   O_INTREG__Spi4TxError                                     28
-#define   O_INTREG__Spi4RxError                                     27
-#define   O_INTREG__RGMIIHalfDupCollision                           27
-#define   O_INTREG__Abort                                           26
-#define   O_INTREG__Underrun                                        25
-#define   O_INTREG__DiscardPacket                                   24
-#define   O_INTREG__AsyncFifoFull                                   23
-#define   O_INTREG__TagFull                                         22
-#define   O_INTREG__Class3Full                                      21
-#define   O_INTREG__C3EarlyFull                                     20
-#define   O_INTREG__Class2Full                                      19
-#define   O_INTREG__C2EarlyFull                                     18
-#define   O_INTREG__Class1Full                                      17
-#define   O_INTREG__C1EarlyFull                                     16
-#define   O_INTREG__Class0Full                                      15
-#define   O_INTREG__C0EarlyFull                                     14
-#define   O_INTREG__RxDataFull                                      13
-#define   O_INTREG__RxEarlyFull                                     12
-#define   O_INTREG__RFreeEmpty                                      9
-#define   O_INTREG__RFEarlyEmpty                                    8
-#define   O_INTREG__P2PSpillEcc                                     7
-#define   O_INTREG__FreeDescFull                                    5
-#define   O_INTREG__FreeEarlyFull                                   4
-#define   O_INTREG__TxFetchError                                    3
-#define   O_INTREG__StatCarry                                       2
-#define   O_INTREG__MDInt                                           1
-#define   O_INTREG__TxIllegal                                       0
+#define   O_INTREG__SPI4TXERROR                                     28
+#define   O_INTREG__SPI4RXERROR                                     27
+#define   O_INTREG__RGMIIHALFDUPCOLLISION                           27
+#define   O_INTREG__ABORT                                           26
+#define   O_INTREG__UNDERRUN                                        25
+#define   O_INTREG__DISCARDPACKET                                   24
+#define   O_INTREG__ASYNCFIFOFULL                                   23
+#define   O_INTREG__TAGFULL                                         22
+#define   O_INTREG__CLASS3FULL                                      21
+#define   O_INTREG__C3EARLYFULL                                     20
+#define   O_INTREG__CLASS2FULL                                      19
+#define   O_INTREG__C2EARLYFULL                                     18
+#define   O_INTREG__CLASS1FULL                                      17
+#define   O_INTREG__C1EARLYFULL                                     16
+#define   O_INTREG__CLASS0FULL                                      15
+#define   O_INTREG__C0EARLYFULL                                     14
+#define   O_INTREG__RXDATAFULL                                      13
+#define   O_INTREG__RXEARLYFULL                                     12
+#define   O_INTREG__RFREEEMPTY                                      9
+#define   O_INTREG__RFEARLYEMPTY                                    8
+#define   O_INTREG__P2PSPILLECC                                     7
+#define   O_INTREG__FREEDESCFULL                                    5
+#define   O_INTREG__FREEEARLYFULL                                   4
+#define   O_INTREG__TXFETCHERROR                                    3
+#define   O_INTREG__STATCARRY                                       2
+#define   O_INTREG__MDINT                                           1
+#define   O_INTREG__TXILLEGAL                                       0
 #define R_TXRETRY                                                   0x0A7
-#define   O_TXRETRY__CollisionRetry                                 6
-#define   O_TXRETRY__BusErrorRetry                                  5
-#define   O_TXRETRY__UnderRunRetry                                  4
-#define   O_TXRETRY__Retries                                        0
-#define   W_TXRETRY__Retries                                        4
+#define   O_TXRETRY__COLLISIONRETRY                                 6
+#define   O_TXRETRY__BUSERRORRETRY                                  5
+#define   O_TXRETRY__UNDERRUNRETRY                                  4
+#define   O_TXRETRY__RETRIES                                        0
+#define   W_TXRETRY__RETRIES                                        4
 #define R_CORECONTROL                                               0x0A8
-#define   O_CORECONTROL__ErrorThread                                4
-#define   W_CORECONTROL__ErrorThread                                7
-#define   O_CORECONTROL__Shutdown                                   2
-#define   O_CORECONTROL__Speed                                      0
-#define   W_CORECONTROL__Speed                                      2
+#define   O_CORECONTROL__ERRORTHREAD                                4
+#define   W_CORECONTROL__ERRORTHREAD                                7
+#define   O_CORECONTROL__SHUTDOWN                                   2
+#define   O_CORECONTROL__SPEED                                      0
+#define   W_CORECONTROL__SPEED                                      2
 #define R_BYTEOFFSET0                                               0x0A9
 #define R_BYTEOFFSET1                                               0x0AA
 #define R_L2TYPE_0                                                  0x0F0
-#define   O_L2TYPE__ExtraHdrProtoSize                               26
-#define   W_L2TYPE__ExtraHdrProtoSize                               5
-#define   O_L2TYPE__ExtraHdrProtoOffset                             20
-#define   W_L2TYPE__ExtraHdrProtoOffset                             6
-#define   O_L2TYPE__ExtraHeaderSize                                 14
-#define   W_L2TYPE__ExtraHeaderSize                                 6
-#define   O_L2TYPE__ProtoOffset                                     8
-#define   W_L2TYPE__ProtoOffset                                     6
-#define   O_L2TYPE__L2HdrOffset                                     2
-#define   W_L2TYPE__L2HdrOffset                                     6
-#define   O_L2TYPE__L2Proto                                         0
-#define   W_L2TYPE__L2Proto                                         2
+#define   O_L2TYPE__EXTRAHDRPROTOSIZE                               26
+#define   W_L2TYPE__EXTRAHDRPROTOSIZE                               5
+#define   O_L2TYPE__EXTRAHDRPROTOOFFSET                             20
+#define   W_L2TYPE__EXTRAHDRPROTOOFFSET                             6
+#define   O_L2TYPE__EXTRAHEADERSIZE                                 14
+#define   W_L2TYPE__EXTRAHEADERSIZE                                 6
+#define   O_L2TYPE__PROTOOFFSET                                     8
+#define   W_L2TYPE__PROTOOFFSET                                     6
+#define   O_L2TYPE__L2HDROFFSET                                     2
+#define   W_L2TYPE__L2HDROFFSET                                     6
+#define   O_L2TYPE__L2PROTO                                         0
+#define   W_L2TYPE__L2PROTO                                         2
 #define R_L2TYPE_1                                                  0xF0
 #define R_L2TYPE_2                                                  0xF0
 #define R_L2TYPE_3                                                  0xF0
 #define R_PARSERCONFIGREG                                           0x100
-#define   O_PARSERCONFIGREG__CRCHashPoly                            8
-#define   W_PARSERCONFIGREG__CRCHashPoly                            7
-#define   O_PARSERCONFIGREG__PrePadOffset                           4
-#define   W_PARSERCONFIGREG__PrePadOffset                           4
-#define   O_PARSERCONFIGREG__UseCAM                                 2
-#define   O_PARSERCONFIGREG__UseHASH                                1
-#define   O_PARSERCONFIGREG__UseProto                               0
+#define   O_PARSERCONFIGREG__CRCHASHPOLY                            8
+#define   W_PARSERCONFIGREG__CRCHASHPOLY                            7
+#define   O_PARSERCONFIGREG__PREPADOFFSET                           4
+#define   W_PARSERCONFIGREG__PREPADOFFSET                           4
+#define   O_PARSERCONFIGREG__USECAM                                 2
+#define   O_PARSERCONFIGREG__USEHASH                                1
+#define   O_PARSERCONFIGREG__USEPROTO                               0
 #define R_L3CTABLE                                                  0x140
-#define   O_L3CTABLE__Offset0                                       25
-#define   W_L3CTABLE__Offset0                                       7
-#define   O_L3CTABLE__Len0                                          21
-#define   W_L3CTABLE__Len0                                          4
-#define   O_L3CTABLE__Offset1                                       14
-#define   W_L3CTABLE__Offset1                                       7
-#define   O_L3CTABLE__Len1                                          10
-#define   W_L3CTABLE__Len1                                          4
-#define   O_L3CTABLE__Offset2                                       4
-#define   W_L3CTABLE__Offset2                                       6
-#define   O_L3CTABLE__Len2                                          0
-#define   W_L3CTABLE__Len2                                          4
-#define   O_L3CTABLE__L3HdrOffset                                   26
-#define   W_L3CTABLE__L3HdrOffset                                   6
-#define   O_L3CTABLE__L4ProtoOffset                                 20
-#define   W_L3CTABLE__L4ProtoOffset                                 6
-#define   O_L3CTABLE__IPChksumCompute                               19
-#define   O_L3CTABLE__L4Classify                                    18
-#define   O_L3CTABLE__L2Proto                                       16
-#define   W_L3CTABLE__L2Proto                                       2
-#define   O_L3CTABLE__L3ProtoKey                                    0
-#define   W_L3CTABLE__L3ProtoKey                                    16
+#define   O_L3CTABLE__OFFSET0                                       25
+#define   W_L3CTABLE__OFFSET0                                       7
+#define   O_L3CTABLE__LEN0                                          21
+#define   W_L3CTABLE__LEN0                                          4
+#define   O_L3CTABLE__OFFSET1                                       14
+#define   W_L3CTABLE__OFFSET1                                       7
+#define   O_L3CTABLE__LEN1                                          10
+#define   W_L3CTABLE__LEN1                                          4
+#define   O_L3CTABLE__OFFSET2                                       4
+#define   W_L3CTABLE__OFFSET2                                       6
+#define   O_L3CTABLE__LEN2                                          0
+#define   W_L3CTABLE__LEN2                                          4
+#define   O_L3CTABLE__L3HDROFFSET                                   26
+#define   W_L3CTABLE__L3HDROFFSET                                   6
+#define   O_L3CTABLE__L4PROTOOFFSET                                 20
+#define   W_L3CTABLE__L4PROTOOFFSET                                 6
+#define   O_L3CTABLE__IPCHKSUMCOMPUTE                               19
+#define   O_L3CTABLE__L4CLASSIFY                                    18
+#define   O_L3CTABLE__L2PROTO                                       16
+#define   W_L3CTABLE__L2PROTO                                       2
+#define   O_L3CTABLE__L3PROTOKEY                                    0
+#define   W_L3CTABLE__L3PROTOKEY                                    16
 #define R_L4CTABLE                                                  0x160
-#define   O_L4CTABLE__Offset0                                       21
-#define   W_L4CTABLE__Offset0                                       6
-#define   O_L4CTABLE__Len0                                          17
-#define   W_L4CTABLE__Len0                                          4
-#define   O_L4CTABLE__Offset1                                       11
-#define   W_L4CTABLE__Offset1                                       6
-#define   O_L4CTABLE__Len1                                          7
-#define   W_L4CTABLE__Len1                                          4
-#define   O_L4CTABLE__TCPChksumEnable                               0
+#define   O_L4CTABLE__OFFSET0                                       21
+#define   W_L4CTABLE__OFFSET0                                       6
+#define   O_L4CTABLE__LEN0                                          17
+#define   W_L4CTABLE__LEN0                                          4
+#define   O_L4CTABLE__OFFSET1                                       11
+#define   W_L4CTABLE__OFFSET1                                       6
+#define   O_L4CTABLE__LEN1                                          7
+#define   W_L4CTABLE__LEN1                                          4
+#define   O_L4CTABLE__TCPCHKSUMENABLE                               0
 #define R_CAM4X128TABLE                                             0x172
-#define   O_CAM4X128TABLE__ClassId                                  7
-#define   W_CAM4X128TABLE__ClassId                                  2
-#define   O_CAM4X128TABLE__BucketId                                 1
-#define   W_CAM4X128TABLE__BucketId                                 6
-#define   O_CAM4X128TABLE__UseBucket                                0
+#define   O_CAM4X128TABLE__CLASSID                                  7
+#define   W_CAM4X128TABLE__CLASSID                                  2
+#define   O_CAM4X128TABLE__BUCKETID                                 1
+#define   W_CAM4X128TABLE__BUCKETID                                 6
+#define   O_CAM4X128TABLE__USEBUCKET                                0
 #define R_CAM4X128KEY                                               0x180
 #define R_TRANSLATETABLE                                            0x1A0
 #define R_DMACR0                                                    0x200
-#define   O_DMACR0__Data0WrMaxCr                                    27
-#define   W_DMACR0__Data0WrMaxCr                                    3
-#define   O_DMACR0__Data0RdMaxCr                                    24
-#define   W_DMACR0__Data0RdMaxCr                                    3
-#define   O_DMACR0__Data1WrMaxCr                                    21
-#define   W_DMACR0__Data1WrMaxCr                                    3
-#define   O_DMACR0__Data1RdMaxCr                                    18
-#define   W_DMACR0__Data1RdMaxCr                                    3
-#define   O_DMACR0__Data2WrMaxCr                                    15
-#define   W_DMACR0__Data2WrMaxCr                                    3
-#define   O_DMACR0__Data2RdMaxCr                                    12
-#define   W_DMACR0__Data2RdMaxCr                                    3
-#define   O_DMACR0__Data3WrMaxCr                                    9
-#define   W_DMACR0__Data3WrMaxCr                                    3
-#define   O_DMACR0__Data3RdMaxCr                                    6
-#define   W_DMACR0__Data3RdMaxCr                                    3
-#define   O_DMACR0__Data4WrMaxCr                                    3
-#define   W_DMACR0__Data4WrMaxCr                                    3
-#define   O_DMACR0__Data4RdMaxCr                                    0
-#define   W_DMACR0__Data4RdMaxCr                                    3
+#define   O_DMACR0__DATA0WRMAXCR                                    27
+#define   W_DMACR0__DATA0WRMAXCR                                    3
+#define   O_DMACR0__DATA0RDMAXCR                                    24
+#define   W_DMACR0__DATA0RDMAXCR                                    3
+#define   O_DMACR0__DATA1WRMAXCR                                    21
+#define   W_DMACR0__DATA1WRMAXCR                                    3
+#define   O_DMACR0__DATA1RDMAXCR                                    18
+#define   W_DMACR0__DATA1RDMAXCR                                    3
+#define   O_DMACR0__DATA2WRMAXCR                                    15
+#define   W_DMACR0__DATA2WRMAXCR                                    3
+#define   O_DMACR0__DATA2RDMAXCR                                    12
+#define   W_DMACR0__DATA2RDMAXCR                                    3
+#define   O_DMACR0__DATA3WRMAXCR                                    9
+#define   W_DMACR0__DATA3WRMAXCR                                    3
+#define   O_DMACR0__DATA3RDMAXCR                                    6
+#define   W_DMACR0__DATA3RDMAXCR                                    3
+#define   O_DMACR0__DATA4WRMAXCR                                    3
+#define   W_DMACR0__DATA4WRMAXCR                                    3
+#define   O_DMACR0__DATA4RDMAXCR                                    0
+#define   W_DMACR0__DATA4RDMAXCR                                    3
 #define R_DMACR1                                                    0x201
-#define   O_DMACR1__Data5WrMaxCr                                    27
-#define   W_DMACR1__Data5WrMaxCr                                    3
-#define   O_DMACR1__Data5RdMaxCr                                    24
-#define   W_DMACR1__Data5RdMaxCr                                    3
-#define   O_DMACR1__Data6WrMaxCr                                    21
-#define   W_DMACR1__Data6WrMaxCr                                    3
-#define   O_DMACR1__Data6RdMaxCr                                    18
-#define   W_DMACR1__Data6RdMaxCr                                    3
-#define   O_DMACR1__Data7WrMaxCr                                    15
-#define   W_DMACR1__Data7WrMaxCr                                    3
-#define   O_DMACR1__Data7RdMaxCr                                    12
-#define   W_DMACR1__Data7RdMaxCr                                    3
-#define   O_DMACR1__Data8WrMaxCr                                    9
-#define   W_DMACR1__Data8WrMaxCr                                    3
-#define   O_DMACR1__Data8RdMaxCr                                    6
-#define   W_DMACR1__Data8RdMaxCr                                    3
-#define   O_DMACR1__Data9WrMaxCr                                    3
-#define   W_DMACR1__Data9WrMaxCr                                    3
-#define   O_DMACR1__Data9RdMaxCr                                    0
-#define   W_DMACR1__Data9RdMaxCr                                    3
+#define   O_DMACR1__DATA5WRMAXCR                                    27
+#define   W_DMACR1__DATA5WRMAXCR                                    3
+#define   O_DMACR1__DATA5RDMAXCR                                    24
+#define   W_DMACR1__DATA5RDMAXCR                                    3
+#define   O_DMACR1__DATA6WRMAXCR                                    21
+#define   W_DMACR1__DATA6WRMAXCR                                    3
+#define   O_DMACR1__DATA6RDMAXCR                                    18
+#define   W_DMACR1__DATA6RDMAXCR                                    3
+#define   O_DMACR1__DATA7WRMAXCR                                    15
+#define   W_DMACR1__DATA7WRMAXCR                                    3
+#define   O_DMACR1__DATA7RDMAXCR                                    12
+#define   W_DMACR1__DATA7RDMAXCR                                    3
+#define   O_DMACR1__DATA8WRMAXCR                                    9
+#define   W_DMACR1__DATA8WRMAXCR                                    3
+#define   O_DMACR1__DATA8RDMAXCR                                    6
+#define   W_DMACR1__DATA8RDMAXCR                                    3
+#define   O_DMACR1__DATA9WRMAXCR                                    3
+#define   W_DMACR1__DATA9WRMAXCR                                    3
+#define   O_DMACR1__DATA9RDMAXCR                                    0
+#define   W_DMACR1__DATA9RDMAXCR                                    3
 #define R_DMACR2                                                    0x202
-#define   O_DMACR2__Data10WrMaxCr                                   27
-#define   W_DMACR2__Data10WrMaxCr                                   3
-#define   O_DMACR2__Data10RdMaxCr                                   24
-#define   W_DMACR2__Data10RdMaxCr                                   3
-#define   O_DMACR2__Data11WrMaxCr                                   21
-#define   W_DMACR2__Data11WrMaxCr                                   3
-#define   O_DMACR2__Data11RdMaxCr                                   18
-#define   W_DMACR2__Data11RdMaxCr                                   3
-#define   O_DMACR2__Data12WrMaxCr                                   15
-#define   W_DMACR2__Data12WrMaxCr                                   3
-#define   O_DMACR2__Data12RdMaxCr                                   12
-#define   W_DMACR2__Data12RdMaxCr                                   3
-#define   O_DMACR2__Data13WrMaxCr                                   9
-#define   W_DMACR2__Data13WrMaxCr                                   3
-#define   O_DMACR2__Data13RdMaxCr                                   6
-#define   W_DMACR2__Data13RdMaxCr                                   3
-#define   O_DMACR2__Data14WrMaxCr                                   3
-#define   W_DMACR2__Data14WrMaxCr                                   3
-#define   O_DMACR2__Data14RdMaxCr                                   0
-#define   W_DMACR2__Data14RdMaxCr                                   3
+#define   O_DMACR2__DATA10WRMAXCR                                   27
+#define   W_DMACR2__DATA10WRMAXCR                                   3
+#define   O_DMACR2__DATA10RDMAXCR                                   24
+#define   W_DMACR2__DATA10RDMAXCR                                   3
+#define   O_DMACR2__DATA11WRMAXCR                                   21
+#define   W_DMACR2__DATA11WRMAXCR                                   3
+#define   O_DMACR2__DATA11RDMAXCR                                   18
+#define   W_DMACR2__DATA11RDMAXCR                                   3
+#define   O_DMACR2__DATA12WRMAXCR                                   15
+#define   W_DMACR2__DATA12WRMAXCR                                   3
+#define   O_DMACR2__DATA12RDMAXCR                                   12
+#define   W_DMACR2__DATA12RDMAXCR                                   3
+#define   O_DMACR2__DATA13WRMAXCR                                   9
+#define   W_DMACR2__DATA13WRMAXCR                                   3
+#define   O_DMACR2__DATA13RDMAXCR                                   6
+#define   W_DMACR2__DATA13RDMAXCR                                   3
+#define   O_DMACR2__DATA14WRMAXCR                                   3
+#define   W_DMACR2__DATA14WRMAXCR                                   3
+#define   O_DMACR2__DATA14RDMAXCR                                   0
+#define   W_DMACR2__DATA14RDMAXCR                                   3
 #define R_DMACR3                                                    0x203
-#define   O_DMACR3__Data15WrMaxCr                                   27
-#define   W_DMACR3__Data15WrMaxCr                                   3
-#define   O_DMACR3__Data15RdMaxCr                                   24
-#define   W_DMACR3__Data15RdMaxCr                                   3
-#define   O_DMACR3__SpClassWrMaxCr                                  21
-#define   W_DMACR3__SpClassWrMaxCr                                  3
-#define   O_DMACR3__SpClassRdMaxCr                                  18
-#define   W_DMACR3__SpClassRdMaxCr                                  3
-#define   O_DMACR3__JumFrInWrMaxCr                                  15
-#define   W_DMACR3__JumFrInWrMaxCr                                  3
-#define   O_DMACR3__JumFrInRdMaxCr                                  12
-#define   W_DMACR3__JumFrInRdMaxCr                                  3
-#define   O_DMACR3__RegFrInWrMaxCr                                  9
-#define   W_DMACR3__RegFrInWrMaxCr                                  3
-#define   O_DMACR3__RegFrInRdMaxCr                                  6
-#define   W_DMACR3__RegFrInRdMaxCr                                  3
-#define   O_DMACR3__FrOutWrMaxCr                                    3
-#define   W_DMACR3__FrOutWrMaxCr                                    3
-#define   O_DMACR3__FrOutRdMaxCr                                    0
-#define   W_DMACR3__FrOutRdMaxCr                                    3
+#define   O_DMACR3__DATA15WRMAXCR                                   27
+#define   W_DMACR3__DATA15WRMAXCR                                   3
+#define   O_DMACR3__DATA15RDMAXCR                                   24
+#define   W_DMACR3__DATA15RDMAXCR                                   3
+#define   O_DMACR3__SPCLASSWRMAXCR                                  21
+#define   W_DMACR3__SPCLASSWRMAXCR                                  3
+#define   O_DMACR3__SPCLASSRDMAXCR                                  18
+#define   W_DMACR3__SPCLASSRDMAXCR                                  3
+#define   O_DMACR3__JUMFRINWRMAXCR                                  15
+#define   W_DMACR3__JUMFRINWRMAXCR                                  3
+#define   O_DMACR3__JUMFRINRDMAXCR                                  12
+#define   W_DMACR3__JUMFRINRDMAXCR                                  3
+#define   O_DMACR3__REGFRINWRMAXCR                                  9
+#define   W_DMACR3__REGFRINWRMAXCR                                  3
+#define   O_DMACR3__REGFRINRDMAXCR                                  6
+#define   W_DMACR3__REGFRINRDMAXCR                                  3
+#define   O_DMACR3__FROUTWRMAXCR                                    3
+#define   W_DMACR3__FROUTWRMAXCR                                    3
+#define   O_DMACR3__FROUTRDMAXCR                                    0
+#define   W_DMACR3__FROUTRDMAXCR                                    3
 #define R_REG_FRIN_SPILL_MEM_START_0                                0x204
-#define   O_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0        0
-#define   W_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0       32
+#define   O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0        0
+#define   W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0       32
 #define R_REG_FRIN_SPILL_MEM_START_1                                0x205
-#define   O_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1        0
-#define   W_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1        3
+#define   O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1        0
+#define   W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1        3
 #define R_REG_FRIN_SPILL_MEM_SIZE                                   0x206
-#define   O_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize             0
-#define   W_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize            32
+#define   O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE             0
+#define   W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE            32
 #define R_FROUT_SPILL_MEM_START_0                                   0x207
-#define   O_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0             0
-#define   W_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0            32
+#define   O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0             0
+#define   W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0            32
 #define R_FROUT_SPILL_MEM_START_1                                   0x208
-#define   O_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1             0
-#define   W_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1             3
+#define   O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1             0
+#define   W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1             3
 #define R_FROUT_SPILL_MEM_SIZE                                      0x209
-#define   O_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize                  0
-#define   W_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize                 32
+#define   O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE                  0
+#define   W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE                 32
 #define R_CLASS0_SPILL_MEM_START_0                                  0x20A
-#define   O_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0           0
-#define   W_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0          32
+#define   O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0           0
+#define   W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0          32
 #define R_CLASS0_SPILL_MEM_START_1                                  0x20B
-#define   O_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1           0
-#define   W_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1           3
+#define   O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1           0
+#define   W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1           3
 #define R_CLASS0_SPILL_MEM_SIZE                                     0x20C
-#define   O_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize                0
-#define   W_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize               32
+#define   O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE                0
+#define   W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE               32
 #define R_JUMFRIN_SPILL_MEM_START_0                                 0x20D
-#define   O_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0          0
-#define   W_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0         32
+#define   O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0          0
+#define   W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0         32
 #define R_JUMFRIN_SPILL_MEM_START_1                                 0x20E
-#define   O_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1         0
-#define   W_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1         3
+#define   O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1         0
+#define   W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1         3
 #define R_JUMFRIN_SPILL_MEM_SIZE                                    0x20F
-#define   O_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize              0
-#define   W_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize             32
+#define   O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE              0
+#define   W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE             32
 #define R_CLASS1_SPILL_MEM_START_0                                  0x210
-#define   O_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0           0
-#define   W_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0          32
+#define   O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0           0
+#define   W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0          32
 #define R_CLASS1_SPILL_MEM_START_1                                  0x211
-#define   O_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1           0
-#define   W_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1           3
+#define   O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1           0
+#define   W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1           3
 #define R_CLASS1_SPILL_MEM_SIZE                                     0x212
-#define   O_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize                0
-#define   W_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize               32
+#define   O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE                0
+#define   W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE               32
 #define R_CLASS2_SPILL_MEM_START_0                                  0x213
-#define   O_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0           0
-#define   W_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0          32
+#define   O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0           0
+#define   W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0          32
 #define R_CLASS2_SPILL_MEM_START_1                                  0x214
-#define   O_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1           0
-#define   W_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1           3
+#define   O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1           0
+#define   W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1           3
 #define R_CLASS2_SPILL_MEM_SIZE                                     0x215
-#define   O_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize                0
-#define   W_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize               32
+#define   O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE                0
+#define   W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE               32
 #define R_CLASS3_SPILL_MEM_START_0                                  0x216
-#define   O_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0           0
-#define   W_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0          32
+#define   O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0           0
+#define   W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0          32
 #define R_CLASS3_SPILL_MEM_START_1                                  0x217
-#define   O_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1           0
-#define   W_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1           3
+#define   O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1           0
+#define   W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1           3
 #define R_CLASS3_SPILL_MEM_SIZE                                     0x218
-#define   O_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize                0
-#define   W_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize               32
+#define   O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE                0
+#define   W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE               32
 #define R_REG_FRIN1_SPILL_MEM_START_0                               0x219
 #define R_REG_FRIN1_SPILL_MEM_START_1                               0x21a
 #define R_REG_FRIN1_SPILL_MEM_SIZE                                  0x21b
@@ -679,244 +679,244 @@
 #define   O_SPISTRV3__EG_STRV_THRESH_15                             0
 #define   W_SPISTRV3__EG_STRV_THRESH_15                             7
 #define R_TXDATAFIFO0                                               0x221
-#define   O_TXDATAFIFO0__Tx0DataFifoStart                           24
-#define   W_TXDATAFIFO0__Tx0DataFifoStart                           7
-#define   O_TXDATAFIFO0__Tx0DataFifoSize                            16
-#define   W_TXDATAFIFO0__Tx0DataFifoSize                            7
-#define   O_TXDATAFIFO0__Tx1DataFifoStart                           8
-#define   W_TXDATAFIFO0__Tx1DataFifoStart                           7
-#define   O_TXDATAFIFO0__Tx1DataFifoSize                            0
-#define   W_TXDATAFIFO0__Tx1DataFifoSize                            7
+#define   O_TXDATAFIFO0__TX0DATAFIFOSTART                           24
+#define   W_TXDATAFIFO0__TX0DATAFIFOSTART                           7
+#define   O_TXDATAFIFO0__TX0DATAFIFOSIZE                            16
+#define   W_TXDATAFIFO0__TX0DATAFIFOSIZE                            7
+#define   O_TXDATAFIFO0__TX1DATAFIFOSTART                           8
+#define   W_TXDATAFIFO0__TX1DATAFIFOSTART                           7
+#define   O_TXDATAFIFO0__TX1DATAFIFOSIZE                            0
+#define   W_TXDATAFIFO0__TX1DATAFIFOSIZE                            7
 #define R_TXDATAFIFO1                                               0x222
-#define   O_TXDATAFIFO1__Tx2DataFifoStart                           24
-#define   W_TXDATAFIFO1__Tx2DataFifoStart                           7
-#define   O_TXDATAFIFO1__Tx2DataFifoSize                            16
-#define   W_TXDATAFIFO1__Tx2DataFifoSize                            7
-#define   O_TXDATAFIFO1__Tx3DataFifoStart                           8
-#define   W_TXDATAFIFO1__Tx3DataFifoStart                           7
-#define   O_TXDATAFIFO1__Tx3DataFifoSize                            0
-#define   W_TXDATAFIFO1__Tx3DataFifoSize                            7
+#define   O_TXDATAFIFO1__TX2DATAFIFOSTART                           24
+#define   W_TXDATAFIFO1__TX2DATAFIFOSTART                           7
+#define   O_TXDATAFIFO1__TX2DATAFIFOSIZE                            16
+#define   W_TXDATAFIFO1__TX2DATAFIFOSIZE                            7
+#define   O_TXDATAFIFO1__TX3DATAFIFOSTART                           8
+#define   W_TXDATAFIFO1__TX3DATAFIFOSTART                           7
+#define   O_TXDATAFIFO1__TX3DATAFIFOSIZE                            0
+#define   W_TXDATAFIFO1__TX3DATAFIFOSIZE                            7
 #define R_TXDATAFIFO2                                               0x223
-#define   O_TXDATAFIFO2__Tx4DataFifoStart                           24
-#define   W_TXDATAFIFO2__Tx4DataFifoStart                           7
-#define   O_TXDATAFIFO2__Tx4DataFifoSize                            16
-#define   W_TXDATAFIFO2__Tx4DataFifoSize                            7
-#define   O_TXDATAFIFO2__Tx5DataFifoStart                           8
-#define   W_TXDATAFIFO2__Tx5DataFifoStart                           7
-#define   O_TXDATAFIFO2__Tx5DataFifoSize                            0
-#define   W_TXDATAFIFO2__Tx5DataFifoSize                            7
+#define   O_TXDATAFIFO2__TX4DATAFIFOSTART                           24
+#define   W_TXDATAFIFO2__TX4DATAFIFOSTART                           7
+#define   O_TXDATAFIFO2__TX4DATAFIFOSIZE                            16
+#define   W_TXDATAFIFO2__TX4DATAFIFOSIZE                            7
+#define   O_TXDATAFIFO2__TX5DATAFIFOSTART                           8
+#define   W_TXDATAFIFO2__TX5DATAFIFOSTART                           7
+#define   O_TXDATAFIFO2__TX5DATAFIFOSIZE                            0
+#define   W_TXDATAFIFO2__TX5DATAFIFOSIZE                            7
 #define R_TXDATAFIFO3                                               0x224
-#define   O_TXDATAFIFO3__Tx6DataFifoStart                           24
-#define   W_TXDATAFIFO3__Tx6DataFifoStart                           7
-#define   O_TXDATAFIFO3__Tx6DataFifoSize                            16
-#define   W_TXDATAFIFO3__Tx6DataFifoSize                            7
-#define   O_TXDATAFIFO3__Tx7DataFifoStart                           8
-#define   W_TXDATAFIFO3__Tx7DataFifoStart                           7
-#define   O_TXDATAFIFO3__Tx7DataFifoSize                            0
-#define   W_TXDATAFIFO3__Tx7DataFifoSize                            7
+#define   O_TXDATAFIFO3__TX6DATAFIFOSTART                           24
+#define   W_TXDATAFIFO3__TX6DATAFIFOSTART                           7
+#define   O_TXDATAFIFO3__TX6DATAFIFOSIZE                            16
+#define   W_TXDATAFIFO3__TX6DATAFIFOSIZE                            7
+#define   O_TXDATAFIFO3__TX7DATAFIFOSTART                           8
+#define   W_TXDATAFIFO3__TX7DATAFIFOSTART                           7
+#define   O_TXDATAFIFO3__TX7DATAFIFOSIZE                            0
+#define   W_TXDATAFIFO3__TX7DATAFIFOSIZE                            7
 #define R_TXDATAFIFO4                                               0x225
-#define   O_TXDATAFIFO4__Tx8DataFifoStart                           24
-#define   W_TXDATAFIFO4__Tx8DataFifoStart                           7
-#define   O_TXDATAFIFO4__Tx8DataFifoSize                            16
-#define   W_TXDATAFIFO4__Tx8DataFifoSize                            7
-#define   O_TXDATAFIFO4__Tx9DataFifoStart                           8
-#define   W_TXDATAFIFO4__Tx9DataFifoStart                           7
-#define   O_TXDATAFIFO4__Tx9DataFifoSize                            0
-#define   W_TXDATAFIFO4__Tx9DataFifoSize                            7
+#define   O_TXDATAFIFO4__TX8DATAFIFOSTART                           24
+#define   W_TXDATAFIFO4__TX8DATAFIFOSTART                           7
+#define   O_TXDATAFIFO4__TX8DATAFIFOSIZE                            16
+#define   W_TXDATAFIFO4__TX8DATAFIFOSIZE                            7
+#define   O_TXDATAFIFO4__TX9DATAFIFOSTART                           8
+#define   W_TXDATAFIFO4__TX9DATAFIFOSTART                           7
+#define   O_TXDATAFIFO4__TX9DATAFIFOSIZE                            0
+#define   W_TXDATAFIFO4__TX9DATAFIFOSIZE                            7
 #define R_TXDATAFIFO5                                               0x226
-#define   O_TXDATAFIFO5__Tx10DataFifoStart                          24
-#define   W_TXDATAFIFO5__Tx10DataFifoStart                          7
-#define   O_TXDATAFIFO5__Tx10DataFifoSize                           16
-#define   W_TXDATAFIFO5__Tx10DataFifoSize                           7
-#define   O_TXDATAFIFO5__Tx11DataFifoStart                          8
-#define   W_TXDATAFIFO5__Tx11DataFifoStart                          7
-#define   O_TXDATAFIFO5__Tx11DataFifoSize                           0
-#define   W_TXDATAFIFO5__Tx11DataFifoSize                           7
+#define   O_TXDATAFIFO5__TX10DATAFIFOSTART                          24
+#define   W_TXDATAFIFO5__TX10DATAFIFOSTART                          7
+#define   O_TXDATAFIFO5__TX10DATAFIFOSIZE                           16
+#define   W_TXDATAFIFO5__TX10DATAFIFOSIZE                           7
+#define   O_TXDATAFIFO5__TX11DATAFIFOSTART                          8
+#define   W_TXDATAFIFO5__TX11DATAFIFOSTART                          7
+#define   O_TXDATAFIFO5__TX11DATAFIFOSIZE                           0
+#define   W_TXDATAFIFO5__TX11DATAFIFOSIZE                           7
 #define R_TXDATAFIFO6                                               0x227
-#define   O_TXDATAFIFO6__Tx12DataFifoStart                          24
-#define   W_TXDATAFIFO6__Tx12DataFifoStart                          7
-#define   O_TXDATAFIFO6__Tx12DataFifoSize                           16
-#define   W_TXDATAFIFO6__Tx12DataFifoSize                           7
-#define   O_TXDATAFIFO6__Tx13DataFifoStart                          8
-#define   W_TXDATAFIFO6__Tx13DataFifoStart                          7
-#define   O_TXDATAFIFO6__Tx13DataFifoSize                           0
-#define   W_TXDATAFIFO6__Tx13DataFifoSize                           7
+#define   O_TXDATAFIFO6__TX12DATAFIFOSTART                          24
+#define   W_TXDATAFIFO6__TX12DATAFIFOSTART                          7
+#define   O_TXDATAFIFO6__TX12DATAFIFOSIZE                           16
+#define   W_TXDATAFIFO6__TX12DATAFIFOSIZE                           7
+#define   O_TXDATAFIFO6__TX13DATAFIFOSTART                          8
+#define   W_TXDATAFIFO6__TX13DATAFIFOSTART                          7
+#define   O_TXDATAFIFO6__TX13DATAFIFOSIZE                           0
+#define   W_TXDATAFIFO6__TX13DATAFIFOSIZE                           7
 #define R_TXDATAFIFO7                                               0x228
-#define   O_TXDATAFIFO7__Tx14DataFifoStart                          24
-#define   W_TXDATAFIFO7__Tx14DataFifoStart                          7
-#define   O_TXDATAFIFO7__Tx14DataFifoSize                           16
-#define   W_TXDATAFIFO7__Tx14DataFifoSize                           7
-#define   O_TXDATAFIFO7__Tx15DataFifoStart                          8
-#define   W_TXDATAFIFO7__Tx15DataFifoStart                          7
-#define   O_TXDATAFIFO7__Tx15DataFifoSize                           0
-#define   W_TXDATAFIFO7__Tx15DataFifoSize                           7
+#define   O_TXDATAFIFO7__TX14DATAFIFOSTART                          24
+#define   W_TXDATAFIFO7__TX14DATAFIFOSTART                          7
+#define   O_TXDATAFIFO7__TX14DATAFIFOSIZE                           16
+#define   W_TXDATAFIFO7__TX14DATAFIFOSIZE                           7
+#define   O_TXDATAFIFO7__TX15DATAFIFOSTART                          8
+#define   W_TXDATAFIFO7__TX15DATAFIFOSTART                          7
+#define   O_TXDATAFIFO7__TX15DATAFIFOSIZE                           0
+#define   W_TXDATAFIFO7__TX15DATAFIFOSIZE                           7
 #define R_RXDATAFIFO0                                               0x229
-#define   O_RXDATAFIFO0__Rx0DataFifoStart                           24
-#define   W_RXDATAFIFO0__Rx0DataFifoStart                           7
-#define   O_RXDATAFIFO0__Rx0DataFifoSize                            16
-#define   W_RXDATAFIFO0__Rx0DataFifoSize                            7
-#define   O_RXDATAFIFO0__Rx1DataFifoStart                           8
-#define   W_RXDATAFIFO0__Rx1DataFifoStart                           7
-#define   O_RXDATAFIFO0__Rx1DataFifoSize                            0
-#define   W_RXDATAFIFO0__Rx1DataFifoSize                            7
+#define   O_RXDATAFIFO0__RX0DATAFIFOSTART                           24
+#define   W_RXDATAFIFO0__RX0DATAFIFOSTART                           7
+#define   O_RXDATAFIFO0__RX0DATAFIFOSIZE                            16
+#define   W_RXDATAFIFO0__RX0DATAFIFOSIZE                            7
+#define   O_RXDATAFIFO0__RX1DATAFIFOSTART                           8
+#define   W_RXDATAFIFO0__RX1DATAFIFOSTART                           7
+#define   O_RXDATAFIFO0__RX1DATAFIFOSIZE                            0
+#define   W_RXDATAFIFO0__RX1DATAFIFOSIZE                            7
 #define R_RXDATAFIFO1                                               0x22A
-#define   O_RXDATAFIFO1__Rx2DataFifoStart                           24
-#define   W_RXDATAFIFO1__Rx2DataFifoStart                           7
-#define   O_RXDATAFIFO1__Rx2DataFifoSize                            16
-#define   W_RXDATAFIFO1__Rx2DataFifoSize                            7
-#define   O_RXDATAFIFO1__Rx3DataFifoStart                           8
-#define   W_RXDATAFIFO1__Rx3DataFifoStart                           7
-#define   O_RXDATAFIFO1__Rx3DataFifoSize                            0
-#define   W_RXDATAFIFO1__Rx3DataFifoSize                            7
+#define   O_RXDATAFIFO1__RX2DATAFIFOSTART                           24
+#define   W_RXDATAFIFO1__RX2DATAFIFOSTART                           7
+#define   O_RXDATAFIFO1__RX2DATAFIFOSIZE                            16
+#define   W_RXDATAFIFO1__RX2DATAFIFOSIZE                            7
+#define   O_RXDATAFIFO1__RX3DATAFIFOSTART                           8
+#define   W_RXDATAFIFO1__RX3DATAFIFOSTART                           7
+#define   O_RXDATAFIFO1__RX3DATAFIFOSIZE                            0
+#define   W_RXDATAFIFO1__RX3DATAFIFOSIZE                            7
 #define R_RXDATAFIFO2                                               0x22B
-#define   O_RXDATAFIFO2__Rx4DataFifoStart                           24
-#define   W_RXDATAFIFO2__Rx4DataFifoStart                           7
-#define   O_RXDATAFIFO2__Rx4DataFifoSize                            16
-#define   W_RXDATAFIFO2__Rx4DataFifoSize                            7
-#define   O_RXDATAFIFO2__Rx5DataFifoStart                           8
-#define   W_RXDATAFIFO2__Rx5DataFifoStart                           7
-#define   O_RXDATAFIFO2__Rx5DataFifoSize                            0
-#define   W_RXDATAFIFO2__Rx5DataFifoSize                            7
+#define   O_RXDATAFIFO2__RX4DATAFIFOSTART                           24
+#define   W_RXDATAFIFO2__RX4DATAFIFOSTART                           7
+#define   O_RXDATAFIFO2__RX4DATAFIFOSIZE                            16
+#define   W_RXDATAFIFO2__RX4DATAFIFOSIZE                            7
+#define   O_RXDATAFIFO2__RX5DATAFIFOSTART                           8
+#define   W_RXDATAFIFO2__RX5DATAFIFOSTART                           7
+#define   O_RXDATAFIFO2__RX5DATAFIFOSIZE                            0
+#define   W_RXDATAFIFO2__RX5DATAFIFOSIZE                            7
 #define R_RXDATAFIFO3                                               0x22C
-#define   O_RXDATAFIFO3__Rx6DataFifoStart                           24
-#define   W_RXDATAFIFO3__Rx6DataFifoStart                           7
-#define   O_RXDATAFIFO3__Rx6DataFifoSize                            16
-#define   W_RXDATAFIFO3__Rx6DataFifoSize                            7
-#define   O_RXDATAFIFO3__Rx7DataFifoStart                           8
-#define   W_RXDATAFIFO3__Rx7DataFifoStart                           7
-#define   O_RXDATAFIFO3__Rx7DataFifoSize                            0
-#define   W_RXDATAFIFO3__Rx7DataFifoSize                            7
+#define   O_RXDATAFIFO3__RX6DATAFIFOSTART                           24
+#define   W_RXDATAFIFO3__RX6DATAFIFOSTART                           7
+#define   O_RXDATAFIFO3__RX6DATAFIFOSIZE                            16
+#define   W_RXDATAFIFO3__RX6DATAFIFOSIZE                            7
+#define   O_RXDATAFIFO3__RX7DATAFIFOSTART                           8
+#define   W_RXDATAFIFO3__RX7DATAFIFOSTART                           7
+#define   O_RXDATAFIFO3__RX7DATAFIFOSIZE                            0
+#define   W_RXDATAFIFO3__RX7DATAFIFOSIZE                            7
 #define R_RXDATAFIFO4                                               0x22D
-#define   O_RXDATAFIFO4__Rx8DataFifoStart                           24
-#define   W_RXDATAFIFO4__Rx8DataFifoStart                           7
-#define   O_RXDATAFIFO4__Rx8DataFifoSize                            16
-#define   W_RXDATAFIFO4__Rx8DataFifoSize                            7
-#define   O_RXDATAFIFO4__Rx9DataFifoStart                           8
-#define   W_RXDATAFIFO4__Rx9DataFifoStart                           7
-#define   O_RXDATAFIFO4__Rx9DataFifoSize                            0
-#define   W_RXDATAFIFO4__Rx9DataFifoSize                            7
+#define   O_RXDATAFIFO4__RX8DATAFIFOSTART                           24
+#define   W_RXDATAFIFO4__RX8DATAFIFOSTART                           7
+#define   O_RXDATAFIFO4__RX8DATAFIFOSIZE                            16
+#define   W_RXDATAFIFO4__RX8DATAFIFOSIZE                            7
+#define   O_RXDATAFIFO4__RX9DATAFIFOSTART                           8
+#define   W_RXDATAFIFO4__RX9DATAFIFOSTART                           7
+#define   O_RXDATAFIFO4__RX9DATAFIFOSIZE                            0
+#define   W_RXDATAFIFO4__RX9DATAFIFOSIZE                            7
 #define R_RXDATAFIFO5                                               0x22E
-#define   O_RXDATAFIFO5__Rx10DataFifoStart                          24
-#define   W_RXDATAFIFO5__Rx10DataFifoStart                          7
-#define   O_RXDATAFIFO5__Rx10DataFifoSize                           16
-#define   W_RXDATAFIFO5__Rx10DataFifoSize                           7
-#define   O_RXDATAFIFO5__Rx11DataFifoStart                          8
-#define   W_RXDATAFIFO5__Rx11DataFifoStart                          7
-#define   O_RXDATAFIFO5__Rx11DataFifoSize                           0
-#define   W_RXDATAFIFO5__Rx11DataFifoSize                           7
+#define   O_RXDATAFIFO5__RX10DATAFIFOSTART                          24
+#define   W_RXDATAFIFO5__RX10DATAFIFOSTART                          7
+#define   O_RXDATAFIFO5__RX10DATAFIFOSIZE                           16
+#define   W_RXDATAFIFO5__RX10DATAFIFOSIZE                           7
+#define   O_RXDATAFIFO5__RX11DATAFIFOSTART                          8
+#define   W_RXDATAFIFO5__RX11DATAFIFOSTART                          7
+#define   O_RXDATAFIFO5__RX11DATAFIFOSIZE                           0
+#define   W_RXDATAFIFO5__RX11DATAFIFOSIZE                           7
 #define R_RXDATAFIFO6                                               0x22F
-#define   O_RXDATAFIFO6__Rx12DataFifoStart                          24
-#define   W_RXDATAFIFO6__Rx12DataFifoStart                          7
-#define   O_RXDATAFIFO6__Rx12DataFifoSize                           16
-#define   W_RXDATAFIFO6__Rx12DataFifoSize                           7
-#define   O_RXDATAFIFO6__Rx13DataFifoStart                          8
-#define   W_RXDATAFIFO6__Rx13DataFifoStart                          7
-#define   O_RXDATAFIFO6__Rx13DataFifoSize                           0
-#define   W_RXDATAFIFO6__Rx13DataFifoSize                           7
+#define   O_RXDATAFIFO6__RX12DATAFIFOSTART                          24
+#define   W_RXDATAFIFO6__RX12DATAFIFOSTART                          7
+#define   O_RXDATAFIFO6__RX12DATAFIFOSIZE                           16
+#define   W_RXDATAFIFO6__RX12DATAFIFOSIZE                           7
+#define   O_RXDATAFIFO6__RX13DATAFIFOSTART                          8
+#define   W_RXDATAFIFO6__RX13DATAFIFOSTART                          7
+#define   O_RXDATAFIFO6__RX13DATAFIFOSIZE                           0
+#define   W_RXDATAFIFO6__RX13DATAFIFOSIZE                           7
 #define R_RXDATAFIFO7                                               0x230
-#define   O_RXDATAFIFO7__Rx14DataFifoStart                          24
-#define   W_RXDATAFIFO7__Rx14DataFifoStart                          7
-#define   O_RXDATAFIFO7__Rx14DataFifoSize                           16
-#define   W_RXDATAFIFO7__Rx14DataFifoSize                           7
-#define   O_RXDATAFIFO7__Rx15DataFifoStart                          8
-#define   W_RXDATAFIFO7__Rx15DataFifoStart                          7
-#define   O_RXDATAFIFO7__Rx15DataFifoSize                           0
-#define   W_RXDATAFIFO7__Rx15DataFifoSize                           7
+#define   O_RXDATAFIFO7__RX14DATAFIFOSTART                          24
+#define   W_RXDATAFIFO7__RX14DATAFIFOSTART                          7
+#define   O_RXDATAFIFO7__RX14DATAFIFOSIZE                           16
+#define   W_RXDATAFIFO7__RX14DATAFIFOSIZE                           7
+#define   O_RXDATAFIFO7__RX15DATAFIFOSTART                          8
+#define   W_RXDATAFIFO7__RX15DATAFIFOSTART                          7
+#define   O_RXDATAFIFO7__RX15DATAFIFOSIZE                           0
+#define   W_RXDATAFIFO7__RX15DATAFIFOSIZE                           7
 #define R_XGMACPADCALIBRATION                                       0x231
 #define R_FREEQCARVE                                                0x233
 #define R_SPI4STATICDELAY0                                          0x240
-#define   O_SPI4STATICDELAY0__DataLine7                             28
-#define   W_SPI4STATICDELAY0__DataLine7                             4
-#define   O_SPI4STATICDELAY0__DataLine6                             24
-#define   W_SPI4STATICDELAY0__DataLine6                             4
-#define   O_SPI4STATICDELAY0__DataLine5                             20
-#define   W_SPI4STATICDELAY0__DataLine5                             4
-#define   O_SPI4STATICDELAY0__DataLine4                             16
-#define   W_SPI4STATICDELAY0__DataLine4                             4
-#define   O_SPI4STATICDELAY0__DataLine3                             12
-#define   W_SPI4STATICDELAY0__DataLine3                             4
-#define   O_SPI4STATICDELAY0__DataLine2                             8
-#define   W_SPI4STATICDELAY0__DataLine2                             4
-#define   O_SPI4STATICDELAY0__DataLine1                             4
-#define   W_SPI4STATICDELAY0__DataLine1                             4
-#define   O_SPI4STATICDELAY0__DataLine0                             0
-#define   W_SPI4STATICDELAY0__DataLine0                             4
+#define   O_SPI4STATICDELAY0__DATALINE7                             28
+#define   W_SPI4STATICDELAY0__DATALINE7                             4
+#define   O_SPI4STATICDELAY0__DATALINE6                             24
+#define   W_SPI4STATICDELAY0__DATALINE6                             4
+#define   O_SPI4STATICDELAY0__DATALINE5                             20
+#define   W_SPI4STATICDELAY0__DATALINE5                             4
+#define   O_SPI4STATICDELAY0__DATALINE4                             16
+#define   W_SPI4STATICDELAY0__DATALINE4                             4
+#define   O_SPI4STATICDELAY0__DATALINE3                             12
+#define   W_SPI4STATICDELAY0__DATALINE3                             4
+#define   O_SPI4STATICDELAY0__DATALINE2                             8
+#define   W_SPI4STATICDELAY0__DATALINE2                             4
+#define   O_SPI4STATICDELAY0__DATALINE1                             4
+#define   W_SPI4STATICDELAY0__DATALINE1                             4
+#define   O_SPI4STATICDELAY0__DATALINE0                             0
+#define   W_SPI4STATICDELAY0__DATALINE0                             4
 #define R_SPI4STATICDELAY1                                          0x241
-#define   O_SPI4STATICDELAY1__DataLine15                            28
-#define   W_SPI4STATICDELAY1__DataLine15                            4
-#define   O_SPI4STATICDELAY1__DataLine14                            24
-#define   W_SPI4STATICDELAY1__DataLine14                            4
-#define   O_SPI4STATICDELAY1__DataLine13                            20
-#define   W_SPI4STATICDELAY1__DataLine13                            4
-#define   O_SPI4STATICDELAY1__DataLine12                            16
-#define   W_SPI4STATICDELAY1__DataLine12                            4
-#define   O_SPI4STATICDELAY1__DataLine11                            12
-#define   W_SPI4STATICDELAY1__DataLine11                            4
-#define   O_SPI4STATICDELAY1__DataLine10                            8
-#define   W_SPI4STATICDELAY1__DataLine10                            4
-#define   O_SPI4STATICDELAY1__DataLine9                             4
-#define   W_SPI4STATICDELAY1__DataLine9                             4
-#define   O_SPI4STATICDELAY1__DataLine8                             0
-#define   W_SPI4STATICDELAY1__DataLine8                             4
+#define   O_SPI4STATICDELAY1__DATALINE15                            28
+#define   W_SPI4STATICDELAY1__DATALINE15                            4
+#define   O_SPI4STATICDELAY1__DATALINE14                            24
+#define   W_SPI4STATICDELAY1__DATALINE14                            4
+#define   O_SPI4STATICDELAY1__DATALINE13                            20
+#define   W_SPI4STATICDELAY1__DATALINE13                            4
+#define   O_SPI4STATICDELAY1__DATALINE12                            16
+#define   W_SPI4STATICDELAY1__DATALINE12                            4
+#define   O_SPI4STATICDELAY1__DATALINE11                            12
+#define   W_SPI4STATICDELAY1__DATALINE11                            4
+#define   O_SPI4STATICDELAY1__DATALINE10                            8
+#define   W_SPI4STATICDELAY1__DATALINE10                            4
+#define   O_SPI4STATICDELAY1__DATALINE9                             4
+#define   W_SPI4STATICDELAY1__DATALINE9                             4
+#define   O_SPI4STATICDELAY1__DATALINE8                             0
+#define   W_SPI4STATICDELAY1__DATALINE8                             4
 #define R_SPI4STATICDELAY2                                          0x242
-#define   O_SPI4STATICDELAY0__TxStat1                               8
-#define   W_SPI4STATICDELAY0__TxStat1                               4
-#define   O_SPI4STATICDELAY0__TxStat0                               4
-#define   W_SPI4STATICDELAY0__TxStat0                               4
-#define   O_SPI4STATICDELAY0__RxControl                             0
-#define   W_SPI4STATICDELAY0__RxControl                             4
+#define   O_SPI4STATICDELAY0__TXSTAT1                               8
+#define   W_SPI4STATICDELAY0__TXSTAT1                               4
+#define   O_SPI4STATICDELAY0__TXSTAT0                               4
+#define   W_SPI4STATICDELAY0__TXSTAT0                               4
+#define   O_SPI4STATICDELAY0__RXCONTROL                             0
+#define   W_SPI4STATICDELAY0__RXCONTROL                             4
 #define R_SPI4CONTROL                                               0x243
-#define   O_SPI4CONTROL__StaticDelay                                2
+#define   O_SPI4CONTROL__STATICDELAY                                2
 #define   O_SPI4CONTROL__LVDS_LVTTL                                 1
-#define   O_SPI4CONTROL__SPI4Enable                                 0
+#define   O_SPI4CONTROL__SPI4ENABLE                                 0
 #define R_CLASSWATERMARKS                                           0x244
-#define   O_CLASSWATERMARKS__Class0Watermark                        24
-#define   W_CLASSWATERMARKS__Class0Watermark                        5
-#define   O_CLASSWATERMARKS__Class1Watermark                        16
-#define   W_CLASSWATERMARKS__Class1Watermark                        5
-#define   O_CLASSWATERMARKS__Class3Watermark                        0
-#define   W_CLASSWATERMARKS__Class3Watermark                        5
+#define   O_CLASSWATERMARKS__CLASS0WATERMARK                        24
+#define   W_CLASSWATERMARKS__CLASS0WATERMARK                        5
+#define   O_CLASSWATERMARKS__CLASS1WATERMARK                        16
+#define   W_CLASSWATERMARKS__CLASS1WATERMARK                        5
+#define   O_CLASSWATERMARKS__CLASS3WATERMARK                        0
+#define   W_CLASSWATERMARKS__CLASS3WATERMARK                        5
 #define R_RXWATERMARKS1                                              0x245
-#define   O_RXWATERMARKS__Rx0DataWatermark                          24
-#define   W_RXWATERMARKS__Rx0DataWatermark                          7
-#define   O_RXWATERMARKS__Rx1DataWatermark                          16
-#define   W_RXWATERMARKS__Rx1DataWatermark                          7
-#define   O_RXWATERMARKS__Rx3DataWatermark                          0
-#define   W_RXWATERMARKS__Rx3DataWatermark                          7
+#define   O_RXWATERMARKS__RX0DATAWATERMARK                          24
+#define   W_RXWATERMARKS__RX0DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX1DATAWATERMARK                          16
+#define   W_RXWATERMARKS__RX1DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX3DATAWATERMARK                          0
+#define   W_RXWATERMARKS__RX3DATAWATERMARK                          7
 #define R_RXWATERMARKS2                                              0x246
-#define   O_RXWATERMARKS__Rx4DataWatermark                          24
-#define   W_RXWATERMARKS__Rx4DataWatermark                          7
-#define   O_RXWATERMARKS__Rx5DataWatermark                          16
-#define   W_RXWATERMARKS__Rx5DataWatermark                          7
-#define   O_RXWATERMARKS__Rx6DataWatermark                          8
-#define   W_RXWATERMARKS__Rx6DataWatermark                          7
-#define   O_RXWATERMARKS__Rx7DataWatermark                          0
-#define   W_RXWATERMARKS__Rx7DataWatermark                          7
+#define   O_RXWATERMARKS__RX4DATAWATERMARK                          24
+#define   W_RXWATERMARKS__RX4DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX5DATAWATERMARK                          16
+#define   W_RXWATERMARKS__RX5DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX6DATAWATERMARK                          8
+#define   W_RXWATERMARKS__RX6DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX7DATAWATERMARK                          0
+#define   W_RXWATERMARKS__RX7DATAWATERMARK                          7
 #define R_RXWATERMARKS3                                              0x247
-#define   O_RXWATERMARKS__Rx8DataWatermark                          24
-#define   W_RXWATERMARKS__Rx8DataWatermark                          7
-#define   O_RXWATERMARKS__Rx9DataWatermark                          16
-#define   W_RXWATERMARKS__Rx9DataWatermark                          7
-#define   O_RXWATERMARKS__Rx10DataWatermark                         8
-#define   W_RXWATERMARKS__Rx10DataWatermark                         7
-#define   O_RXWATERMARKS__Rx11DataWatermark                         0
-#define   W_RXWATERMARKS__Rx11DataWatermark                         7
+#define   O_RXWATERMARKS__RX8DATAWATERMARK                          24
+#define   W_RXWATERMARKS__RX8DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX9DATAWATERMARK                          16
+#define   W_RXWATERMARKS__RX9DATAWATERMARK                          7
+#define   O_RXWATERMARKS__RX10DATAWATERMARK                         8
+#define   W_RXWATERMARKS__RX10DATAWATERMARK                         7
+#define   O_RXWATERMARKS__RX11DATAWATERMARK                         0
+#define   W_RXWATERMARKS__RX11DATAWATERMARK                         7
 #define R_RXWATERMARKS4                                              0x248
-#define   O_RXWATERMARKS__Rx12DataWatermark                         24
-#define   W_RXWATERMARKS__Rx12DataWatermark                         7
-#define   O_RXWATERMARKS__Rx13DataWatermark                         16
-#define   W_RXWATERMARKS__Rx13DataWatermark                         7
-#define   O_RXWATERMARKS__Rx14DataWatermark                         8
-#define   W_RXWATERMARKS__Rx14DataWatermark                         7
-#define   O_RXWATERMARKS__Rx15DataWatermark                         0
-#define   W_RXWATERMARKS__Rx15DataWatermark                         7
+#define   O_RXWATERMARKS__RX12DATAWATERMARK                         24
+#define   W_RXWATERMARKS__RX12DATAWATERMARK                         7
+#define   O_RXWATERMARKS__RX13DATAWATERMARK                         16
+#define   W_RXWATERMARKS__RX13DATAWATERMARK                         7
+#define   O_RXWATERMARKS__RX14DATAWATERMARK                         8
+#define   W_RXWATERMARKS__RX14DATAWATERMARK                         7
+#define   O_RXWATERMARKS__RX15DATAWATERMARK                         0
+#define   W_RXWATERMARKS__RX15DATAWATERMARK                         7
 #define R_FREEWATERMARKS                                            0x249
-#define   O_FREEWATERMARKS__FreeOutWatermark                        16
-#define   W_FREEWATERMARKS__FreeOutWatermark                        16
-#define   O_FREEWATERMARKS__JumFrWatermark                          8
-#define   W_FREEWATERMARKS__JumFrWatermark                          7
-#define   O_FREEWATERMARKS__RegFrWatermark                          0
-#define   W_FREEWATERMARKS__RegFrWatermark                          7
+#define   O_FREEWATERMARKS__FREEOUTWATERMARK                        16
+#define   W_FREEWATERMARKS__FREEOUTWATERMARK                        16
+#define   O_FREEWATERMARKS__JUMFRWATERMARK                          8
+#define   W_FREEWATERMARKS__JUMFRWATERMARK                          7
+#define   O_FREEWATERMARKS__REGFRWATERMARK                          0
+#define   W_FREEWATERMARKS__REGFRWATERMARK                          7
 #define R_EGRESSFIFOCARVINGSLOTS                                    0x24a
 
 #define CTRL_RES0           0
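
The O_*/W_* pairs renamed above are plain bit-offset and field-width constants for the register block in this header; as a rough illustration only, a field is typically pulled out of a register value by combining the pair as below (the helper name and the register value are hypothetical, not part of this patch):

	#include <linux/types.h>

	/* Extract a register field given its offset/width constants. */
	static inline u32 xlr_get_field(u32 reg, u32 offset, u32 width)
	{
		return (reg >> offset) & ((1U << width) - 1);
	}

	/* e.g. read back the Rx2 FIFO size field of R_RXDATAFIFO1 */
	u32 fifo_size = xlr_get_field(val, O_RXDATAFIFO1__RX2DATAFIFOSIZE,
				      W_RXDATAFIFO1__RX2DATAFIFOSIZE);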
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index e5ae42a..e4d85d9 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -3,6 +3,4 @@
 	- move half of the nvec init stuff to i2c-tegra.c
 	- move event handling to nvec_events
 	- finish suspend/resume support
-	- modifiy the sync_write method to return the received
-	  message in a variable (and return the error code).
 	- add support for more device implementations
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 4ae44a5..c335ae2 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -14,8 +14,6 @@
  *
  */
 
-/* #define DEBUG */
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
@@ -40,18 +38,18 @@
 #include "nvec.h"
 
 #define I2C_CNFG			0x00
-#define I2C_CNFG_PACKET_MODE_EN		(1 << 10)
-#define I2C_CNFG_NEW_MASTER_SFM		(1 << 11)
+#define I2C_CNFG_PACKET_MODE_EN		BIT(10)
+#define I2C_CNFG_NEW_MASTER_SFM		BIT(11)
 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12
 
 #define I2C_SL_CNFG		0x20
-#define I2C_SL_NEWSL		(1 << 2)
-#define I2C_SL_NACK		(1 << 1)
-#define I2C_SL_RESP		(1 << 0)
-#define I2C_SL_IRQ		(1 << 3)
-#define END_TRANS		(1 << 4)
-#define RCVD			(1 << 2)
-#define RNW			(1 << 1)
+#define I2C_SL_NEWSL		BIT(2)
+#define I2C_SL_NACK		BIT(1)
+#define I2C_SL_RESP		BIT(0)
+#define I2C_SL_IRQ		BIT(3)
+#define END_TRANS		BIT(4)
+#define RCVD			BIT(2)
+#define RNW			BIT(1)
 
 #define I2C_SL_RCVD		0x24
 #define I2C_SL_STATUS		0x28
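
The hunk above swaps open-coded shifts for the kernel's BIT() helper; assuming the usual BIT() definition from <linux/bitops.h>, the two spellings yield the same mask, e.g.:

	#include <linux/bitops.h>

	/* BIT(n) expands to an unsigned long 1 shifted left by n bits,
	 * so these two masks are numerically identical:
	 */
	#define I2C_SL_IRQ_OPEN_CODED	(1 << 3)
	#define I2C_SL_IRQ_WITH_BIT	BIT(3)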
@@ -150,7 +148,7 @@
 
 	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
 	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
-		msg, msg[1] + 2, true);
+		       msg, msg[1] + 2, true);
 
 	return NOTIFY_OK;
 }
@@ -259,7 +257,7 @@
  * occurred, the nvec driver may print an error.
  */
 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
-			short size)
+		     short size)
 {
 	struct nvec_msg *msg;
 	unsigned long flags;
@@ -288,46 +286,49 @@
  * @nvec: An &struct nvec_chip
  * @data: The data to write
  * @size: The size of @data
+ * @msg:  The response message received
  *
  * This is similar to nvec_write_async(), but waits for the
  * request to be answered before returning. This function
  * uses a mutex and can thus not be called from e.g.
  * interrupt handlers.
  *
- * Returns: A pointer to the response message on success,
- * %NULL on failure. Free with nvec_msg_free() once no longer
- * used.
+ * Returns: 0 on success, a negative error code on failure.
+ * The response message is returned in @msg. It shall be freed with
+ * nvec_msg_free() once no longer used.
+ *
  */
-struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
-		const unsigned char *data, short size)
+int nvec_write_sync(struct nvec_chip *nvec,
+		    const unsigned char *data, short size,
+		    struct nvec_msg **msg)
 {
-	struct nvec_msg *msg;
-
 	mutex_lock(&nvec->sync_write_mutex);
 
+	*msg = NULL;
 	nvec->sync_write_pending = (data[1] << 8) + data[0];
 
 	if (nvec_write_async(nvec, data, size) < 0) {
 		mutex_unlock(&nvec->sync_write_mutex);
-		return NULL;
+		return -ENOMEM;
 	}
 
 	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
-					nvec->sync_write_pending);
+		nvec->sync_write_pending);
 	if (!(wait_for_completion_timeout(&nvec->sync_write,
-				msecs_to_jiffies(2000)))) {
-		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
+					  msecs_to_jiffies(2000)))) {
+		dev_warn(nvec->dev,
+			 "timeout waiting for sync write to complete\n");
 		mutex_unlock(&nvec->sync_write_mutex);
-		return NULL;
+		return -ETIMEDOUT;
 	}
 
 	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
 
-	msg = nvec->last_sync_msg;
+	*msg = nvec->last_sync_msg;
 
 	mutex_unlock(&nvec->sync_write_mutex);
 
-	return msg;
+	return 0;
 }
 EXPORT_SYMBOL(nvec_write_sync);
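
With the reworked prototype above, callers no longer test a returned pointer; they check the error code and receive the response through an output parameter. A minimal caller sketch under that assumption (the two request bytes are placeholders, not a real EC command):

	struct nvec_msg *msg;
	static const unsigned char req[] = { 0x01, 0x02 };	/* hypothetical */
	int err;

	err = nvec_write_sync(nvec, req, sizeof(req), &msg);
	if (err)		/* -ENOMEM or -ETIMEDOUT, nothing to free */
		return err;

	/* ... consume msg->data ... */
	nvec_msg_free(nvec, msg);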
 
@@ -422,8 +423,8 @@
 
 	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
 		print_hex_dump(KERN_WARNING, "ec system event ",
-				DUMP_PREFIX_NONE, 16, 1, msg->data,
-				msg->data[1] + 2, true);
+			       DUMP_PREFIX_NONE, 16, 1, msg->data,
+			       msg->data[1] + 2, true);
 
 	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
 				   msg->data);
@@ -493,8 +494,8 @@
 {
 	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
 		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
-			   (uint) nvec_msg_size(nvec->rx),
-			   (uint) nvec->rx->pos);
+			(uint)nvec_msg_size(nvec->rx),
+			(uint)nvec->rx->pos);
 
 		nvec_msg_free(nvec, nvec->rx);
 		nvec->state = 0;
@@ -508,8 +509,10 @@
 
 	spin_lock(&nvec->rx_lock);
 
-	/* add the received data to the work list
-	   and move the ring buffer pointer to the next entry */
+	/*
+	 * Add the received data to the work list and move the ring buffer
+	 * pointer to the next entry.
+	 */
 	list_add_tail(&nvec->rx->node, &nvec->rx_data);
 
 	spin_unlock(&nvec->rx_lock);
@@ -686,8 +689,8 @@
 	if ((status & (RCVD | RNW)) == RCVD) {
 		if (received != nvec->i2c_addr)
 			dev_err(nvec->dev,
-			"received address 0x%02x, expected 0x%02x\n",
-			received, nvec->i2c_addr);
+				"received address 0x%02x, expected 0x%02x\n",
+				received, nvec->i2c_addr);
 		nvec->state = 1;
 	}
 
@@ -776,7 +779,7 @@
 	}
 
 	if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
-				&nvec->i2c_addr)) {
+				 &nvec->i2c_addr)) {
 		dev_err(nvec->dev, "no i2c address specified");
 		return -ENODEV;
 	}
@@ -852,14 +855,14 @@
 	INIT_WORK(&nvec->tx_work, nvec_request_master);
 
 	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
-					"nvec gpio");
+				    "nvec gpio");
 	if (err < 0) {
 		dev_err(nvec->dev, "couldn't request gpio\n");
 		return -ENODEV;
 	}
 
 	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
-				"nvec", nvec);
+			       "nvec", nvec);
 	if (err) {
 		dev_err(nvec->dev, "couldn't request irq\n");
 		return -ENODEV;
@@ -878,11 +881,13 @@
 	pm_power_off = nvec_power_off;
 
 	/* Get Firmware Version */
-	msg = nvec_write_sync(nvec, get_firmware_version, 2);
+	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
 
-	if (msg) {
-		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
-			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
+	if (!err) {
+		dev_warn(nvec->dev,
+			 "ec firmware version %02x.%02x.%02x / %02x\n",
+			 msg->data[4], msg->data[5],
+			 msg->data[6], msg->data[7]);
 
 		nvec_msg_free(nvec, msg);
 	}
@@ -924,6 +929,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int nvec_suspend(struct device *dev)
 {
+	int err;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
 	struct nvec_msg *msg;
@@ -934,8 +940,9 @@
 	/* keep these sync or you'll break suspend */
 	nvec_toggle_global_events(nvec, false);
 
-	msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
-	nvec_msg_free(nvec, msg);
+	err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
+	if (!err)
+		nvec_msg_free(nvec, msg);
 
 	nvec_disable_i2c_slave(nvec);
 
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index 2ec9de9..c03ca8d 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -168,8 +168,9 @@
 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
 		     short size);
 
-struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
-				 const unsigned char *data, short size);
+int nvec_write_sync(struct nvec_chip *nvec,
+		    const unsigned char *data, short size,
+		    struct nvec_msg **msg);
 
 int nvec_register_notifier(struct nvec_chip *nvec,
 			   struct notifier_block *nb,
diff --git a/drivers/staging/nvec/nvec_paz00.c b/drivers/staging/nvec/nvec_paz00.c
index 68146bf..cddbfd2 100644
--- a/drivers/staging/nvec/nvec_paz00.c
+++ b/drivers/staging/nvec/nvec_paz00.c
@@ -63,7 +63,7 @@
 
 	platform_set_drvdata(pdev, led);
 
-	ret = led_classdev_register(&pdev->dev, &led->cdev);
+	ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
 	if (ret < 0)
 		return ret;
 
@@ -73,18 +73,8 @@
 	return 0;
 }
 
-static int nvec_paz00_remove(struct platform_device *pdev)
-{
-	struct nvec_led *led = platform_get_drvdata(pdev);
-
-	led_classdev_unregister(&led->cdev);
-
-	return 0;
-}
-
 static struct platform_driver nvec_paz00_driver = {
 	.probe  = nvec_paz00_probe,
-	.remove = nvec_paz00_remove,
 	.driver = {
 		.name  = "nvec-paz00",
 	},
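
The switch above to devm_led_classdev_register() hands teardown of the LED class device to the device-managed resource layer, which is why the .remove callback could be dropped. A minimal probe sketch under that assumption (the LED name and function name are made up):

	#include <linux/leds.h>
	#include <linux/platform_device.h>

	static int example_led_probe(struct platform_device *pdev)
	{
		struct nvec_led *led;

		led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
		if (!led)
			return -ENOMEM;

		led->cdev.name = "example::led";	/* hypothetical */

		/* unregistered automatically when the driver unbinds */
		return devm_led_classdev_register(&pdev->dev, &led->cdev);
	}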
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 04a7402..b4a0545 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -207,8 +207,10 @@
 	case TYPE:
 		memcpy(power->bat_type, &res->plc, res->length - 2);
 		power->bat_type[res->length - 2] = '\0';
-		/* this differs a little from the spec
-		   fill in more if you find some */
+		/*
+		 * This differs a little from the spec; fill in more if you
+		 * find some.
+		 */
 		if (!strncmp(power->bat_type, "Li", 30))
 			power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_LION;
 		else
@@ -356,12 +358,14 @@
 	if (counter >= ARRAY_SIZE(bat_iter))
 		counter = 0;
 
-/* AC status via sys req */
+	/* AC status via sys req */
 	nvec_write_async(power->nvec, buf, 2);
 	msleep(100);
 
-/* select a battery request function via round robin
-   doing it all at once seems to overload the power supply */
+	/*
+	 * Select a battery request function via round robin; doing it all
+	 * at once seems to overload the power supply.
+	 */
 	buf[0] = NVEC_BAT;
 	buf[1] = bat_iter[counter++];
 	nvec_write_async(power->nvec, buf, 2);
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
index cc58a7e..2b29acc 100644
--- a/drivers/staging/octeon-usb/TODO
+++ b/drivers/staging/octeon-usb/TODO
@@ -1,11 +1,8 @@
-This driver is functional and has been tested on EdgeRouter Lite with
-USB mass storage.
+This driver is functional and has been tested on EdgeRouter Lite,
+D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
 
 TODO:
 	- kernel coding style
 	- checkpatch warnings
-	- dead code elimination
-	- device tree bindings
-	- possibly eliminate the extra "hardware abstraction layer"
 
 Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 6f28717..17442b3 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -43,29 +43,15 @@
  * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
  * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
  */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/prefetch.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+
 #include <linux/usb.h>
-
-#include <linux/time.h>
-#include <linux/delay.h>
-
-#include <asm/octeon/cvmx.h>
-#include <asm/octeon/cvmx-iob-defs.h>
-
+#include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/usb/hcd.h>
-
-#include <linux/err.h>
+#include <linux/prefetch.h>
+#include <linux/platform_device.h>
 
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-sysinfo.h>
-#include <asm/octeon/cvmx-helper-board.h>
 
 #include "octeon-hcd.h"
 
@@ -113,35 +99,35 @@
 };
 
 /**
- * enum cvmx_usb_complete - possible callback function status codes
+ * enum cvmx_usb_status - possible callback function status codes
  *
- * @CVMX_USB_COMPLETE_SUCCESS:	  The transaction / operation finished without
+ * @CVMX_USB_STATUS_OK:		  The transaction / operation finished without
  *				  any errors
- * @CVMX_USB_COMPLETE_SHORT:	  FIXME: This is currently not implemented
- * @CVMX_USB_COMPLETE_CANCEL:	  The transaction was canceled while in flight
+ * @CVMX_USB_STATUS_SHORT:	  FIXME: This is currently not implemented
+ * @CVMX_USB_STATUS_CANCEL:	  The transaction was canceled while in flight
  *				  by a user call to cvmx_usb_cancel
- * @CVMX_USB_COMPLETE_ERROR:	  The transaction aborted with an unexpected
+ * @CVMX_USB_STATUS_ERROR:	  The transaction aborted with an unexpected
  *				  error status
- * @CVMX_USB_COMPLETE_STALL:	  The transaction received a USB STALL response
+ * @CVMX_USB_STATUS_STALL:	  The transaction received a USB STALL response
  *				  from the device
- * @CVMX_USB_COMPLETE_XACTERR:	  The transaction failed with an error from the
+ * @CVMX_USB_STATUS_XACTERR:	  The transaction failed with an error from the
  *				  device even after a number of retries
- * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
+ * @CVMX_USB_STATUS_DATATGLERR:	  The transaction failed with a data toggle
  *				  error even after a number of retries
- * @CVMX_USB_COMPLETE_BABBLEERR:  The transaction failed with a babble error
- * @CVMX_USB_COMPLETE_FRAMEERR:	  The transaction failed with a frame error
+ * @CVMX_USB_STATUS_BABBLEERR:	  The transaction failed with a babble error
+ * @CVMX_USB_STATUS_FRAMEERR:	  The transaction failed with a frame error
  *				  even after a number of retries
  */
-enum cvmx_usb_complete {
-	CVMX_USB_COMPLETE_SUCCESS,
-	CVMX_USB_COMPLETE_SHORT,
-	CVMX_USB_COMPLETE_CANCEL,
-	CVMX_USB_COMPLETE_ERROR,
-	CVMX_USB_COMPLETE_STALL,
-	CVMX_USB_COMPLETE_XACTERR,
-	CVMX_USB_COMPLETE_DATATGLERR,
-	CVMX_USB_COMPLETE_BABBLEERR,
-	CVMX_USB_COMPLETE_FRAMEERR,
+enum cvmx_usb_status {
+	CVMX_USB_STATUS_OK,
+	CVMX_USB_STATUS_SHORT,
+	CVMX_USB_STATUS_CANCEL,
+	CVMX_USB_STATUS_ERROR,
+	CVMX_USB_STATUS_STALL,
+	CVMX_USB_STATUS_XACTERR,
+	CVMX_USB_STATUS_DATATGLERR,
+	CVMX_USB_STATUS_BABBLEERR,
+	CVMX_USB_STATUS_FRAMEERR,
 };
 
 /**
@@ -160,13 +146,13 @@
  *			status call.
  */
 struct cvmx_usb_port_status {
-	uint32_t reserved		: 25;
-	uint32_t port_enabled		: 1;
-	uint32_t port_over_current	: 1;
-	uint32_t port_powered		: 1;
+	u32 reserved			: 25;
+	u32 port_enabled		: 1;
+	u32 port_over_current		: 1;
+	u32 port_powered		: 1;
 	enum cvmx_usb_speed port_speed	: 2;
-	uint32_t connected		: 1;
-	uint32_t connect_change		: 1;
+	u32 connected			: 1;
+	u32 connect_change		: 1;
 };
 
 /**
@@ -180,7 +166,7 @@
 struct cvmx_usb_iso_packet {
 	int offset;
 	int length;
-	enum cvmx_usb_complete status;
+	enum cvmx_usb_status status;
 };
 
 /**
@@ -234,13 +220,13 @@
  * The low level hardware can transfer a maximum of this number of bytes in each
  * transfer. The field is 19 bits wide
  */
-#define MAX_TRANSFER_BYTES	((1<<19)-1)
+#define MAX_TRANSFER_BYTES	((1 << 19) - 1)
 
 /*
  * The low level hardware can transfer a maximum of this number of packets in
  * each transfer. The field is 10 bits wide
  */
-#define MAX_TRANSFER_PACKETS	((1<<10)-1)
+#define MAX_TRANSFER_PACKETS	((1 << 10) - 1)
 
 /**
  * Logical transactions may take numerous low level
@@ -284,9 +270,9 @@
 struct cvmx_usb_transaction {
 	struct list_head node;
 	enum cvmx_usb_transfer type;
-	uint64_t buffer;
+	u64 buffer;
 	int buffer_length;
-	uint64_t control_header;
+	u64 control_header;
 	int iso_start_frame;
 	int iso_number_packets;
 	struct cvmx_usb_iso_packet *iso_packets;
@@ -328,36 +314,37 @@
 struct cvmx_usb_pipe {
 	struct list_head node;
 	struct list_head transactions;
-	uint64_t interval;
-	uint64_t next_tx_frame;
+	u64 interval;
+	u64 next_tx_frame;
 	enum cvmx_usb_pipe_flags flags;
 	enum cvmx_usb_speed device_speed;
 	enum cvmx_usb_transfer transfer_type;
 	enum cvmx_usb_direction transfer_dir;
 	int multi_count;
-	uint16_t max_packet;
-	uint8_t device_addr;
-	uint8_t endpoint_num;
-	uint8_t hub_device_addr;
-	uint8_t hub_port;
-	uint8_t pid_toggle;
-	uint8_t channel;
-	int8_t split_sc_frame;
+	u16 max_packet;
+	u8 device_addr;
+	u8 endpoint_num;
+	u8 hub_device_addr;
+	u8 hub_port;
+	u8 pid_toggle;
+	u8 channel;
+	s8 split_sc_frame;
 };
 
 struct cvmx_usb_tx_fifo {
 	struct {
 		int channel;
 		int size;
-		uint64_t address;
-	} entry[MAX_CHANNELS+1];
+		u64 address;
+	} entry[MAX_CHANNELS + 1];
 	int head;
 	int tail;
 };
 
 /**
- * struct cvmx_usb_state - the state of the USB block
+ * struct octeon_hcd - the state of the USB block
  *
+ * lock:		   Serialization lock.
  * init_flags:		   Flags passed to initialize.
  * index:		   Which USB block this is for.
  * idle_hardware_channels: Bit set for every idle hardware channel.
@@ -372,7 +359,8 @@
  * frame_number:	   Increments every SOF interrupt for time keeping.
  * active_split:	   Points to the current active split, or NULL.
  */
-struct cvmx_usb_state {
+struct octeon_hcd {
+	spinlock_t lock; /* serialization lock */
 	int init_flags;
 	int index;
 	int idle_hardware_channels;
@@ -382,23 +370,18 @@
 	struct cvmx_usb_port_status port_status;
 	struct list_head idle_pipes;
 	struct list_head active_pipes[4];
-	uint64_t frame_number;
+	u64 frame_number;
 	struct cvmx_usb_transaction *active_split;
 	struct cvmx_usb_tx_fifo periodic;
 	struct cvmx_usb_tx_fifo nonperiodic;
 };
 
-struct octeon_hcd {
-	spinlock_t lock;
-	struct cvmx_usb_state usb;
-};
-
 /* This macro spins on a register waiting for it to reach a condition. */
 #define CVMX_WAIT_FOR_FIELD32(address, _union, cond, timeout_usec)	    \
 	({int result;							    \
 	do {								    \
-		uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
-			octeon_get_clock_rate() / 1000000;		    \
+		u64 done = cvmx_get_cycle() + (u64)timeout_usec *	    \
+			   octeon_get_clock_rate() / 1000000;		    \
 		union _union c;						    \
 									    \
 		while (1) {						    \
@@ -431,7 +414,7 @@
 
 /* Returns the IO address to push/pop stuff data from the FIFOs */
 #define USB_FIFO_ADDRESS(channel, usb_index) \
-	(CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
+	(CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
 
 /**
  * struct octeon_temp_buffer - a bounce buffer for USB transfers
@@ -447,11 +430,6 @@
 	u8 data[0];
 };
 
-static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
-{
-	return container_of(p, struct octeon_hcd, usb);
-}
-
 static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
 {
 	return container_of((void *)p, struct usb_hcd, hcd_priv);
@@ -562,14 +540,12 @@
  *
  * Returns: Result of the read
  */
-static inline uint32_t cvmx_usb_read_csr32(struct cvmx_usb_state *usb,
-					   uint64_t address)
+static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
 {
-	uint32_t result = cvmx_read64_uint32(address ^ 4);
+	u32 result = cvmx_read64_uint32(address ^ 4);
 	return result;
 }
 
-
 /**
  * Write a USB 32bit CSR. It performs the necessary address
  * swizzle for 32bit CSRs and logs the value in a readable format
@@ -579,8 +555,8 @@
  * @address: 64bit address to write
  * @value:   Value to write
  */
-static inline void cvmx_usb_write_csr32(struct cvmx_usb_state *usb,
-					uint64_t address, uint32_t value)
+static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
+					u64 address, u32 value)
 {
 	cvmx_write64_uint32(address ^ 4, value);
 	cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
@@ -595,14 +571,13 @@
  *
  * Returns: Non zero if we need to do split transactions
  */
-static inline int cvmx_usb_pipe_needs_split(struct cvmx_usb_state *usb,
+static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
 					    struct cvmx_usb_pipe *pipe)
 {
 	return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
 	       usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
 }
 
-
 /**
  * Trivial utility function to return the correct PID for a pipe
  *
@@ -617,7 +592,7 @@
 	return 0; /* Data0 */
 }
 
-static void cvmx_fifo_setup(struct cvmx_usb_state *usb)
+static void cvmx_fifo_setup(struct octeon_hcd *usb)
 {
 	union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
 	union cvmx_usbcx_gnptxfsiz npsiz;
@@ -675,7 +650,7 @@
  *
  * Returns: 0 or a negative error code.
  */
-static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
+static int cvmx_usb_shutdown(struct octeon_hcd *usb)
 {
 	union cvmx_usbnx_clk_ctl usbn_clk_ctl;
 
@@ -704,12 +679,12 @@
  * off in the disabled state.
  *
  * @dev:	 Pointer to struct device for logging purposes.
- * @usb:	 Pointer to struct cvmx_usb_state.
+ * @usb:	 Pointer to struct octeon_hcd.
  *
  * Returns: 0 or a negative error code.
  */
 static int cvmx_usb_initialize(struct device *dev,
-			       struct cvmx_usb_state *usb)
+			       struct octeon_hcd *usb)
 {
 	int channel;
 	int divisor;
@@ -975,7 +950,7 @@
  *
  * @usb: USB device state populated by cvmx_usb_initialize().
  */
-static void cvmx_usb_reset_port(struct cvmx_usb_state *usb)
+static void cvmx_usb_reset_port(struct octeon_hcd *usb)
 {
 	usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
 						  CVMX_USBCX_HPRT(usb->index));
@@ -1002,7 +977,6 @@
 						  CVMX_USBCX_HPRT(usb->index));
 }
 
-
 /**
  * Disable a USB port. After this call the USB port will not
  * generate data transfers and will not generate events.
@@ -1013,7 +987,7 @@
  *
  * Returns: 0 or a negative error code.
  */
-static int cvmx_usb_disable(struct cvmx_usb_state *usb)
+static int cvmx_usb_disable(struct octeon_hcd *usb)
 {
 	/* Disable the port */
 	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
@@ -1021,7 +995,6 @@
 	return 0;
 }
 
-
 /**
  * Get the current state of the USB port. Use this call to
  * determine if the usb port has anything connected, is enabled,
@@ -1033,8 +1006,7 @@
  *
  * Returns: Port status information
  */
-static struct cvmx_usb_port_status cvmx_usb_get_status(
-		struct cvmx_usb_state *usb)
+static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
 {
 	union cvmx_usbcx_hprt usbc_hprt;
 	struct cvmx_usb_port_status result;
@@ -1048,7 +1020,7 @@
 	result.port_speed = usbc_hprt.s.prtspd;
 	result.connected = usbc_hprt.s.prtconnsts;
 	result.connect_change =
-		(result.connected != usb->port_status.connected);
+		result.connected != usb->port_status.connected;
 
 	return result;
 }
@@ -1105,7 +1077,7 @@
  *
  * Returns: A non-NULL value is a pipe. NULL means an error.
  */
-static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
+static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
 						int device_addr,
 						int endpoint_num,
 						enum cvmx_usb_speed
@@ -1125,8 +1097,8 @@
 	if (!pipe)
 		return NULL;
 	if ((device_speed == CVMX_USB_SPEED_HIGH) &&
-		(transfer_dir == CVMX_USB_DIRECTION_OUT) &&
-		(transfer_type == CVMX_USB_TRANSFER_BULK))
+	    (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+	    (transfer_type == CVMX_USB_TRANSFER_BULK))
 		pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
 	pipe->device_addr = device_addr;
 	pipe->endpoint_num = endpoint_num;
@@ -1143,9 +1115,9 @@
 	if (!interval)
 		interval = 1;
 	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-		pipe->interval = interval*8;
+		pipe->interval = interval * 8;
 		/* Force start splits to be schedule on uFrame 0 */
-		pipe->next_tx_frame = ((usb->frame_number+7)&~7) +
+		pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
 					pipe->interval;
 	} else {
 		pipe->interval = interval;
@@ -1166,7 +1138,6 @@
 	return pipe;
 }
 
-
 /**
  * Poll the RX FIFOs and remove data as needed. This function is only used
  * in non DMA mode. It is very important that this function be called quickly
@@ -1174,13 +1145,13 @@
  *
  * @usb:	USB device state populated by cvmx_usb_initialize().
  */
-static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
+static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
 {
 	union cvmx_usbcx_grxstsph rx_status;
 	int channel;
 	int bytes;
-	uint64_t address;
-	uint32_t *ptr;
+	u64 address;
+	u32 *ptr;
 
 	rx_status.u32 = cvmx_usb_read_csr32(usb,
 					    CVMX_USBCX_GRXSTSPH(usb->index));
@@ -1213,7 +1184,6 @@
 	CVMX_SYNCW;
 }
 
-
 /**
  * Fill the TX hardware fifo with data out of the software
  * fifos
@@ -1225,7 +1195,7 @@
  * Returns: Non zero if the hardware fifo was too small and needs
  *	    to be serviced again.
  */
-static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
+static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
 			       struct cvmx_usb_tx_fifo *fifo, int available)
 {
 	/*
@@ -1234,9 +1204,9 @@
 	 */
 	while (available && (fifo->head != fifo->tail)) {
 		int i = fifo->tail;
-		const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
-		uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
-							usb->index) ^ 4;
+		const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+		u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
+						   usb->index) ^ 4;
 		int words = available;
 
 		/* Limit the amount of data to what the SW fifo has */
@@ -1275,13 +1245,12 @@
 	return fifo->head != fifo->tail;
 }
 
-
 /**
  * Check the hardware FIFOs and fill them as needed
  *
  * @usb:	USB device state populated by cvmx_usb_initialize().
  */
-static void cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
+static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
 {
 	if (usb->periodic.head != usb->periodic.tail) {
 		union cvmx_usbcx_hptxsts tx_status;
@@ -1312,14 +1281,13 @@
 	}
 }
 
-
 /**
  * Fill the TX FIFO with an outgoing packet
  *
  * @usb:	  USB device state populated by cvmx_usb_initialize().
  * @channel:	  Channel number to get packet from
  */
-static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
+static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
 {
 	union cvmx_usbcx_hccharx hcchar;
 	union cvmx_usbcx_hcspltx usbc_hcsplt;
@@ -1348,7 +1316,7 @@
 		return;
 
 	if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
-		(hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+	    (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
 		fifo = &usb->periodic;
 	else
 		fifo = &usb->nonperiodic;
@@ -1357,7 +1325,7 @@
 	fifo->entry[fifo->head].address =
 		cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
 				   channel * 8);
-	fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
+	fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
 	fifo->head++;
 	if (fifo->head > MAX_CHANNELS)
 		fifo->head = 0;
@@ -1373,12 +1341,11 @@
  * @channel:	  Channel to setup
  * @pipe:	  Pipe for control transaction
  */
-static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
+static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
 					   int channel,
 					   struct cvmx_usb_pipe *pipe)
 {
-	struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
-	struct usb_hcd *hcd = octeon_to_hcd(priv);
+	struct usb_hcd *hcd = octeon_to_hcd(usb);
 	struct device *dev = hcd->self.controller;
 	struct cvmx_usb_transaction *transaction =
 		list_first_entry(&pipe->transactions, typeof(*transaction),
@@ -1488,9 +1455,9 @@
 	 */
 	packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
 					   pipe->max_packet);
-	if (packets_to_transfer == 0)
+	if (packets_to_transfer == 0) {
 		packets_to_transfer = 1;
-	else if ((packets_to_transfer > 1) &&
+	} else if ((packets_to_transfer > 1) &&
 			(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
 		/*
 		 * Limit to one packet when not using DMA. Channels must be
@@ -1515,7 +1482,6 @@
 			     usbc_hctsiz.u32);
 }
 
-
 /**
  * Start a channel to perform the pipe's head transaction
  *
@@ -1523,7 +1489,7 @@
  * @channel:	  Channel to setup
  * @pipe:	  Pipe to start
  */
-static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
+static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
 				   struct cvmx_usb_pipe *pipe)
 {
 	struct cvmx_usb_transaction *transaction =
@@ -1539,7 +1505,7 @@
 	pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
 
 	/* Mark this channel as in use */
-	usb->idle_hardware_channels &= ~(1<<channel);
+	usb->idle_hardware_channels &= ~(1 << channel);
 
 	/* Enable the channel interrupt bits */
 	{
@@ -1579,22 +1545,22 @@
 			usbc_hcintmsk.s.xfercomplmsk = 1;
 		}
 		cvmx_usb_write_csr32(usb,
-				CVMX_USBCX_HCINTMSKX(channel, usb->index),
-				usbc_hcintmsk.u32);
+				     CVMX_USBCX_HCINTMSKX(channel, usb->index),
+				     usbc_hcintmsk.u32);
 
 		/* Enable the channel interrupt to propagate */
 		usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
 					CVMX_USBCX_HAINTMSK(usb->index));
-		usbc_haintmsk.s.haintmsk |= 1<<channel;
+		usbc_haintmsk.s.haintmsk |= 1 << channel;
 		cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
 				     usbc_haintmsk.u32);
 	}
 
 	/* Setup the location the DMA engine uses. */
 	{
-		uint64_t reg;
-		uint64_t dma_address = transaction->buffer +
-					transaction->actual_bytes;
+		u64 reg;
+		u64 dma_address = transaction->buffer +
+				  transaction->actual_bytes;
 
 		if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
 			dma_address = transaction->buffer +
@@ -1636,15 +1602,16 @@
 			 * We only store the lower two bits since the time ahead
 			 * can only be two frames
 			 */
-			if ((transaction->stage&1) == 0) {
+			if ((transaction->stage & 1) == 0) {
 				if (transaction->type == CVMX_USB_TRANSFER_BULK)
 					pipe->split_sc_frame =
 						(usb->frame_number + 1) & 0x7f;
 				else
 					pipe->split_sc_frame =
 						(usb->frame_number + 2) & 0x7f;
-			} else
+			} else {
 				pipe->split_sc_frame = -1;
+			}
 
 			usbc_hcsplt.s.spltena = 1;
 			usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
@@ -1666,10 +1633,9 @@
 			 * begin/middle/end of the data or all
 			 */
 			if (!usbc_hcsplt.s.compsplt &&
-				(pipe->transfer_dir ==
-				 CVMX_USB_DIRECTION_OUT) &&
-				(pipe->transfer_type ==
-				 CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+			    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+			    (pipe->transfer_type ==
+			     CVMX_USB_TRANSFER_ISOCHRONOUS)) {
 				/*
 				 * Clear the split complete frame number as
 				 * there isn't going to be a split complete
@@ -1732,11 +1698,11 @@
 		 */
 		packets_to_transfer =
 			DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
-		if (packets_to_transfer == 0)
+		if (packets_to_transfer == 0) {
 			packets_to_transfer = 1;
-		else if ((packets_to_transfer > 1) &&
-				(usb->init_flags &
-				 CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+		} else if ((packets_to_transfer > 1) &&
+			   (usb->init_flags &
+			    CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
 			/*
 			 * Limit to one packet when not using DMA. Channels must
 			 * be restarted between every packet for IN
@@ -1783,7 +1749,7 @@
 		 * Set the startframe odd/even properly. This is only used for
 		 * periodic
 		 */
-		usbc_hcchar.s.oddfrm = usb->frame_number&1;
+		usbc_hcchar.s.oddfrm = usb->frame_number & 1;
 
 		/*
 		 * Set the number of back to back packets allowed by this
@@ -1843,9 +1809,11 @@
 		break;
 	}
 	{
-		union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 =
+		union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
 			cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCTSIZX(channel, usb->index))};
+					    CVMX_USBCX_HCTSIZX(channel,
+							       usb->index))
+		};
 		transaction->xfersize = usbc_hctsiz.s.xfersize;
 		transaction->pktcnt = usbc_hctsiz.s.pktcnt;
 	}
@@ -1858,21 +1826,19 @@
 		cvmx_usb_fill_tx_fifo(usb, channel);
 }
 
-
 /**
  * Find a pipe that is ready to be scheduled to hardware.
  * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @list:	 Pipe list to search
- * @current_frame:
- *		 Frame counter to use as a time reference.
+ * @xfer_type:	 Transfer type
  *
  * Returns: Pipe or NULL if none are ready
  */
 static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
-		struct cvmx_usb_state *usb,
-		struct list_head *list,
-		uint64_t current_frame)
+		struct octeon_hcd *usb,
+		enum cvmx_usb_transfer xfer_type)
 {
+	struct list_head *list = usb->active_pipes + xfer_type;
+	u64 current_frame = usb->frame_number;
 	struct cvmx_usb_pipe *pipe;
 
 	list_for_each_entry(pipe, list, node) {
@@ -1880,11 +1846,11 @@
 			list_first_entry(&pipe->transactions, typeof(*t),
 					 node);
 		if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
-			(pipe->next_tx_frame <= current_frame) &&
-			((pipe->split_sc_frame == -1) ||
-			 ((((int)current_frame - (int)pipe->split_sc_frame)
-			   & 0x7f) < 0x40)) &&
-			(!usb->active_split || (usb->active_split == t))) {
+		    (pipe->next_tx_frame <= current_frame) &&
+		    ((pipe->split_sc_frame == -1) ||
+		     ((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
+		      0x40)) &&
+		    (!usb->active_split || (usb->active_split == t))) {
 			prefetch(t);
 			return pipe;
 		}
@@ -1892,6 +1858,32 @@
 	return NULL;
 }
 
+static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
+						int is_sof)
+{
+	struct cvmx_usb_pipe *pipe;
+
+	/* Find a pipe needing service. */
+	if (is_sof) {
+		/*
+		 * Only process periodic pipes on SOF interrupts. This way we
+		 * are sure that the periodic data is sent in the beginning of
+		 * the frame.
+		 */
+		pipe = cvmx_usb_find_ready_pipe(usb,
+						CVMX_USB_TRANSFER_ISOCHRONOUS);
+		if (pipe)
+			return pipe;
+		pipe = cvmx_usb_find_ready_pipe(usb,
+						CVMX_USB_TRANSFER_INTERRUPT);
+		if (pipe)
+			return pipe;
+	}
+	pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
+	if (pipe)
+		return pipe;
+	return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
+}
 
 /**
  * Called whenever a pipe might need to be scheduled to the
@@ -1900,7 +1892,7 @@
  * @usb:	 USB device state populated by cvmx_usb_initialize().
  * @is_sof:	 True if this schedule was called on a SOF interrupt.
  */
-static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
+static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
 {
 	int channel;
 	struct cvmx_usb_pipe *pipe;
@@ -1922,7 +1914,7 @@
 						CVMX_USBCX_HFIR(usb->index))
 		};
 
-		if (hfnum.s.frrem < hfir.s.frint/4)
+		if (hfnum.s.frrem < hfir.s.frint / 4)
 			goto done;
 	}
 
@@ -1932,35 +1924,7 @@
 		if (unlikely(channel > 7))
 			break;
 
-		/* Find a pipe needing service */
-		pipe = NULL;
-		if (is_sof) {
-			/*
-			 * Only process periodic pipes on SOF interrupts. This
-			 * way we are sure that the periodic data is sent in the
-			 * beginning of the frame
-			 */
-			pipe = cvmx_usb_find_ready_pipe(usb,
-					usb->active_pipes +
-					CVMX_USB_TRANSFER_ISOCHRONOUS,
-					usb->frame_number);
-			if (likely(!pipe))
-				pipe = cvmx_usb_find_ready_pipe(usb,
-						usb->active_pipes +
-						CVMX_USB_TRANSFER_INTERRUPT,
-						usb->frame_number);
-		}
-		if (likely(!pipe)) {
-			pipe = cvmx_usb_find_ready_pipe(usb,
-					usb->active_pipes +
-					CVMX_USB_TRANSFER_CONTROL,
-					usb->frame_number);
-			if (likely(!pipe))
-				pipe = cvmx_usb_find_ready_pipe(usb,
-						usb->active_pipes +
-						CVMX_USB_TRANSFER_BULK,
-						usb->frame_number);
-		}
+		pipe = cvmx_usb_next_pipe(usb, is_sof);
 		if (!pipe)
 			break;
 
@@ -1974,7 +1938,7 @@
 	 */
 	need_sof = 0;
 	for (ttype = CVMX_USB_TRANSFER_CONTROL;
-			ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+	     ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
 		list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
 			if (pipe->next_tx_frame > usb->frame_number) {
 				need_sof = 1;
@@ -1986,19 +1950,18 @@
 			cvmx_usbcx_gintmsk, sofmsk, need_sof);
 }
 
-static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
-					     enum cvmx_usb_complete status,
+static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
+					     enum cvmx_usb_status status,
 					     struct cvmx_usb_pipe *pipe,
 					     struct cvmx_usb_transaction
 						*transaction,
 					     int bytes_transferred,
 					     struct urb *urb)
 {
-	struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
-	struct usb_hcd *hcd = octeon_to_hcd(priv);
+	struct usb_hcd *hcd = octeon_to_hcd(usb);
 	struct device *dev = hcd->self.controller;
 
-	if (likely(status == CVMX_USB_COMPLETE_SUCCESS))
+	if (likely(status == CVMX_USB_STATUS_OK))
 		urb->actual_length = bytes_transferred;
 	else
 		urb->actual_length = 0;
@@ -2006,7 +1969,8 @@
 	urb->hcpriv = NULL;
 
 	/* For Isochronous transactions we need to update the URB packet status
-	   list from data in our private copy */
+	 * list from data in our private copy
+	 */
 	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 		int i;
 		/*
@@ -2014,12 +1978,11 @@
 		 * field.
 		 */
 		struct cvmx_usb_iso_packet *iso_packet =
-			(struct cvmx_usb_iso_packet *) urb->setup_packet;
+			(struct cvmx_usb_iso_packet *)urb->setup_packet;
 		/* Recalculate the transfer size by adding up each packet */
 		urb->actual_length = 0;
 		for (i = 0; i < urb->number_of_packets; i++) {
-			if (iso_packet[i].status ==
-					CVMX_USB_COMPLETE_SUCCESS) {
+			if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
 				urb->iso_frame_desc[i].status = 0;
 				urb->iso_frame_desc[i].actual_length =
 					iso_packet[i].length;
@@ -2039,41 +2002,41 @@
 	}
 
 	switch (status) {
-	case CVMX_USB_COMPLETE_SUCCESS:
+	case CVMX_USB_STATUS_OK:
 		urb->status = 0;
 		break;
-	case CVMX_USB_COMPLETE_CANCEL:
+	case CVMX_USB_STATUS_CANCEL:
 		if (urb->status == 0)
 			urb->status = -ENOENT;
 		break;
-	case CVMX_USB_COMPLETE_STALL:
+	case CVMX_USB_STATUS_STALL:
 		dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
 			pipe, transaction, bytes_transferred);
 		urb->status = -EPIPE;
 		break;
-	case CVMX_USB_COMPLETE_BABBLEERR:
+	case CVMX_USB_STATUS_BABBLEERR:
 		dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
 			pipe, transaction, bytes_transferred);
 		urb->status = -EPIPE;
 		break;
-	case CVMX_USB_COMPLETE_SHORT:
+	case CVMX_USB_STATUS_SHORT:
 		dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
 			pipe, transaction, bytes_transferred);
 		urb->status = -EREMOTEIO;
 		break;
-	case CVMX_USB_COMPLETE_ERROR:
-	case CVMX_USB_COMPLETE_XACTERR:
-	case CVMX_USB_COMPLETE_DATATGLERR:
-	case CVMX_USB_COMPLETE_FRAMEERR:
+	case CVMX_USB_STATUS_ERROR:
+	case CVMX_USB_STATUS_XACTERR:
+	case CVMX_USB_STATUS_DATATGLERR:
+	case CVMX_USB_STATUS_FRAMEERR:
 		dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
 			status, pipe, transaction, bytes_transferred);
 		urb->status = -EPROTO;
 		break;
 	}
-	usb_hcd_unlink_urb_from_ep(octeon_to_hcd(priv), urb);
-	spin_unlock(&priv->lock);
-	usb_hcd_giveback_urb(octeon_to_hcd(priv), urb, urb->status);
-	spin_lock(&priv->lock);
+	usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
+	spin_unlock(&usb->lock);
+	usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
+	spin_lock(&usb->lock);
 }
 
 /**
@@ -2087,10 +2050,10 @@
  * @complete_code:
  *		 Completion code
  */
-static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
-				      struct cvmx_usb_pipe *pipe,
-				      struct cvmx_usb_transaction *transaction,
-				      enum cvmx_usb_complete complete_code)
+static void cvmx_usb_complete(struct octeon_hcd *usb,
+			      struct cvmx_usb_pipe *pipe,
+			      struct cvmx_usb_transaction *transaction,
+			      enum cvmx_usb_status complete_code)
 {
 	/* If this was a split then clear our split in progress marker */
 	if (usb->active_split == transaction)
@@ -2110,7 +2073,7 @@
 		 * next one
 		 */
 		if ((transaction->iso_number_packets > 1) &&
-			(complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+		    (complete_code == CVMX_USB_STATUS_OK)) {
 			/* No bytes transferred for this packet as of yet */
 			transaction->actual_bytes = 0;
 			/* One less ISO waiting to transfer */
@@ -2133,7 +2096,6 @@
 	kfree(transaction);
 }
 
-
 /**
  * Submit a usb transaction to a pipe. Called for all types
  * of transactions.
@@ -2157,12 +2119,12 @@
  * Returns: Transaction or NULL on failure.
  */
 static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
-				struct cvmx_usb_state *usb,
+				struct octeon_hcd *usb,
 				struct cvmx_usb_pipe *pipe,
 				enum cvmx_usb_transfer type,
-				uint64_t buffer,
+				u64 buffer,
 				int buffer_length,
-				uint64_t control_header,
+				u64 control_header,
 				int iso_start_frame,
 				int iso_number_packets,
 				struct cvmx_usb_iso_packet *iso_packets,
@@ -2208,7 +2170,6 @@
 	return transaction;
 }
 
-
 /**
  * Call to submit a USB Bulk transfer to a pipe.
  *
@@ -2219,7 +2180,7 @@
  * Returns: A submitted transaction or NULL on failure.
  */
 static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
-						struct cvmx_usb_state *usb,
+						struct octeon_hcd *usb,
 						struct cvmx_usb_pipe *pipe,
 						struct urb *urb)
 {
@@ -2233,7 +2194,6 @@
 					   urb);
 }
 
-
 /**
  * Call to submit a USB Interrupt transfer to a pipe.
  *
@@ -2244,7 +2204,7 @@
  * Returns: A submitted transaction or NULL on failure.
  */
 static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
-						struct cvmx_usb_state *usb,
+						struct octeon_hcd *usb,
 						struct cvmx_usb_pipe *pipe,
 						struct urb *urb)
 {
@@ -2259,7 +2219,6 @@
 					   urb);
 }
 
-
 /**
  * Call to submit a USB Control transfer to a pipe.
  *
@@ -2270,12 +2229,12 @@
  * Returns: A submitted transaction or NULL on failure.
  */
 static struct cvmx_usb_transaction *cvmx_usb_submit_control(
-						struct cvmx_usb_state *usb,
+						struct octeon_hcd *usb,
 						struct cvmx_usb_pipe *pipe,
 						struct urb *urb)
 {
 	int buffer_length = urb->transfer_buffer_length;
-	uint64_t control_header = urb->setup_dma;
+	u64 control_header = urb->setup_dma;
 	struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
 
 	if ((header->bRequestType & USB_DIR_IN) == 0)
@@ -2291,7 +2250,6 @@
 					   urb);
 }
 
-
 /**
  * Call to submit a USB Isochronous transfer to a pipe.
  *
@@ -2302,13 +2260,13 @@
  * Returns: A submitted transaction or NULL on failure.
  */
 static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
-						struct cvmx_usb_state *usb,
+						struct octeon_hcd *usb,
 						struct cvmx_usb_pipe *pipe,
 						struct urb *urb)
 {
 	struct cvmx_usb_iso_packet *packets;
 
-	packets = (struct cvmx_usb_iso_packet *) urb->setup_packet;
+	packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
 	return cvmx_usb_submit_transaction(usb, pipe,
 					   CVMX_USB_TRANSFER_ISOCHRONOUS,
 					   urb->transfer_dma,
@@ -2319,7 +2277,6 @@
 					   packets, urb);
 }
 
-
 /**
  * Cancel one outstanding request in a pipe. Canceling a request
  * can fail if the transaction has already completed before cancel
@@ -2333,7 +2290,7 @@
  *
  * Returns: 0 or a negative error code.
  */
-static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
+static int cvmx_usb_cancel(struct octeon_hcd *usb,
 			   struct cvmx_usb_pipe *pipe,
 			   struct cvmx_usb_transaction *transaction)
 {
@@ -2359,17 +2316,15 @@
 		if (usbc_hcchar.s.chena) {
 			usbc_hcchar.s.chdis = 1;
 			cvmx_usb_write_csr32(usb,
-					CVMX_USBCX_HCCHARX(pipe->channel,
-						usb->index),
-					usbc_hcchar.u32);
+					     CVMX_USBCX_HCCHARX(pipe->channel,
+								usb->index),
+					     usbc_hcchar.u32);
 		}
 	}
-	cvmx_usb_perform_complete(usb, pipe, transaction,
-				  CVMX_USB_COMPLETE_CANCEL);
+	cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
 	return 0;
 }
 
-
 /**
  * Cancel all outstanding requests in a pipe. Logically all this
  * does is call cvmx_usb_cancel() in a loop.
@@ -2379,7 +2334,7 @@
  *
  * Returns: 0 or a negative error code.
  */
-static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb,
+static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
 			       struct cvmx_usb_pipe *pipe)
 {
 	struct cvmx_usb_transaction *transaction, *next;
@@ -2394,7 +2349,6 @@
 	return 0;
 }
 
-
 /**
  * Close a pipe created with cvmx_usb_open_pipe().
  *
@@ -2404,7 +2358,7 @@
  * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
  *	    outstanding transfers.
  */
-static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb,
+static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
 			       struct cvmx_usb_pipe *pipe)
 {
 	/* Fail if the pipe has pending transactions */
@@ -2425,7 +2379,7 @@
  *
  * Returns: USB frame number
  */
-static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
+static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
 {
 	int frame_number;
 	union cvmx_usbcx_hfnum usbc_hfnum;
@@ -2436,6 +2390,197 @@
 	return frame_number;
 }
 
+static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
+				      struct cvmx_usb_pipe *pipe,
+				      struct cvmx_usb_transaction *transaction,
+				      union cvmx_usbcx_hccharx usbc_hcchar,
+				      int buffer_space_left,
+				      int bytes_in_last_packet)
+{
+	switch (transaction->stage) {
+	case CVMX_USB_STAGE_NON_CONTROL:
+	case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+		/* This should be impossible */
+		cvmx_usb_complete(usb, pipe, transaction,
+				  CVMX_USB_STATUS_ERROR);
+		break;
+	case CVMX_USB_STAGE_SETUP:
+		pipe->pid_toggle = 1;
+		if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+			transaction->stage =
+				CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+		} else {
+			struct usb_ctrlrequest *header =
+				cvmx_phys_to_ptr(transaction->control_header);
+			if (header->wLength)
+				transaction->stage = CVMX_USB_STAGE_DATA;
+			else
+				transaction->stage = CVMX_USB_STAGE_STATUS;
+		}
+		break;
+	case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+		{
+			struct usb_ctrlrequest *header =
+				cvmx_phys_to_ptr(transaction->control_header);
+			if (header->wLength)
+				transaction->stage = CVMX_USB_STAGE_DATA;
+			else
+				transaction->stage = CVMX_USB_STAGE_STATUS;
+		}
+		break;
+	case CVMX_USB_STAGE_DATA:
+		if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+			transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+			/*
+			 * For setup OUT data that are splits,
+			 * the hardware doesn't appear to count
+			 * transferred data. Here we manually
+			 * update the data transferred
+			 */
+			if (!usbc_hcchar.s.epdir) {
+				if (buffer_space_left < pipe->max_packet)
+					transaction->actual_bytes +=
+						buffer_space_left;
+				else
+					transaction->actual_bytes +=
+						pipe->max_packet;
+			}
+		} else if ((buffer_space_left == 0) ||
+			   (bytes_in_last_packet < pipe->max_packet)) {
+			pipe->pid_toggle = 1;
+			transaction->stage = CVMX_USB_STAGE_STATUS;
+		}
+		break;
+	case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+		if ((buffer_space_left == 0) ||
+		    (bytes_in_last_packet < pipe->max_packet)) {
+			pipe->pid_toggle = 1;
+			transaction->stage = CVMX_USB_STAGE_STATUS;
+		} else {
+			transaction->stage = CVMX_USB_STAGE_DATA;
+		}
+		break;
+	case CVMX_USB_STAGE_STATUS:
+		if (cvmx_usb_pipe_needs_split(usb, pipe))
+			transaction->stage =
+				CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+		else
+			cvmx_usb_complete(usb, pipe, transaction,
+					  CVMX_USB_STATUS_OK);
+		break;
+	case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+		break;
+	}
+}
+
+static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
+				   struct cvmx_usb_pipe *pipe,
+				   struct cvmx_usb_transaction *transaction,
+				   union cvmx_usbcx_hcintx usbc_hcint,
+				   int buffer_space_left,
+				   int bytes_in_last_packet)
+{
+	/*
+	 * The only time a bulk transfer isn't complete when it finishes with
+	 * an ACK is during a split transaction. For splits we need to continue
+	 * the transfer if more data is needed.
+	 */
+	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+		if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+			transaction->stage =
+				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+		else if (buffer_space_left &&
+			 (bytes_in_last_packet == pipe->max_packet))
+			transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+		else
+			cvmx_usb_complete(usb, pipe, transaction,
+					  CVMX_USB_STATUS_OK);
+	} else {
+		if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+		    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+		    (usbc_hcint.s.nak))
+			pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
+		if (!buffer_space_left ||
+		    (bytes_in_last_packet < pipe->max_packet))
+			cvmx_usb_complete(usb, pipe, transaction,
+					  CVMX_USB_STATUS_OK);
+	}
+}
+
+static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
+				   struct cvmx_usb_pipe *pipe,
+				   struct cvmx_usb_transaction *transaction,
+				   int buffer_space_left,
+				   int bytes_in_last_packet)
+{
+	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+		if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
+			transaction->stage =
+				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+		} else if (buffer_space_left &&
+			   (bytes_in_last_packet == pipe->max_packet)) {
+			transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+		} else {
+			pipe->next_tx_frame += pipe->interval;
+			cvmx_usb_complete(usb, pipe, transaction,
+					  CVMX_USB_STATUS_OK);
+		}
+	} else if (!buffer_space_left ||
+		   (bytes_in_last_packet < pipe->max_packet)) {
+		pipe->next_tx_frame += pipe->interval;
+		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+	}
+}
+
+static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
+				   struct cvmx_usb_pipe *pipe,
+				   struct cvmx_usb_transaction *transaction,
+				   int buffer_space_left,
+				   int bytes_in_last_packet,
+				   int bytes_this_transfer)
+{
+	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+		/*
+		 * ISOCHRONOUS OUT splits don't require a complete split stage.
+		 * Instead they use a sequence of begin OUT splits to transfer
+		 * the data 188 bytes at a time. Once the transfer is complete,
+		 * the pipe sleeps until the next schedule interval.
+		 */
+		if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+			/*
+			 * If no space left or this wasn't a max size packet
+			 * then this transfer is complete. Otherwise start it
+			 * again to send the next 188 bytes
+			 */
+			if (!buffer_space_left || (bytes_this_transfer < 188)) {
+				pipe->next_tx_frame += pipe->interval;
+				cvmx_usb_complete(usb, pipe, transaction,
+						  CVMX_USB_STATUS_OK);
+			}
+			return;
+		}
+		if (transaction->stage ==
+		    CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+			/*
+			 * We are in the incoming data phase. Keep getting data
+			 * until we run out of space or get a small packet
+			 */
+			if ((buffer_space_left == 0) ||
+			    (bytes_in_last_packet < pipe->max_packet)) {
+				pipe->next_tx_frame += pipe->interval;
+				cvmx_usb_complete(usb, pipe, transaction,
+						  CVMX_USB_STATUS_OK);
+			}
+		} else {
+			transaction->stage =
+				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+		}
+	} else {
+		pipe->next_tx_frame += pipe->interval;
+		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+	}
+}
 
 /**
  * Poll a channel for status
@@ -2445,10 +2590,9 @@
  *
  * Returns: Zero on success
  */
-static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
+static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
 {
-	struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
-	struct usb_hcd *hcd = octeon_to_hcd(priv);
+	struct usb_hcd *hcd = octeon_to_hcd(usb);
 	struct device *dev = hcd->self.controller;
 	union cvmx_usbcx_hcintx usbc_hcint;
 	union cvmx_usbcx_hctsizx usbc_hctsiz;
@@ -2475,9 +2619,9 @@
 			 * write of HCCHARX without changing things
 			 */
 			cvmx_usb_write_csr32(usb,
-					CVMX_USBCX_HCCHARX(channel,
-							   usb->index),
-					usbc_hcchar.u32);
+					     CVMX_USBCX_HCCHARX(channel,
+								usb->index),
+					     usbc_hcchar.u32);
 			return 0;
 		}
 
@@ -2492,14 +2636,12 @@
 				hcintmsk.u32 = 0;
 				hcintmsk.s.chhltdmsk = 1;
 				cvmx_usb_write_csr32(usb,
-						CVMX_USBCX_HCINTMSKX(channel,
-							usb->index),
-						hcintmsk.u32);
+						     CVMX_USBCX_HCINTMSKX(channel, usb->index),
+						     hcintmsk.u32);
 				usbc_hcchar.s.chdis = 1;
 				cvmx_usb_write_csr32(usb,
-						CVMX_USBCX_HCCHARX(channel,
-							usb->index),
-						usbc_hcchar.u32);
+						     CVMX_USBCX_HCCHARX(channel, usb->index),
+						     usbc_hcchar.u32);
 				return 0;
 			} else if (usbc_hcint.s.xfercompl) {
 				/*
@@ -2523,7 +2665,7 @@
 
 	/* Disable the channel interrupts now that it is done */
 	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
-	usb->idle_hardware_channels |= (1<<channel);
+	usb->idle_hardware_channels |= (1 << channel);
 
 	/* Make sure this channel is tied to a valid pipe */
 	pipe = usb->pipe_for_channel[channel];
@@ -2593,7 +2735,7 @@
 	 * transferred
 	 */
 	if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
-		(transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+	    (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
 		bytes_this_transfer = 0;
 
 	/*
@@ -2621,8 +2763,8 @@
 	 * will clear this flag
 	 */
 	if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
-		(pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
-		(pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+	    (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+	    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
 		pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
 
 	if (unlikely(WARN_ON_ONCE(bytes_this_transfer < 0))) {
@@ -2631,8 +2773,8 @@
 		 * keeps subtracting the same byte count over and over again.
 		 * In such a case we just need to fail every transaction.
 		 */
-		cvmx_usb_perform_complete(usb, pipe, transaction,
-					  CVMX_USB_COMPLETE_ERROR);
+		cvmx_usb_complete(usb, pipe, transaction,
+				  CVMX_USB_STATUS_ERROR);
 		return 0;
 	}
 
@@ -2644,24 +2786,24 @@
 		 * the actual bytes transferred
 		 */
 		pipe->pid_toggle = 0;
-		cvmx_usb_perform_complete(usb, pipe, transaction,
-					  CVMX_USB_COMPLETE_STALL);
+		cvmx_usb_complete(usb, pipe, transaction,
+				  CVMX_USB_STATUS_STALL);
 	} else if (usbc_hcint.s.xacterr) {
 		/*
 		 * XactErr as a response means the device signaled
 		 * something wrong with the transfer. For example, PID
 		 * toggle errors cause these.
 		 */
-		cvmx_usb_perform_complete(usb, pipe, transaction,
-					  CVMX_USB_COMPLETE_XACTERR);
+		cvmx_usb_complete(usb, pipe, transaction,
+				  CVMX_USB_STATUS_XACTERR);
 	} else if (usbc_hcint.s.bblerr) {
 		/* Babble Error (BblErr) */
-		cvmx_usb_perform_complete(usb, pipe, transaction,
-					  CVMX_USB_COMPLETE_BABBLEERR);
+		cvmx_usb_complete(usb, pipe, transaction,
+				  CVMX_USB_STATUS_BABBLEERR);
 	} else if (usbc_hcint.s.datatglerr) {
 		/* Data toggle error */
-		cvmx_usb_perform_complete(usb, pipe, transaction,
-					  CVMX_USB_COMPLETE_DATATGLERR);
+		cvmx_usb_complete(usb, pipe, transaction,
+				  CVMX_USB_STATUS_DATATGLERR);
 	} else if (usbc_hcint.s.nyet) {
 		/*
 		 * NYET as a response is only allowed in three cases: as a
@@ -2676,10 +2818,10 @@
 			 * again. Otherwise this transaction is complete
 			 */
 			if ((buffer_space_left == 0) ||
-				(bytes_in_last_packet < pipe->max_packet))
-				cvmx_usb_perform_complete(usb, pipe,
-						transaction,
-						CVMX_USB_COMPLETE_SUCCESS);
+			    (bytes_in_last_packet < pipe->max_packet))
+				cvmx_usb_complete(usb, pipe,
+						  transaction,
+						  CVMX_USB_STATUS_OK);
 		} else {
 			/*
 			 * Split transactions retry the split complete 4 times
@@ -2713,205 +2855,26 @@
 
 		switch (transaction->type) {
 		case CVMX_USB_TRANSFER_CONTROL:
-			switch (transaction->stage) {
-			case CVMX_USB_STAGE_NON_CONTROL:
-			case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
-				/* This should be impossible */
-				cvmx_usb_perform_complete(usb, pipe,
-					transaction, CVMX_USB_COMPLETE_ERROR);
-				break;
-			case CVMX_USB_STAGE_SETUP:
-				pipe->pid_toggle = 1;
-				if (cvmx_usb_pipe_needs_split(usb, pipe))
-					transaction->stage =
-						CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
-				else {
-					struct usb_ctrlrequest *header =
-						cvmx_phys_to_ptr(transaction->control_header);
-					if (header->wLength)
-						transaction->stage =
-							CVMX_USB_STAGE_DATA;
-					else
-						transaction->stage =
-							CVMX_USB_STAGE_STATUS;
-				}
-				break;
-			case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
-				{
-					struct usb_ctrlrequest *header =
-						cvmx_phys_to_ptr(transaction->control_header);
-					if (header->wLength)
-						transaction->stage =
-							CVMX_USB_STAGE_DATA;
-					else
-						transaction->stage =
-							CVMX_USB_STAGE_STATUS;
-				}
-				break;
-			case CVMX_USB_STAGE_DATA:
-				if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-					transaction->stage =
-						CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
-					/*
-					 * For setup OUT data that are splits,
-					 * the hardware doesn't appear to count
-					 * transferred data. Here we manually
-					 * update the data transferred
-					 */
-					if (!usbc_hcchar.s.epdir) {
-						if (buffer_space_left < pipe->max_packet)
-							transaction->actual_bytes +=
-								buffer_space_left;
-						else
-							transaction->actual_bytes +=
-								pipe->max_packet;
-					}
-				} else if ((buffer_space_left == 0) ||
-						(bytes_in_last_packet <
-						 pipe->max_packet)) {
-					pipe->pid_toggle = 1;
-					transaction->stage =
-						CVMX_USB_STAGE_STATUS;
-				}
-				break;
-			case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
-				if ((buffer_space_left == 0) ||
-						(bytes_in_last_packet <
-						 pipe->max_packet)) {
-					pipe->pid_toggle = 1;
-					transaction->stage =
-						CVMX_USB_STAGE_STATUS;
-				} else {
-					transaction->stage =
-						CVMX_USB_STAGE_DATA;
-				}
-				break;
-			case CVMX_USB_STAGE_STATUS:
-				if (cvmx_usb_pipe_needs_split(usb, pipe))
-					transaction->stage =
-						CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
-				else
-					cvmx_usb_perform_complete(usb, pipe,
-						transaction,
-						CVMX_USB_COMPLETE_SUCCESS);
-				break;
-			case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
-				cvmx_usb_perform_complete(usb, pipe,
-						transaction,
-						CVMX_USB_COMPLETE_SUCCESS);
-				break;
-			}
+			cvmx_usb_transfer_control(usb, pipe, transaction,
+						  usbc_hcchar,
+						  buffer_space_left,
+						  bytes_in_last_packet);
 			break;
 		case CVMX_USB_TRANSFER_BULK:
+			cvmx_usb_transfer_bulk(usb, pipe, transaction,
+					       usbc_hcint, buffer_space_left,
+					       bytes_in_last_packet);
+			break;
 		case CVMX_USB_TRANSFER_INTERRUPT:
-			/*
-			 * The only time a bulk transfer isn't complete when it
-			 * finishes with an ACK is during a split transaction.
-			 * For splits we need to continue the transfer if more
-			 * data is needed
-			 */
-			if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-				if (transaction->stage ==
-						CVMX_USB_STAGE_NON_CONTROL)
-					transaction->stage =
-						CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
-				else {
-					if (buffer_space_left &&
-						(bytes_in_last_packet ==
-						 pipe->max_packet))
-						transaction->stage =
-							CVMX_USB_STAGE_NON_CONTROL;
-					else {
-						if (transaction->type ==
-							CVMX_USB_TRANSFER_INTERRUPT)
-							pipe->next_tx_frame +=
-								pipe->interval;
-							cvmx_usb_perform_complete(
-								usb,
-								pipe,
-								transaction,
-								CVMX_USB_COMPLETE_SUCCESS);
-					}
-				}
-			} else {
-				if ((pipe->device_speed ==
-					CVMX_USB_SPEED_HIGH) &&
-				    (pipe->transfer_type ==
-				     CVMX_USB_TRANSFER_BULK) &&
-				    (pipe->transfer_dir ==
-				     CVMX_USB_DIRECTION_OUT) &&
-				    (usbc_hcint.s.nak))
-					pipe->flags |=
-						CVMX_USB_PIPE_FLAGS_NEED_PING;
-				if (!buffer_space_left ||
-					(bytes_in_last_packet <
-					 pipe->max_packet)) {
-					if (transaction->type ==
-						CVMX_USB_TRANSFER_INTERRUPT)
-						pipe->next_tx_frame +=
-							pipe->interval;
-					cvmx_usb_perform_complete(usb, pipe,
-						transaction,
-						CVMX_USB_COMPLETE_SUCCESS);
-				}
-			}
+			cvmx_usb_transfer_intr(usb, pipe, transaction,
+					       buffer_space_left,
+					       bytes_in_last_packet);
 			break;
 		case CVMX_USB_TRANSFER_ISOCHRONOUS:
-			if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-				/*
-				 * ISOCHRONOUS OUT splits don't require a
-				 * complete split stage. Instead they use a
-				 * sequence of begin OUT splits to transfer the
-				 * data 188 bytes at a time. Once the transfer
-				 * is complete, the pipe sleeps until the next
-				 * schedule interval
-				 */
-				if (pipe->transfer_dir ==
-					CVMX_USB_DIRECTION_OUT) {
-					/*
-					 * If no space left or this wasn't a max
-					 * size packet then this transfer is
-					 * complete. Otherwise start it again to
-					 * send the next 188 bytes
-					 */
-					if (!buffer_space_left ||
-						(bytes_this_transfer < 188)) {
-						pipe->next_tx_frame +=
-							pipe->interval;
-						cvmx_usb_perform_complete(usb,
-							pipe, transaction,
-							CVMX_USB_COMPLETE_SUCCESS);
-					}
-				} else {
-					if (transaction->stage ==
-						CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
-						/*
-						 * We are in the incoming data
-						 * phase. Keep getting data
-						 * until we run out of space or
-						 * get a small packet
-						 */
-						if ((buffer_space_left == 0) ||
-							(bytes_in_last_packet <
-							 pipe->max_packet)) {
-							pipe->next_tx_frame +=
-								pipe->interval;
-							cvmx_usb_perform_complete(
-								usb,
-								pipe,
-								transaction,
-								CVMX_USB_COMPLETE_SUCCESS);
-						}
-					} else
-						transaction->stage =
-							CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
-				}
-			} else {
-				pipe->next_tx_frame += pipe->interval;
-				cvmx_usb_perform_complete(usb, pipe,
-						transaction,
-						CVMX_USB_COMPLETE_SUCCESS);
-			}
+			cvmx_usb_transfer_isoc(usb, pipe, transaction,
+					       buffer_space_left,
+					       bytes_in_last_packet,
+					       bytes_this_transfer);
 			break;
 		}
 	} else if (usbc_hcint.s.nak) {
@@ -2946,20 +2909,18 @@
 			 * We get channel halted interrupts with no result bits
 			 * set when the cable is unplugged
 			 */
-			cvmx_usb_perform_complete(usb, pipe, transaction,
-					CVMX_USB_COMPLETE_ERROR);
+			cvmx_usb_complete(usb, pipe, transaction,
+					  CVMX_USB_STATUS_ERROR);
 		}
 	}
 	return 0;
 }
 
-static void octeon_usb_port_callback(struct cvmx_usb_state *usb)
+static void octeon_usb_port_callback(struct octeon_hcd *usb)
 {
-	struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
-
-	spin_unlock(&priv->lock);
-	usb_hcd_poll_rh_status(octeon_to_hcd(priv));
-	spin_lock(&priv->lock);
+	spin_unlock(&usb->lock);
+	usb_hcd_poll_rh_status(octeon_to_hcd(usb));
+	spin_lock(&usb->lock);
 }
 
 /**
@@ -2972,7 +2933,7 @@
  *
  * Returns: 0 or a negative error code.
  */
-static int cvmx_usb_poll(struct cvmx_usb_state *usb)
+static int cvmx_usb_poll(struct octeon_hcd *usb)
 {
 	union cvmx_usbcx_hfnum usbc_hfnum;
 	union cvmx_usbcx_gintsts usbc_gintsts;
@@ -2981,7 +2942,7 @@
 
 	/* Update the frame counter */
 	usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
-	if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
+	if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
 		usb->frame_number += 0x4000;
 	usb->frame_number &= ~0x3fffull;
 	usb->frame_number |= usbc_hfnum.s.frnum;
@@ -3028,8 +2989,8 @@
 		 */
 		octeon_usb_port_callback(usb);
 		/* Clear the port change bits */
-		usbc_hprt.u32 = cvmx_usb_read_csr32(usb,
-					CVMX_USBCX_HPRT(usb->index));
+		usbc_hprt.u32 =
+			cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
 		usbc_hprt.s.prtena = 0;
 		cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
 				     usbc_hprt.u32);
@@ -3056,7 +3017,7 @@
 
 			channel = __fls(usbc_haint.u32);
 			cvmx_usb_poll_channel(usb, channel);
-			usbc_haint.u32 ^= 1<<channel;
+			usbc_haint.u32 ^= 1 << channel;
 		}
 	}
 
@@ -3073,12 +3034,12 @@
 
 static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
 {
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	cvmx_usb_poll(&priv->usb);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&usb->lock, flags);
+	cvmx_usb_poll(usb);
+	spin_unlock_irqrestore(&usb->lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -3095,16 +3056,16 @@
 
 static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
 {
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 
-	return cvmx_usb_get_frame_number(&priv->usb);
+	return cvmx_usb_get_frame_number(usb);
 }
 
 static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
 				  struct urb *urb,
 				  gfp_t mem_flags)
 {
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 	struct device *dev = hcd->self.controller;
 	struct cvmx_usb_transaction *transaction = NULL;
 	struct cvmx_usb_pipe *pipe;
@@ -3114,11 +3075,11 @@
 	int rc;
 
 	urb->status = 0;
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&usb->lock, flags);
 
 	rc = usb_hcd_link_urb_to_ep(hcd, urb);
 	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
+		spin_unlock_irqrestore(&usb->lock, flags);
 		return rc;
 	}
 
@@ -3184,7 +3145,7 @@
 				dev = dev->parent;
 			}
 		}
-		pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe),
+		pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
 					  usb_pipeendpoint(urb->pipe), speed,
 					  le16_to_cpu(ep->desc.wMaxPacketSize)
 					  & 0x7ff,
@@ -3198,7 +3159,7 @@
 					  split_device, split_port);
 		if (!pipe) {
 			usb_hcd_unlink_urb_from_ep(hcd, urb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			dev_dbg(dev, "Failed to create pipe\n");
 			return -ENOMEM;
 		}
@@ -3227,8 +3188,7 @@
 					urb->iso_frame_desc[i].offset;
 				iso_packet[i].length =
 					urb->iso_frame_desc[i].length;
-				iso_packet[i].status =
-					CVMX_USB_COMPLETE_ERROR;
+				iso_packet[i].status = CVMX_USB_STATUS_ERROR;
 			}
 			/*
 			 * Store a pointer to the list in the URB setup_packet
@@ -3236,7 +3196,7 @@
 			 * this saves us a bunch of logic.
 			 */
 			urb->setup_packet = (char *)iso_packet;
-			transaction = cvmx_usb_submit_isochronous(&priv->usb,
+			transaction = cvmx_usb_submit_isochronous(usb,
 								  pipe, urb);
 			/*
 			 * If submit failed we need to free our private packet
@@ -3252,29 +3212,29 @@
 		dev_dbg(dev, "Submit interrupt to %d.%d\n",
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe));
-		transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb);
+		transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
 		break;
 	case PIPE_CONTROL:
 		dev_dbg(dev, "Submit control to %d.%d\n",
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe));
-		transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb);
+		transaction = cvmx_usb_submit_control(usb, pipe, urb);
 		break;
 	case PIPE_BULK:
 		dev_dbg(dev, "Submit bulk to %d.%d\n",
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe));
-		transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb);
+		transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
 		break;
 	}
 	if (!transaction) {
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
-		spin_unlock_irqrestore(&priv->lock, flags);
+		spin_unlock_irqrestore(&usb->lock, flags);
 		dev_dbg(dev, "Failed to submit\n");
 		return -ENOMEM;
 	}
 	urb->hcpriv = transaction;
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&usb->lock, flags);
 	return 0;
 }
 
@@ -3282,24 +3242,24 @@
 				  struct urb *urb,
 				  int status)
 {
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 	unsigned long flags;
 	int rc;
 
 	if (!urb->dev)
 		return -EINVAL;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&usb->lock, flags);
 
 	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
 	if (rc)
 		goto out;
 
 	urb->status = status;
-	cvmx_usb_cancel(&priv->usb, urb->ep->hcpriv, urb->hcpriv);
+	cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
 
 out:
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&usb->lock, flags);
 
 	return rc;
 }
@@ -3310,28 +3270,28 @@
 	struct device *dev = hcd->self.controller;
 
 	if (ep->hcpriv) {
-		struct octeon_hcd *priv = hcd_to_octeon(hcd);
+		struct octeon_hcd *usb = hcd_to_octeon(hcd);
 		struct cvmx_usb_pipe *pipe = ep->hcpriv;
 		unsigned long flags;
 
-		spin_lock_irqsave(&priv->lock, flags);
-		cvmx_usb_cancel_all(&priv->usb, pipe);
-		if (cvmx_usb_close_pipe(&priv->usb, pipe))
+		spin_lock_irqsave(&usb->lock, flags);
+		cvmx_usb_cancel_all(usb, pipe);
+		if (cvmx_usb_close_pipe(usb, pipe))
 			dev_dbg(dev, "Closing pipe %p failed\n", pipe);
-		spin_unlock_irqrestore(&priv->lock, flags);
+		spin_unlock_irqrestore(&usb->lock, flags);
 		ep->hcpriv = NULL;
 	}
 }
 
 static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
 {
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 	struct cvmx_usb_port_status port_status;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	port_status = cvmx_usb_get_status(&priv->usb);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&usb->lock, flags);
+	port_status = cvmx_usb_get_status(usb);
+	spin_unlock_irqrestore(&usb->lock, flags);
 	buf[0] = 0;
 	buf[0] = port_status.connect_change << 1;
 
@@ -3339,12 +3299,11 @@
 }
 
 static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-				u16 wIndex, char *buf, u16 wLength)
+				  u16 wIndex, char *buf, u16 wLength)
 {
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 	struct device *dev = hcd->self.controller;
 	struct cvmx_usb_port_status usb_port_status;
-	struct cvmx_usb_state *usb = &priv->usb;
 	int port_status;
 	struct usb_hub_descriptor *desc;
 	unsigned long flags;
@@ -3371,9 +3330,9 @@
 		switch (wValue) {
 		case USB_PORT_FEAT_ENABLE:
 			dev_dbg(dev, " ENABLE\n");
-			spin_lock_irqsave(&priv->lock, flags);
-			cvmx_usb_disable(&priv->usb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
+			cvmx_usb_disable(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			break;
 		case USB_PORT_FEAT_SUSPEND:
 			dev_dbg(dev, " SUSPEND\n");
@@ -3390,20 +3349,18 @@
 		case USB_PORT_FEAT_C_CONNECTION:
 			dev_dbg(dev, " C_CONNECTION\n");
 			/* Clears the driver's internal connect status change flag */
-			spin_lock_irqsave(&priv->lock, flags);
-			priv->usb.port_status =
-				cvmx_usb_get_status(&priv->usb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			break;
 		case USB_PORT_FEAT_C_RESET:
 			dev_dbg(dev, " C_RESET\n");
 			/*
 			 * Clears the driver's internal Port Reset Change flag.
 			 */
-			spin_lock_irqsave(&priv->lock, flags);
-			priv->usb.port_status =
-				cvmx_usb_get_status(&priv->usb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			break;
 		case USB_PORT_FEAT_C_ENABLE:
 			dev_dbg(dev, " C_ENABLE\n");
@@ -3411,10 +3368,9 @@
 			 * Clears the driver's internal Port Enable/Disable
 			 * Change flag.
 			 */
-			spin_lock_irqsave(&priv->lock, flags);
-			priv->usb.port_status =
-				cvmx_usb_get_status(&priv->usb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			break;
 		case USB_PORT_FEAT_C_SUSPEND:
 			dev_dbg(dev, " C_SUSPEND\n");
@@ -3427,10 +3383,9 @@
 		case USB_PORT_FEAT_C_OVER_CURRENT:
 			dev_dbg(dev, " C_OVER_CURRENT\n");
 			/* Clears the driver's overcurrent Change flag */
-			spin_lock_irqsave(&priv->lock, flags);
-			priv->usb.port_status =
-				cvmx_usb_get_status(&priv->usb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
+			usb->port_status = cvmx_usb_get_status(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			break;
 		default:
 			dev_dbg(dev, " UNKNOWN\n");
@@ -3451,7 +3406,7 @@
 		break;
 	case GetHubStatus:
 		dev_dbg(dev, "GetHubStatus\n");
-		*(__le32 *) buf = 0;
+		*(__le32 *)buf = 0;
 		break;
 	case GetPortStatus:
 		dev_dbg(dev, "GetPortStatus\n");
@@ -3460,9 +3415,9 @@
 			return -EINVAL;
 		}
 
-		spin_lock_irqsave(&priv->lock, flags);
-		usb_port_status = cvmx_usb_get_status(&priv->usb);
-		spin_unlock_irqrestore(&priv->lock, flags);
+		spin_lock_irqsave(&usb->lock, flags);
+		usb_port_status = cvmx_usb_get_status(usb);
+		spin_unlock_irqrestore(&usb->lock, flags);
 		port_status = 0;
 
 		if (usb_port_status.connect_change) {
@@ -3503,7 +3458,7 @@
 			dev_dbg(dev, " LOWSPEED\n");
 		}
 
-		*((__le32 *) buf) = cpu_to_le32(port_status);
+		*((__le32 *)buf) = cpu_to_le32(port_status);
 		break;
 	case SetHubFeature:
 		dev_dbg(dev, "SetHubFeature\n");
@@ -3525,16 +3480,16 @@
 			/*
 			 * Program the port power bit to drive VBUS on the USB.
 			 */
-			spin_lock_irqsave(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
 			USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
 					cvmx_usbcx_hprt, prtpwr, 1);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			return 0;
 		case USB_PORT_FEAT_RESET:
 			dev_dbg(dev, " RESET\n");
-			spin_lock_irqsave(&priv->lock, flags);
-			cvmx_usb_reset_port(&priv->usb);
-			spin_unlock_irqrestore(&priv->lock, flags);
+			spin_lock_irqsave(&usb->lock, flags);
+			cvmx_usb_reset_port(usb);
+			spin_unlock_irqrestore(&usb->lock, flags);
 			return 0;
 		case USB_PORT_FEAT_INDICATOR:
 			dev_dbg(dev, " INDICATOR\n");
@@ -3579,23 +3534,26 @@
 	struct device_node *usbn_node;
 	int irq = platform_get_irq(pdev, 0);
 	struct device *dev = &pdev->dev;
-	struct octeon_hcd *priv;
+	struct octeon_hcd *usb;
 	struct usb_hcd *hcd;
 	u32 clock_rate = 48000000;
 	bool is_crystal_clock = false;
 	const char *clock_type;
 	int i;
 
-	if (dev->of_node == NULL) {
+	if (!dev->of_node) {
 		dev_err(dev, "Error: empty of_node\n");
 		return -ENXIO;
 	}
 	usbn_node = dev->of_node->parent;
 
 	i = of_property_read_u32(usbn_node,
-				 "refclk-frequency", &clock_rate);
+				 "clock-frequency", &clock_rate);
+	if (i)
+		i = of_property_read_u32(usbn_node,
+					 "refclk-frequency", &clock_rate);
 	if (i) {
-		dev_err(dev, "No USBN \"refclk-frequency\"\n");
+		dev_err(dev, "No USBN \"clock-frequency\"\n");
 		return -ENXIO;
 	}
 	switch (clock_rate) {
@@ -3609,14 +3567,16 @@
 		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
 		break;
 	default:
-		dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n",
-				clock_rate);
+		dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
+			clock_rate);
 		return -ENXIO;
-
 	}
 
 	i = of_property_read_string(usbn_node,
-				    "refclk-type", &clock_type);
+				    "cavium,refclk-type", &clock_type);
+	if (i)
+		i = of_property_read_string(usbn_node,
+					    "refclk-type", &clock_type);
 
 	if (!i && strcmp("crystal", clock_type) == 0)
 		is_crystal_clock = true;
@@ -3627,7 +3587,7 @@
 		initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res_mem == NULL) {
+	if (!res_mem) {
 		dev_err(dev, "found no memory resource\n");
 		return -ENXIO;
 	}
@@ -3673,31 +3633,31 @@
 		return -1;
 	}
 	hcd->uses_new_polling = 1;
-	priv = (struct octeon_hcd *)hcd->hcd_priv;
+	usb = (struct octeon_hcd *)hcd->hcd_priv;
 
-	spin_lock_init(&priv->lock);
+	spin_lock_init(&usb->lock);
 
-	priv->usb.init_flags = initialize_flags;
+	usb->init_flags = initialize_flags;
 
 	/* Initialize the USB state structure */
-	priv->usb.index = usb_num;
-	INIT_LIST_HEAD(&priv->usb.idle_pipes);
-	for (i = 0; i < ARRAY_SIZE(priv->usb.active_pipes); i++)
-		INIT_LIST_HEAD(&priv->usb.active_pipes[i]);
+	usb->index = usb_num;
+	INIT_LIST_HEAD(&usb->idle_pipes);
+	for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
+		INIT_LIST_HEAD(&usb->active_pipes[i]);
 
 	/* Due to an errata, CN31XX doesn't support DMA */
 	if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
-		priv->usb.init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+		usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
 		/* Only use one channel with non DMA */
-		priv->usb.idle_hardware_channels = 0x1;
+		usb->idle_hardware_channels = 0x1;
 	} else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
 		/* CN5XXX have an errata with channel 3 */
-		priv->usb.idle_hardware_channels = 0xf7;
+		usb->idle_hardware_channels = 0xf7;
 	} else {
-		priv->usb.idle_hardware_channels = 0xff;
+		usb->idle_hardware_channels = 0xff;
 	}
 
-	status = cvmx_usb_initialize(dev, &priv->usb);
+	status = cvmx_usb_initialize(dev, usb);
 	if (status) {
 		dev_dbg(dev, "USB initialization failed with %d\n", status);
 		kfree(hcd);
@@ -3722,13 +3682,13 @@
 	int status;
 	struct device *dev = &pdev->dev;
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	struct octeon_hcd *usb = hcd_to_octeon(hcd);
 	unsigned long flags;
 
 	usb_remove_hcd(hcd);
-	spin_lock_irqsave(&priv->lock, flags);
-	status = cvmx_usb_shutdown(&priv->usb);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&usb->lock, flags);
+	status = cvmx_usb_shutdown(usb);
+	spin_unlock_irqrestore(&usb->lock, flags);
 	if (status)
 		dev_dbg(dev, "USB shutdown failed with %d\n", status);
 
@@ -3747,7 +3707,7 @@
 
 static struct platform_driver octeon_usb_driver = {
 	.driver = {
-		.name       = "OcteonUSB",
+		.name		= "octeon-hcd",
 		.of_match_table = octeon_usb_match,
 	},
 	.probe      = octeon_usb_probe,
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
index 70e7fa5..3353aefe 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -110,7 +110,7 @@
  * initialization. Do not change this register after the initial programming.
  */
 union cvmx_usbcx_gahbcfg {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_gahbcfg_s
 	 * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
@@ -145,13 +145,13 @@
 	 *	* 1'b1: Unmask the interrupt assertion to the application.
 	 */
 	struct cvmx_usbcx_gahbcfg_s {
-		__BITFIELD_FIELD(uint32_t reserved_9_31	: 23,
-		__BITFIELD_FIELD(uint32_t ptxfemplvl	: 1,
-		__BITFIELD_FIELD(uint32_t nptxfemplvl	: 1,
-		__BITFIELD_FIELD(uint32_t reserved_6_6	: 1,
-		__BITFIELD_FIELD(uint32_t dmaen		: 1,
-		__BITFIELD_FIELD(uint32_t hbstlen	: 4,
-		__BITFIELD_FIELD(uint32_t glblintrmsk	: 1,
+		__BITFIELD_FIELD(u32 reserved_9_31	: 23,
+		__BITFIELD_FIELD(u32 ptxfemplvl		: 1,
+		__BITFIELD_FIELD(u32 nptxfemplvl	: 1,
+		__BITFIELD_FIELD(u32 reserved_6_6	: 1,
+		__BITFIELD_FIELD(u32 dmaen		: 1,
+		__BITFIELD_FIELD(u32 hbstlen		: 4,
+		__BITFIELD_FIELD(u32 glblintrmsk	: 1,
 		;)))))))
 	} s;
 };
@@ -164,7 +164,7 @@
  * This register contains the configuration options of the O2P USB core.
  */
 union cvmx_usbcx_ghwcfg3 {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_ghwcfg3_s
 	 * @dfifodepth: DFIFO Depth (DfifoDepth)
@@ -212,16 +212,16 @@
 	 *	* Others: Reserved
 	 */
 	struct cvmx_usbcx_ghwcfg3_s {
-		__BITFIELD_FIELD(uint32_t dfifodepth			: 16,
-		__BITFIELD_FIELD(uint32_t reserved_13_15		: 3,
-		__BITFIELD_FIELD(uint32_t ahbphysync			: 1,
-		__BITFIELD_FIELD(uint32_t rsttype			: 1,
-		__BITFIELD_FIELD(uint32_t optfeature			: 1,
-		__BITFIELD_FIELD(uint32_t vendor_control_interface_support : 1,
-		__BITFIELD_FIELD(uint32_t i2c_selection			: 1,
-		__BITFIELD_FIELD(uint32_t otgen				: 1,
-		__BITFIELD_FIELD(uint32_t pktsizewidth			: 3,
-		__BITFIELD_FIELD(uint32_t xfersizewidth			: 4,
+		__BITFIELD_FIELD(u32 dfifodepth				: 16,
+		__BITFIELD_FIELD(u32 reserved_13_15			: 3,
+		__BITFIELD_FIELD(u32 ahbphysync				: 1,
+		__BITFIELD_FIELD(u32 rsttype				: 1,
+		__BITFIELD_FIELD(u32 optfeature				: 1,
+		__BITFIELD_FIELD(u32 vendor_control_interface_support	: 1,
+		__BITFIELD_FIELD(u32 i2c_selection			: 1,
+		__BITFIELD_FIELD(u32 otgen				: 1,
+		__BITFIELD_FIELD(u32 pktsizewidth			: 3,
+		__BITFIELD_FIELD(u32 xfersizewidth			: 4,
 		;))))))))))
 	} s;
 };
@@ -238,7 +238,7 @@
  * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
  */
 union cvmx_usbcx_gintmsk {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_gintmsk_s
 	 * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
@@ -279,38 +279,38 @@
 	 * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
 	 */
 	struct cvmx_usbcx_gintmsk_s {
-		__BITFIELD_FIELD(uint32_t wkupintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t sessreqintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t disconnintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t conidstschngmsk	: 1,
-		__BITFIELD_FIELD(uint32_t reserved_27_27	: 1,
-		__BITFIELD_FIELD(uint32_t ptxfempmsk		: 1,
-		__BITFIELD_FIELD(uint32_t hchintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t prtintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_23_23	: 1,
-		__BITFIELD_FIELD(uint32_t fetsuspmsk		: 1,
-		__BITFIELD_FIELD(uint32_t incomplpmsk		: 1,
-		__BITFIELD_FIELD(uint32_t incompisoinmsk	: 1,
-		__BITFIELD_FIELD(uint32_t oepintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t inepintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t epmismsk		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_16_16	: 1,
-		__BITFIELD_FIELD(uint32_t eopfmsk		: 1,
-		__BITFIELD_FIELD(uint32_t isooutdropmsk		: 1,
-		__BITFIELD_FIELD(uint32_t enumdonemsk		: 1,
-		__BITFIELD_FIELD(uint32_t usbrstmsk		: 1,
-		__BITFIELD_FIELD(uint32_t usbsuspmsk		: 1,
-		__BITFIELD_FIELD(uint32_t erlysuspmsk		: 1,
-		__BITFIELD_FIELD(uint32_t i2cint		: 1,
-		__BITFIELD_FIELD(uint32_t ulpickintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t goutnakeffmsk		: 1,
-		__BITFIELD_FIELD(uint32_t ginnakeffmsk		: 1,
-		__BITFIELD_FIELD(uint32_t nptxfempmsk		: 1,
-		__BITFIELD_FIELD(uint32_t rxflvlmsk		: 1,
-		__BITFIELD_FIELD(uint32_t sofmsk		: 1,
-		__BITFIELD_FIELD(uint32_t otgintmsk		: 1,
-		__BITFIELD_FIELD(uint32_t modemismsk		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_0_0		: 1,
+		__BITFIELD_FIELD(u32 wkupintmsk		: 1,
+		__BITFIELD_FIELD(u32 sessreqintmsk	: 1,
+		__BITFIELD_FIELD(u32 disconnintmsk	: 1,
+		__BITFIELD_FIELD(u32 conidstschngmsk	: 1,
+		__BITFIELD_FIELD(u32 reserved_27_27	: 1,
+		__BITFIELD_FIELD(u32 ptxfempmsk		: 1,
+		__BITFIELD_FIELD(u32 hchintmsk		: 1,
+		__BITFIELD_FIELD(u32 prtintmsk		: 1,
+		__BITFIELD_FIELD(u32 reserved_23_23	: 1,
+		__BITFIELD_FIELD(u32 fetsuspmsk		: 1,
+		__BITFIELD_FIELD(u32 incomplpmsk	: 1,
+		__BITFIELD_FIELD(u32 incompisoinmsk	: 1,
+		__BITFIELD_FIELD(u32 oepintmsk		: 1,
+		__BITFIELD_FIELD(u32 inepintmsk		: 1,
+		__BITFIELD_FIELD(u32 epmismsk		: 1,
+		__BITFIELD_FIELD(u32 reserved_16_16	: 1,
+		__BITFIELD_FIELD(u32 eopfmsk		: 1,
+		__BITFIELD_FIELD(u32 isooutdropmsk	: 1,
+		__BITFIELD_FIELD(u32 enumdonemsk	: 1,
+		__BITFIELD_FIELD(u32 usbrstmsk		: 1,
+		__BITFIELD_FIELD(u32 usbsuspmsk		: 1,
+		__BITFIELD_FIELD(u32 erlysuspmsk	: 1,
+		__BITFIELD_FIELD(u32 i2cint		: 1,
+		__BITFIELD_FIELD(u32 ulpickintmsk	: 1,
+		__BITFIELD_FIELD(u32 goutnakeffmsk	: 1,
+		__BITFIELD_FIELD(u32 ginnakeffmsk	: 1,
+		__BITFIELD_FIELD(u32 nptxfempmsk	: 1,
+		__BITFIELD_FIELD(u32 rxflvlmsk		: 1,
+		__BITFIELD_FIELD(u32 sofmsk		: 1,
+		__BITFIELD_FIELD(u32 otgintmsk		: 1,
+		__BITFIELD_FIELD(u32 modemismsk		: 1,
+		__BITFIELD_FIELD(u32 reserved_0_0	: 1,
 		;))))))))))))))))))))))))))))))))
 	} s;
 };
@@ -331,7 +331,7 @@
  * automatically.
  */
 union cvmx_usbcx_gintsts {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_gintsts_s
 	 * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
@@ -509,38 +509,38 @@
 	 *	* 1'b1: Host mode
 	 */
 	struct cvmx_usbcx_gintsts_s {
-		__BITFIELD_FIELD(uint32_t wkupint		: 1,
-		__BITFIELD_FIELD(uint32_t sessreqint		: 1,
-		__BITFIELD_FIELD(uint32_t disconnint		: 1,
-		__BITFIELD_FIELD(uint32_t conidstschng		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_27_27	: 1,
-		__BITFIELD_FIELD(uint32_t ptxfemp		: 1,
-		__BITFIELD_FIELD(uint32_t hchint		: 1,
-		__BITFIELD_FIELD(uint32_t prtint		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_23_23	: 1,
-		__BITFIELD_FIELD(uint32_t fetsusp		: 1,
-		__BITFIELD_FIELD(uint32_t incomplp		: 1,
-		__BITFIELD_FIELD(uint32_t incompisoin		: 1,
-		__BITFIELD_FIELD(uint32_t oepint		: 1,
-		__BITFIELD_FIELD(uint32_t iepint		: 1,
-		__BITFIELD_FIELD(uint32_t epmis			: 1,
-		__BITFIELD_FIELD(uint32_t reserved_16_16	: 1,
-		__BITFIELD_FIELD(uint32_t eopf			: 1,
-		__BITFIELD_FIELD(uint32_t isooutdrop		: 1,
-		__BITFIELD_FIELD(uint32_t enumdone		: 1,
-		__BITFIELD_FIELD(uint32_t usbrst		: 1,
-		__BITFIELD_FIELD(uint32_t usbsusp		: 1,
-		__BITFIELD_FIELD(uint32_t erlysusp		: 1,
-		__BITFIELD_FIELD(uint32_t i2cint		: 1,
-		__BITFIELD_FIELD(uint32_t ulpickint		: 1,
-		__BITFIELD_FIELD(uint32_t goutnakeff		: 1,
-		__BITFIELD_FIELD(uint32_t ginnakeff		: 1,
-		__BITFIELD_FIELD(uint32_t nptxfemp		: 1,
-		__BITFIELD_FIELD(uint32_t rxflvl		: 1,
-		__BITFIELD_FIELD(uint32_t sof			: 1,
-		__BITFIELD_FIELD(uint32_t otgint		: 1,
-		__BITFIELD_FIELD(uint32_t modemis		: 1,
-		__BITFIELD_FIELD(uint32_t curmod		: 1,
+		__BITFIELD_FIELD(u32 wkupint		: 1,
+		__BITFIELD_FIELD(u32 sessreqint		: 1,
+		__BITFIELD_FIELD(u32 disconnint		: 1,
+		__BITFIELD_FIELD(u32 conidstschng	: 1,
+		__BITFIELD_FIELD(u32 reserved_27_27	: 1,
+		__BITFIELD_FIELD(u32 ptxfemp		: 1,
+		__BITFIELD_FIELD(u32 hchint		: 1,
+		__BITFIELD_FIELD(u32 prtint		: 1,
+		__BITFIELD_FIELD(u32 reserved_23_23	: 1,
+		__BITFIELD_FIELD(u32 fetsusp		: 1,
+		__BITFIELD_FIELD(u32 incomplp		: 1,
+		__BITFIELD_FIELD(u32 incompisoin	: 1,
+		__BITFIELD_FIELD(u32 oepint		: 1,
+		__BITFIELD_FIELD(u32 iepint		: 1,
+		__BITFIELD_FIELD(u32 epmis		: 1,
+		__BITFIELD_FIELD(u32 reserved_16_16	: 1,
+		__BITFIELD_FIELD(u32 eopf		: 1,
+		__BITFIELD_FIELD(u32 isooutdrop		: 1,
+		__BITFIELD_FIELD(u32 enumdone		: 1,
+		__BITFIELD_FIELD(u32 usbrst		: 1,
+		__BITFIELD_FIELD(u32 usbsusp		: 1,
+		__BITFIELD_FIELD(u32 erlysusp		: 1,
+		__BITFIELD_FIELD(u32 i2cint		: 1,
+		__BITFIELD_FIELD(u32 ulpickint		: 1,
+		__BITFIELD_FIELD(u32 goutnakeff		: 1,
+		__BITFIELD_FIELD(u32 ginnakeff		: 1,
+		__BITFIELD_FIELD(u32 nptxfemp		: 1,
+		__BITFIELD_FIELD(u32 rxflvl		: 1,
+		__BITFIELD_FIELD(u32 sof		: 1,
+		__BITFIELD_FIELD(u32 otgint		: 1,
+		__BITFIELD_FIELD(u32 modemis		: 1,
+		__BITFIELD_FIELD(u32 curmod		: 1,
 		;))))))))))))))))))))))))))))))))
 	} s;
 };
@@ -554,7 +554,7 @@
  * Non-Periodic TxFIFO.
  */
 union cvmx_usbcx_gnptxfsiz {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_gnptxfsiz_s
 	 * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
@@ -566,8 +566,8 @@
 	 *	Transmit FIFO RAM.
 	 */
 	struct cvmx_usbcx_gnptxfsiz_s {
-		__BITFIELD_FIELD(uint32_t nptxfdep	: 16,
-		__BITFIELD_FIELD(uint32_t nptxfstaddr	: 16,
+		__BITFIELD_FIELD(u32 nptxfdep		: 16,
+		__BITFIELD_FIELD(u32 nptxfstaddr	: 16,
 		;))
 	} s;
 };
@@ -581,7 +581,7 @@
  * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
  */
 union cvmx_usbcx_gnptxsts {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_gnptxsts_s
 	 * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
@@ -617,10 +617,10 @@
 	 *	* Others: Reserved
 	 */
 	struct cvmx_usbcx_gnptxsts_s {
-		__BITFIELD_FIELD(uint32_t reserved_31_31	: 1,
-		__BITFIELD_FIELD(uint32_t nptxqtop		: 7,
-		__BITFIELD_FIELD(uint32_t nptxqspcavail		: 8,
-		__BITFIELD_FIELD(uint32_t nptxfspcavail		: 16,
+		__BITFIELD_FIELD(u32 reserved_31_31	: 1,
+		__BITFIELD_FIELD(u32 nptxqtop		: 7,
+		__BITFIELD_FIELD(u32 nptxqspcavail	: 8,
+		__BITFIELD_FIELD(u32 nptxfspcavail	: 16,
 		;))))
 	} s;
 };
@@ -634,7 +634,7 @@
  * the core.
  */
 union cvmx_usbcx_grstctl {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_grstctl_s
 	 * @ahbidle: AHB Master Idle (AHBIdle)
@@ -739,16 +739,16 @@
 	 *	selected, the PHY domain has to be reset for proper operation.
 	 */
 	struct cvmx_usbcx_grstctl_s {
-		__BITFIELD_FIELD(uint32_t ahbidle		: 1,
-		__BITFIELD_FIELD(uint32_t dmareq		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_11_29	: 19,
-		__BITFIELD_FIELD(uint32_t txfnum		: 5,
-		__BITFIELD_FIELD(uint32_t txfflsh		: 1,
-		__BITFIELD_FIELD(uint32_t rxfflsh		: 1,
-		__BITFIELD_FIELD(uint32_t intknqflsh		: 1,
-		__BITFIELD_FIELD(uint32_t frmcntrrst		: 1,
-		__BITFIELD_FIELD(uint32_t hsftrst		: 1,
-		__BITFIELD_FIELD(uint32_t csftrst		: 1,
+		__BITFIELD_FIELD(u32 ahbidle		: 1,
+		__BITFIELD_FIELD(u32 dmareq		: 1,
+		__BITFIELD_FIELD(u32 reserved_11_29	: 19,
+		__BITFIELD_FIELD(u32 txfnum		: 5,
+		__BITFIELD_FIELD(u32 txfflsh		: 1,
+		__BITFIELD_FIELD(u32 rxfflsh		: 1,
+		__BITFIELD_FIELD(u32 intknqflsh		: 1,
+		__BITFIELD_FIELD(u32 frmcntrrst		: 1,
+		__BITFIELD_FIELD(u32 hsftrst		: 1,
+		__BITFIELD_FIELD(u32 csftrst		: 1,
 		;))))))))))
 	} s;
 };
@@ -762,7 +762,7 @@
  * RxFIFO.
  */
 union cvmx_usbcx_grxfsiz {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_grxfsiz_s
 	 * @rxfdep: RxFIFO Depth (RxFDep)
@@ -771,8 +771,8 @@
 	 *	* Maximum value is 32768
 	 */
 	struct cvmx_usbcx_grxfsiz_s {
-		__BITFIELD_FIELD(uint32_t reserved_16_31	: 16,
-		__BITFIELD_FIELD(uint32_t rxfdep		: 16,
+		__BITFIELD_FIELD(u32 reserved_16_31	: 16,
+		__BITFIELD_FIELD(u32 rxfdep		: 16,
 		;))
 	} s;
 };
@@ -792,7 +792,7 @@
  *       hardware.
  */
 union cvmx_usbcx_grxstsph {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_grxstsph_s
 	 * @pktsts: Packet Status (PktSts)
@@ -814,11 +814,11 @@
 	 *	packet belongs.
 	 */
 	struct cvmx_usbcx_grxstsph_s {
-		__BITFIELD_FIELD(uint32_t reserved_21_31	: 11,
-		__BITFIELD_FIELD(uint32_t pktsts		: 4,
-		__BITFIELD_FIELD(uint32_t dpid			: 2,
-		__BITFIELD_FIELD(uint32_t bcnt			: 11,
-		__BITFIELD_FIELD(uint32_t chnum			: 4,
+		__BITFIELD_FIELD(u32 reserved_21_31	: 11,
+		__BITFIELD_FIELD(u32 pktsts		: 4,
+		__BITFIELD_FIELD(u32 dpid		: 2,
+		__BITFIELD_FIELD(u32 bcnt		: 11,
+		__BITFIELD_FIELD(u32 chnum		: 4,
 		;)))))
 	} s;
 };
@@ -835,7 +835,7 @@
  * to this register after the initial programming.
  */
 union cvmx_usbcx_gusbcfg {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_gusbcfg_s
 	 * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
@@ -895,19 +895,19 @@
 	 *	* One 48-MHz PHY clock = 0.25 bit times
 	 */
 	struct cvmx_usbcx_gusbcfg_s {
-		__BITFIELD_FIELD(uint32_t reserved_17_31	: 15,
-		__BITFIELD_FIELD(uint32_t otgi2csel		: 1,
-		__BITFIELD_FIELD(uint32_t phylpwrclksel		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_14_14	: 1,
-		__BITFIELD_FIELD(uint32_t usbtrdtim		: 4,
-		__BITFIELD_FIELD(uint32_t hnpcap		: 1,
-		__BITFIELD_FIELD(uint32_t srpcap		: 1,
-		__BITFIELD_FIELD(uint32_t ddrsel		: 1,
-		__BITFIELD_FIELD(uint32_t physel		: 1,
-		__BITFIELD_FIELD(uint32_t fsintf		: 1,
-		__BITFIELD_FIELD(uint32_t ulpi_utmi_sel		: 1,
-		__BITFIELD_FIELD(uint32_t phyif			: 1,
-		__BITFIELD_FIELD(uint32_t toutcal		: 3,
+		__BITFIELD_FIELD(u32 reserved_17_31	: 15,
+		__BITFIELD_FIELD(u32 otgi2csel		: 1,
+		__BITFIELD_FIELD(u32 phylpwrclksel	: 1,
+		__BITFIELD_FIELD(u32 reserved_14_14	: 1,
+		__BITFIELD_FIELD(u32 usbtrdtim		: 4,
+		__BITFIELD_FIELD(u32 hnpcap		: 1,
+		__BITFIELD_FIELD(u32 srpcap		: 1,
+		__BITFIELD_FIELD(u32 ddrsel		: 1,
+		__BITFIELD_FIELD(u32 physel		: 1,
+		__BITFIELD_FIELD(u32 fsintf		: 1,
+		__BITFIELD_FIELD(u32 ulpi_utmi_sel	: 1,
+		__BITFIELD_FIELD(u32 phyif		: 1,
+		__BITFIELD_FIELD(u32 toutcal		: 3,
 		;)))))))))))))
 	} s;
 };
@@ -925,15 +925,15 @@
  * in the corresponding Host Channel-n Interrupt register.
  */
 union cvmx_usbcx_haint {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_haint_s
 	 * @haint: Channel Interrupts (HAINT)
 	 *	One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
 	 */
 	struct cvmx_usbcx_haint_s {
-		__BITFIELD_FIELD(uint32_t reserved_16_31	: 16,
-		__BITFIELD_FIELD(uint32_t haint			: 16,
+		__BITFIELD_FIELD(u32 reserved_16_31	: 16,
+		__BITFIELD_FIELD(u32 haint		: 16,
 		;))
 	} s;
 };
@@ -950,15 +950,15 @@
  * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
  */
 union cvmx_usbcx_haintmsk {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_haintmsk_s
 	 * @haintmsk: Channel Interrupt Mask (HAINTMsk)
 	 *	One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
 	 */
 	struct cvmx_usbcx_haintmsk_s {
-		__BITFIELD_FIELD(uint32_t reserved_16_31	: 16,
-		__BITFIELD_FIELD(uint32_t haintmsk		: 16,
+		__BITFIELD_FIELD(u32 reserved_16_31	: 16,
+		__BITFIELD_FIELD(u32 haintmsk		: 16,
 		;))
 	} s;
 };
@@ -970,7 +970,7 @@
  *
  */
 union cvmx_usbcx_hccharx {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hccharx_s
 	 * @chena: Channel Enable (ChEna)
@@ -1028,17 +1028,17 @@
 	 *	Indicates the maximum packet size of the associated endpoint.
 	 */
 	struct cvmx_usbcx_hccharx_s {
-		__BITFIELD_FIELD(uint32_t chena			: 1,
-		__BITFIELD_FIELD(uint32_t chdis			: 1,
-		__BITFIELD_FIELD(uint32_t oddfrm		: 1,
-		__BITFIELD_FIELD(uint32_t devaddr		: 7,
-		__BITFIELD_FIELD(uint32_t ec			: 2,
-		__BITFIELD_FIELD(uint32_t eptype		: 2,
-		__BITFIELD_FIELD(uint32_t lspddev		: 1,
-		__BITFIELD_FIELD(uint32_t reserved_16_16	: 1,
-		__BITFIELD_FIELD(uint32_t epdir			: 1,
-		__BITFIELD_FIELD(uint32_t epnum			: 4,
-		__BITFIELD_FIELD(uint32_t mps			: 11,
+		__BITFIELD_FIELD(u32 chena		: 1,
+		__BITFIELD_FIELD(u32 chdis		: 1,
+		__BITFIELD_FIELD(u32 oddfrm		: 1,
+		__BITFIELD_FIELD(u32 devaddr		: 7,
+		__BITFIELD_FIELD(u32 ec			: 2,
+		__BITFIELD_FIELD(u32 eptype		: 2,
+		__BITFIELD_FIELD(u32 lspddev		: 1,
+		__BITFIELD_FIELD(u32 reserved_16_16	: 1,
+		__BITFIELD_FIELD(u32 epdir		: 1,
+		__BITFIELD_FIELD(u32 epnum		: 4,
+		__BITFIELD_FIELD(u32 mps		: 11,
 		;)))))))))))
 	} s;
 };
@@ -1052,7 +1052,7 @@
  * register after initializing the host.
  */
 union cvmx_usbcx_hcfg {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hcfg_s
 	 * @fslssupp: FS- and LS-Only Support (FSLSSupp)
@@ -1084,9 +1084,9 @@
 	 *	* 2'b11: Reserved
 	 */
 	struct cvmx_usbcx_hcfg_s {
-		__BITFIELD_FIELD(uint32_t reserved_3_31	: 29,
-		__BITFIELD_FIELD(uint32_t fslssupp	: 1,
-		__BITFIELD_FIELD(uint32_t fslspclksel	: 2,
+		__BITFIELD_FIELD(u32 reserved_3_31	: 29,
+		__BITFIELD_FIELD(u32 fslssupp		: 1,
+		__BITFIELD_FIELD(u32 fslspclksel	: 2,
 		;)))
 	} s;
 };
@@ -1106,7 +1106,7 @@
  * HAINT and GINTSTS registers.
  */
 union cvmx_usbcx_hcintx {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hcintx_s
 	 * @datatglerr: Data Toggle Error (DataTglErr)
@@ -1126,18 +1126,18 @@
 	 *	Transfer completed normally without any errors.
 	 */
 	struct cvmx_usbcx_hcintx_s {
-		__BITFIELD_FIELD(uint32_t reserved_11_31	: 21,
-		__BITFIELD_FIELD(uint32_t datatglerr		: 1,
-		__BITFIELD_FIELD(uint32_t frmovrun		: 1,
-		__BITFIELD_FIELD(uint32_t bblerr		: 1,
-		__BITFIELD_FIELD(uint32_t xacterr		: 1,
-		__BITFIELD_FIELD(uint32_t nyet			: 1,
-		__BITFIELD_FIELD(uint32_t ack			: 1,
-		__BITFIELD_FIELD(uint32_t nak			: 1,
-		__BITFIELD_FIELD(uint32_t stall			: 1,
-		__BITFIELD_FIELD(uint32_t ahberr		: 1,
-		__BITFIELD_FIELD(uint32_t chhltd		: 1,
-		__BITFIELD_FIELD(uint32_t xfercompl		: 1,
+		__BITFIELD_FIELD(u32 reserved_11_31	: 21,
+		__BITFIELD_FIELD(u32 datatglerr		: 1,
+		__BITFIELD_FIELD(u32 frmovrun		: 1,
+		__BITFIELD_FIELD(u32 bblerr		: 1,
+		__BITFIELD_FIELD(u32 xacterr		: 1,
+		__BITFIELD_FIELD(u32 nyet		: 1,
+		__BITFIELD_FIELD(u32 ack		: 1,
+		__BITFIELD_FIELD(u32 nak		: 1,
+		__BITFIELD_FIELD(u32 stall		: 1,
+		__BITFIELD_FIELD(u32 ahberr		: 1,
+		__BITFIELD_FIELD(u32 chhltd		: 1,
+		__BITFIELD_FIELD(u32 xfercompl		: 1,
 		;))))))))))))
 	} s;
 };
@@ -1152,7 +1152,7 @@
  * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
  */
 union cvmx_usbcx_hcintmskx {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hcintmskx_s
 	 * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
@@ -1168,18 +1168,18 @@
 	 * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
 	 */
 	struct cvmx_usbcx_hcintmskx_s {
-		__BITFIELD_FIELD(uint32_t reserved_11_31		: 21,
-		__BITFIELD_FIELD(uint32_t datatglerrmsk			: 1,
-		__BITFIELD_FIELD(uint32_t frmovrunmsk			: 1,
-		__BITFIELD_FIELD(uint32_t bblerrmsk			: 1,
-		__BITFIELD_FIELD(uint32_t xacterrmsk			: 1,
-		__BITFIELD_FIELD(uint32_t nyetmsk			: 1,
-		__BITFIELD_FIELD(uint32_t ackmsk			: 1,
-		__BITFIELD_FIELD(uint32_t nakmsk			: 1,
-		__BITFIELD_FIELD(uint32_t stallmsk			: 1,
-		__BITFIELD_FIELD(uint32_t ahberrmsk			: 1,
-		__BITFIELD_FIELD(uint32_t chhltdmsk			: 1,
-		__BITFIELD_FIELD(uint32_t xfercomplmsk			: 1,
+		__BITFIELD_FIELD(u32 reserved_11_31		: 21,
+		__BITFIELD_FIELD(u32 datatglerrmsk		: 1,
+		__BITFIELD_FIELD(u32 frmovrunmsk		: 1,
+		__BITFIELD_FIELD(u32 bblerrmsk			: 1,
+		__BITFIELD_FIELD(u32 xacterrmsk			: 1,
+		__BITFIELD_FIELD(u32 nyetmsk			: 1,
+		__BITFIELD_FIELD(u32 ackmsk			: 1,
+		__BITFIELD_FIELD(u32 nakmsk			: 1,
+		__BITFIELD_FIELD(u32 stallmsk			: 1,
+		__BITFIELD_FIELD(u32 ahberrmsk			: 1,
+		__BITFIELD_FIELD(u32 chhltdmsk			: 1,
+		__BITFIELD_FIELD(u32 xfercomplmsk		: 1,
 		;))))))))))))
 	} s;
 };
@@ -1191,7 +1191,7 @@
  *
  */
 union cvmx_usbcx_hcspltx {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hcspltx_s
 	 * @spltena: Split Enable (SpltEna)
@@ -1219,12 +1219,12 @@
 	 *	translator.
 	 */
 	struct cvmx_usbcx_hcspltx_s {
-		__BITFIELD_FIELD(uint32_t spltena			: 1,
-		__BITFIELD_FIELD(uint32_t reserved_17_30		: 14,
-		__BITFIELD_FIELD(uint32_t compsplt			: 1,
-		__BITFIELD_FIELD(uint32_t xactpos			: 2,
-		__BITFIELD_FIELD(uint32_t hubaddr			: 7,
-		__BITFIELD_FIELD(uint32_t prtaddr			: 7,
+		__BITFIELD_FIELD(u32 spltena			: 1,
+		__BITFIELD_FIELD(u32 reserved_17_30		: 14,
+		__BITFIELD_FIELD(u32 compsplt			: 1,
+		__BITFIELD_FIELD(u32 xactpos			: 2,
+		__BITFIELD_FIELD(u32 hubaddr			: 7,
+		__BITFIELD_FIELD(u32 prtaddr			: 7,
 		;))))))
 	} s;
 };
@@ -1236,7 +1236,7 @@
  *
  */
 union cvmx_usbcx_hctsizx {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hctsizx_s
 	 * @dopng: Do Ping (DoPng)
@@ -1265,10 +1265,10 @@
 	 *	size for IN transactions (periodic and non-periodic).
 	 */
 	struct cvmx_usbcx_hctsizx_s {
-		__BITFIELD_FIELD(uint32_t dopng			: 1,
-		__BITFIELD_FIELD(uint32_t pid			: 2,
-		__BITFIELD_FIELD(uint32_t pktcnt		: 10,
-		__BITFIELD_FIELD(uint32_t xfersize		: 19,
+		__BITFIELD_FIELD(u32 dopng		: 1,
+		__BITFIELD_FIELD(u32 pid		: 2,
+		__BITFIELD_FIELD(u32 pktcnt		: 10,
+		__BITFIELD_FIELD(u32 xfersize		: 19,
 		;))))
 	} s;
 };
@@ -1282,7 +1282,7 @@
  * which the O2P USB core has enumerated.
  */
 union cvmx_usbcx_hfir {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hfir_s
 	 * @frint: Frame Interval (FrInt)
@@ -1303,8 +1303,8 @@
 	 *	* 1 ms (PHY clock frequency for FS/LS)
 	 */
 	struct cvmx_usbcx_hfir_s {
-		__BITFIELD_FIELD(uint32_t reserved_16_31		: 16,
-		__BITFIELD_FIELD(uint32_t frint				: 16,
+		__BITFIELD_FIELD(u32 reserved_16_31		: 16,
+		__BITFIELD_FIELD(u32 frint			: 16,
 		;))
 	} s;
 };
@@ -1319,7 +1319,7 @@
  * in the current (micro)frame.
  */
 union cvmx_usbcx_hfnum {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hfnum_s
 	 * @frrem: Frame Time Remaining (FrRem)
@@ -1333,8 +1333,8 @@
 	 *	USB, and is reset to 0 when it reaches 16'h3FFF.
 	 */
 	struct cvmx_usbcx_hfnum_s {
-		__BITFIELD_FIELD(uint32_t frrem		: 16,
-		__BITFIELD_FIELD(uint32_t frnum		: 16,
+		__BITFIELD_FIELD(u32 frrem		: 16,
+		__BITFIELD_FIELD(u32 frnum		: 16,
 		;))
 	} s;
 };
@@ -1355,7 +1355,7 @@
  * the application must write a 1 to the bit to clear the interrupt.
  */
 union cvmx_usbcx_hprt {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hprt_s
 	 * @prtspd: Port Speed (PrtSpd)
@@ -1461,21 +1461,21 @@
 	 *	* 1: A device is attached to the port.
 	 */
 	struct cvmx_usbcx_hprt_s {
-		__BITFIELD_FIELD(uint32_t reserved_19_31	: 13,
-		__BITFIELD_FIELD(uint32_t prtspd		: 2,
-		__BITFIELD_FIELD(uint32_t prttstctl		: 4,
-		__BITFIELD_FIELD(uint32_t prtpwr		: 1,
-		__BITFIELD_FIELD(uint32_t prtlnsts		: 2,
-		__BITFIELD_FIELD(uint32_t reserved_9_9		: 1,
-		__BITFIELD_FIELD(uint32_t prtrst		: 1,
-		__BITFIELD_FIELD(uint32_t prtsusp		: 1,
-		__BITFIELD_FIELD(uint32_t prtres		: 1,
-		__BITFIELD_FIELD(uint32_t prtovrcurrchng	: 1,
-		__BITFIELD_FIELD(uint32_t prtovrcurract		: 1,
-		__BITFIELD_FIELD(uint32_t prtenchng		: 1,
-		__BITFIELD_FIELD(uint32_t prtena		: 1,
-		__BITFIELD_FIELD(uint32_t prtconndet		: 1,
-		__BITFIELD_FIELD(uint32_t prtconnsts		: 1,
+		__BITFIELD_FIELD(u32 reserved_19_31	: 13,
+		__BITFIELD_FIELD(u32 prtspd		: 2,
+		__BITFIELD_FIELD(u32 prttstctl		: 4,
+		__BITFIELD_FIELD(u32 prtpwr		: 1,
+		__BITFIELD_FIELD(u32 prtlnsts		: 2,
+		__BITFIELD_FIELD(u32 reserved_9_9	: 1,
+		__BITFIELD_FIELD(u32 prtrst		: 1,
+		__BITFIELD_FIELD(u32 prtsusp		: 1,
+		__BITFIELD_FIELD(u32 prtres		: 1,
+		__BITFIELD_FIELD(u32 prtovrcurrchng	: 1,
+		__BITFIELD_FIELD(u32 prtovrcurract	: 1,
+		__BITFIELD_FIELD(u32 prtenchng		: 1,
+		__BITFIELD_FIELD(u32 prtena		: 1,
+		__BITFIELD_FIELD(u32 prtconndet		: 1,
+		__BITFIELD_FIELD(u32 prtconnsts		: 1,
 		;)))))))))))))))
 	} s;
 };
@@ -1489,7 +1489,7 @@
  * TxFIFO, as shown in Figures 310 and 311.
  */
 union cvmx_usbcx_hptxfsiz {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hptxfsiz_s
 	 * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
@@ -1499,8 +1499,8 @@
 	 * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
 	 */
 	struct cvmx_usbcx_hptxfsiz_s {
-		__BITFIELD_FIELD(uint32_t ptxfsize	: 16,
-		__BITFIELD_FIELD(uint32_t ptxfstaddr	: 16,
+		__BITFIELD_FIELD(u32 ptxfsize	: 16,
+		__BITFIELD_FIELD(u32 ptxfstaddr	: 16,
 		;))
 	} s;
 };
@@ -1514,7 +1514,7 @@
  * TxFIFO and the Periodic Transmit Request Queue
  */
 union cvmx_usbcx_hptxsts {
-	uint32_t u32;
+	u32 u32;
 	/**
 	 * struct cvmx_usbcx_hptxsts_s
 	 * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
@@ -1555,9 +1555,9 @@
 	 *	* Others: Reserved
 	 */
 	struct cvmx_usbcx_hptxsts_s {
-		__BITFIELD_FIELD(uint32_t ptxqtop	: 8,
-		__BITFIELD_FIELD(uint32_t ptxqspcavail	: 8,
-		__BITFIELD_FIELD(uint32_t ptxfspcavail	: 16,
+		__BITFIELD_FIELD(u32 ptxqtop		: 8,
+		__BITFIELD_FIELD(u32 ptxqspcavail	: 8,
+		__BITFIELD_FIELD(u32 ptxfspcavail	: 16,
 		;)))
 	} s;
 };
@@ -1571,7 +1571,7 @@
  * hreset and phy_rst signals.
  */
 union cvmx_usbnx_clk_ctl {
-	uint64_t u64;
+	u64 u64;
 	/**
 	 * struct cvmx_usbnx_clk_ctl_s
 	 * @divide2: The 'hclk' used by the USB subsystem is derived
@@ -1661,21 +1661,21 @@
 	 *	until AFTER this field is set and then read.
 	 */
 	struct cvmx_usbnx_clk_ctl_s {
-		__BITFIELD_FIELD(uint64_t reserved_20_63	: 44,
-		__BITFIELD_FIELD(uint64_t divide2		: 2,
-		__BITFIELD_FIELD(uint64_t hclk_rst		: 1,
-		__BITFIELD_FIELD(uint64_t p_x_on		: 1,
-		__BITFIELD_FIELD(uint64_t p_rtype		: 2,
-		__BITFIELD_FIELD(uint64_t p_com_on		: 1,
-		__BITFIELD_FIELD(uint64_t p_c_sel		: 2,
-		__BITFIELD_FIELD(uint64_t cdiv_byp		: 1,
-		__BITFIELD_FIELD(uint64_t sd_mode		: 2,
-		__BITFIELD_FIELD(uint64_t s_bist		: 1,
-		__BITFIELD_FIELD(uint64_t por			: 1,
-		__BITFIELD_FIELD(uint64_t enable		: 1,
-		__BITFIELD_FIELD(uint64_t prst			: 1,
-		__BITFIELD_FIELD(uint64_t hrst			: 1,
-		__BITFIELD_FIELD(uint64_t divide		: 3,
+		__BITFIELD_FIELD(u64 reserved_20_63	: 44,
+		__BITFIELD_FIELD(u64 divide2		: 2,
+		__BITFIELD_FIELD(u64 hclk_rst		: 1,
+		__BITFIELD_FIELD(u64 p_x_on		: 1,
+		__BITFIELD_FIELD(u64 p_rtype		: 2,
+		__BITFIELD_FIELD(u64 p_com_on		: 1,
+		__BITFIELD_FIELD(u64 p_c_sel		: 2,
+		__BITFIELD_FIELD(u64 cdiv_byp		: 1,
+		__BITFIELD_FIELD(u64 sd_mode		: 2,
+		__BITFIELD_FIELD(u64 s_bist		: 1,
+		__BITFIELD_FIELD(u64 por		: 1,
+		__BITFIELD_FIELD(u64 enable		: 1,
+		__BITFIELD_FIELD(u64 prst		: 1,
+		__BITFIELD_FIELD(u64 hrst		: 1,
+		__BITFIELD_FIELD(u64 divide		: 3,
 		;)))))))))))))))
 	} s;
 };
@@ -1688,7 +1688,7 @@
  * Contains general control and status information for the USBN block.
  */
 union cvmx_usbnx_usbp_ctl_status {
-	uint64_t u64;
+	u64 u64;
 	/**
 	 * struct cvmx_usbnx_usbp_ctl_status_s
 	 * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
@@ -1804,41 +1804,41 @@
 	 *	de-assertion.
 	 */
 	struct cvmx_usbnx_usbp_ctl_status_s {
-		__BITFIELD_FIELD(uint64_t txrisetune		: 1,
-		__BITFIELD_FIELD(uint64_t txvreftune		: 4,
-		__BITFIELD_FIELD(uint64_t txfslstune		: 4,
-		__BITFIELD_FIELD(uint64_t txhsxvtune		: 2,
-		__BITFIELD_FIELD(uint64_t sqrxtune		: 3,
-		__BITFIELD_FIELD(uint64_t compdistune		: 3,
-		__BITFIELD_FIELD(uint64_t otgtune		: 3,
-		__BITFIELD_FIELD(uint64_t otgdisable		: 1,
-		__BITFIELD_FIELD(uint64_t portreset		: 1,
-		__BITFIELD_FIELD(uint64_t drvvbus		: 1,
-		__BITFIELD_FIELD(uint64_t lsbist		: 1,
-		__BITFIELD_FIELD(uint64_t fsbist		: 1,
-		__BITFIELD_FIELD(uint64_t hsbist		: 1,
-		__BITFIELD_FIELD(uint64_t bist_done		: 1,
-		__BITFIELD_FIELD(uint64_t bist_err		: 1,
-		__BITFIELD_FIELD(uint64_t tdata_out		: 4,
-		__BITFIELD_FIELD(uint64_t siddq			: 1,
-		__BITFIELD_FIELD(uint64_t txpreemphasistune	: 1,
-		__BITFIELD_FIELD(uint64_t dma_bmode		: 1,
-		__BITFIELD_FIELD(uint64_t usbc_end		: 1,
-		__BITFIELD_FIELD(uint64_t usbp_bist		: 1,
-		__BITFIELD_FIELD(uint64_t tclk			: 1,
-		__BITFIELD_FIELD(uint64_t dp_pulld		: 1,
-		__BITFIELD_FIELD(uint64_t dm_pulld		: 1,
-		__BITFIELD_FIELD(uint64_t hst_mode		: 1,
-		__BITFIELD_FIELD(uint64_t tuning		: 4,
-		__BITFIELD_FIELD(uint64_t tx_bs_enh		: 1,
-		__BITFIELD_FIELD(uint64_t tx_bs_en		: 1,
-		__BITFIELD_FIELD(uint64_t loop_enb		: 1,
-		__BITFIELD_FIELD(uint64_t vtest_enb		: 1,
-		__BITFIELD_FIELD(uint64_t bist_enb		: 1,
-		__BITFIELD_FIELD(uint64_t tdata_sel		: 1,
-		__BITFIELD_FIELD(uint64_t taddr_in		: 4,
-		__BITFIELD_FIELD(uint64_t tdata_in		: 8,
-		__BITFIELD_FIELD(uint64_t ate_reset		: 1,
+		__BITFIELD_FIELD(u64 txrisetune		: 1,
+		__BITFIELD_FIELD(u64 txvreftune		: 4,
+		__BITFIELD_FIELD(u64 txfslstune		: 4,
+		__BITFIELD_FIELD(u64 txhsxvtune		: 2,
+		__BITFIELD_FIELD(u64 sqrxtune		: 3,
+		__BITFIELD_FIELD(u64 compdistune	: 3,
+		__BITFIELD_FIELD(u64 otgtune		: 3,
+		__BITFIELD_FIELD(u64 otgdisable		: 1,
+		__BITFIELD_FIELD(u64 portreset		: 1,
+		__BITFIELD_FIELD(u64 drvvbus		: 1,
+		__BITFIELD_FIELD(u64 lsbist		: 1,
+		__BITFIELD_FIELD(u64 fsbist		: 1,
+		__BITFIELD_FIELD(u64 hsbist		: 1,
+		__BITFIELD_FIELD(u64 bist_done		: 1,
+		__BITFIELD_FIELD(u64 bist_err		: 1,
+		__BITFIELD_FIELD(u64 tdata_out		: 4,
+		__BITFIELD_FIELD(u64 siddq		: 1,
+		__BITFIELD_FIELD(u64 txpreemphasistune	: 1,
+		__BITFIELD_FIELD(u64 dma_bmode		: 1,
+		__BITFIELD_FIELD(u64 usbc_end		: 1,
+		__BITFIELD_FIELD(u64 usbp_bist		: 1,
+		__BITFIELD_FIELD(u64 tclk		: 1,
+		__BITFIELD_FIELD(u64 dp_pulld		: 1,
+		__BITFIELD_FIELD(u64 dm_pulld		: 1,
+		__BITFIELD_FIELD(u64 hst_mode		: 1,
+		__BITFIELD_FIELD(u64 tuning		: 4,
+		__BITFIELD_FIELD(u64 tx_bs_enh		: 1,
+		__BITFIELD_FIELD(u64 tx_bs_en		: 1,
+		__BITFIELD_FIELD(u64 loop_enb		: 1,
+		__BITFIELD_FIELD(u64 vtest_enb		: 1,
+		__BITFIELD_FIELD(u64 bist_enb		: 1,
+		__BITFIELD_FIELD(u64 tdata_sel		: 1,
+		__BITFIELD_FIELD(u64 taddr_in		: 4,
+		__BITFIELD_FIELD(u64 tdata_in		: 8,
+		__BITFIELD_FIELD(u64 ate_reset		: 1,
 		;)))))))))))))))))))))))))))))))))))
 	} s;
 };
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index fd9b3d8..0ac11b1 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -118,13 +118,20 @@
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	cvmx_helper_link_info_t link_info;
 
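+	/* Cache the PHY's link state so the poll callback can use it. */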
+	link_info.u64		= 0;
+	link_info.s.link_up	= priv->phydev->link ? 1 : 0;
+	link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
+	link_info.s.speed	= priv->phydev->speed;
+	priv->link_info		= link_info.u64;
+
+	/*
+	 * The polling task needs to know about link status changes.
+	 */
+	if (priv->poll)
+		priv->poll(dev);
+
 	if (priv->last_link != priv->phydev->link) {
 		priv->last_link = priv->phydev->link;
-		link_info.u64 = 0;
-		link_info.s.link_up = priv->last_link ? 1 : 0;
-		link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
-		link_info.s.speed = priv->phydev->speed;
-
 		cvmx_helper_link_set(priv->port, link_info);
 		cvm_oct_note_carrier(priv, link_info);
 	}
@@ -174,6 +181,15 @@
 		goto no_phy;
 
 	phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
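+	/* No phy-handle: fall back to a fixed-link node if the DT has one. */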
+	if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
+		int rc;
+
+		rc = of_phy_register_fixed_link(priv->of_node);
+		if (rc)
+			return rc;
+
+		phy_node = of_node_get(priv->of_node);
+	}
 	if (!phy_node)
 		goto no_phy;
 
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 1055ee1..91b148c 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -30,8 +30,6 @@
 
 static DEFINE_SPINLOCK(global_register_lock);
 
-static int number_rgmii_ports;
-
 static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
 {
 	union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
@@ -63,251 +61,106 @@
 		       gmxx_rxx_int_reg.u64);
 }
 
+static void cvm_oct_check_preamble_errors(struct net_device *dev)
+{
+	struct octeon_ethernet *priv = netdev_priv(dev);
+	cvmx_helper_link_info_t link_info;
+	unsigned long flags;
+
+	link_info.u64 = priv->link_info;
+
+	/*
+	 * Take the global register lock since we are going to
+	 * touch registers that affect more than one port.
+	 */
+	spin_lock_irqsave(&global_register_lock, flags);
+
+	if (link_info.s.speed == 10 && priv->last_speed == 10) {
+		/*
+		 * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
+		 * getting preamble errors.
+		 */
+		int interface = INTERFACE(priv->port);
+		int index = INDEX(priv->port);
+		union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+
+		gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+							(index, interface));
+		if (gmxx_rxx_int_reg.s.pcterr) {
+			/*
+			 * We are getting preamble errors at 10Mbps. Most
+			 * likely the PHY is giving us packets with misaligned
+			 * preambles. In order to get these packets we need to
+			 * disable preamble checking and do it in software.
+			 */
+			cvm_oct_set_hw_preamble(priv, false);
+			printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
+					   dev->name);
+		}
+	} else {
+		/*
+		 * Since the 10Mbps preamble workaround is allowed, we need to
+		 * enable preamble checking, FCS stripping, and clear error
+		 * bits on every speed change. If errors occur during 10Mbps
+		 * operation, the code above will adjust these settings.
+		 */
+		if (priv->last_speed != link_info.s.speed)
+			cvm_oct_set_hw_preamble(priv, true);
+		priv->last_speed = link_info.s.speed;
+	}
+	spin_unlock_irqrestore(&global_register_lock, flags);
+}
+
 static void cvm_oct_rgmii_poll(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
-	unsigned long flags = 0;
 	cvmx_helper_link_info_t link_info;
-	int use_global_register_lock = (priv->phydev == NULL);
+	bool status_change;
 
-	BUG_ON(in_interrupt());
-	if (use_global_register_lock) {
-		/*
-		 * Take the global register lock since we are going to
-		 * touch registers that affect more than one port.
-		 */
-		spin_lock_irqsave(&global_register_lock, flags);
-	} else {
-		mutex_lock(&priv->phydev->mdio.bus->mdio_lock);
-	}
+	link_info = cvmx_helper_link_autoconf(priv->port);
+	status_change = priv->link_info != link_info.u64;
+	priv->link_info = link_info.u64;
 
-	link_info = cvmx_helper_link_get(priv->port);
-	if (link_info.u64 == priv->link_info) {
-		if (link_info.s.speed == 10) {
-			/*
-			 * Read the GMXX_RXX_INT_REG[PCTERR] bit and
-			 * see if we are getting preamble errors.
-			 */
-			int interface = INTERFACE(priv->port);
-			int index = INDEX(priv->port);
-			union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+	cvm_oct_check_preamble_errors(dev);
 
-			gmxx_rxx_int_reg.u64 =
-			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
-					  (index, interface));
-			if (gmxx_rxx_int_reg.s.pcterr) {
-				/*
-				 * We are getting preamble errors at
-				 * 10Mbps.  Most likely the PHY is
-				 * giving us packets with mis aligned
-				 * preambles. In order to get these
-				 * packets we need to disable preamble
-				 * checking and do it in software.
-				 */
-				cvm_oct_set_hw_preamble(priv, false);
-				printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
-						   dev->name);
-			}
-		}
-
-		if (use_global_register_lock)
-			spin_unlock_irqrestore(&global_register_lock, flags);
-		else
-			mutex_unlock(&priv->phydev->mdio.bus->mdio_lock);
+	if (likely(!status_change))
 		return;
+
+	/* Tell core. */
+	if (link_info.s.link_up) {
+		if (!netif_carrier_ok(dev))
+			netif_carrier_on(dev);
+	} else if (netif_carrier_ok(dev)) {
+		netif_carrier_off(dev);
 	}
-
-	/* Since the 10Mbps preamble workaround is allowed we need to enable
-	 * preamble checking, FCS stripping, and clear error bits on
-	 * every speed change. If errors occur during 10Mbps operation
-	 * the above code will change this stuff
-	 */
-	cvm_oct_set_hw_preamble(priv, true);
-
-	if (priv->phydev == NULL) {
-		link_info = cvmx_helper_link_autoconf(priv->port);
-		priv->link_info = link_info.u64;
-	}
-
-	if (use_global_register_lock)
-		spin_unlock_irqrestore(&global_register_lock, flags);
-	else
-		mutex_unlock(&priv->phydev->mdio.bus->mdio_lock);
-
-	if (priv->phydev == NULL) {
-		/* Tell core. */
-		if (link_info.s.link_up) {
-			if (!netif_carrier_ok(dev))
-				netif_carrier_on(dev);
-		} else if (netif_carrier_ok(dev)) {
-			netif_carrier_off(dev);
-		}
-		cvm_oct_note_carrier(priv, link_info);
-	}
-}
-
-static int cmv_oct_rgmii_gmx_interrupt(int interface)
-{
-	int index;
-	int count = 0;
-
-	/* Loop through every port of this interface */
-	for (index = 0;
-	     index < cvmx_helper_ports_on_interface(interface);
-	     index++) {
-		union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
-
-		/* Read the GMX interrupt status bits */
-		gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
-					  (index, interface));
-		gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
-					  (index, interface));
-
-		/* Poll the port if inband status changed */
-		if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link ||
-		    gmx_rx_int_reg.s.phy_spd) {
-			struct net_device *dev =
-				    cvm_oct_device[cvmx_helper_get_ipd_port
-						   (interface, index)];
-			struct octeon_ethernet *priv = netdev_priv(dev);
-
-			if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
-				queue_work(cvm_oct_poll_queue,
-					   &priv->port_work);
-
-			gmx_rx_int_reg.u64 = 0;
-			gmx_rx_int_reg.s.phy_dupx = 1;
-			gmx_rx_int_reg.s.phy_link = 1;
-			gmx_rx_int_reg.s.phy_spd = 1;
-			cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
-				       gmx_rx_int_reg.u64);
-			count++;
-		}
-	}
-	return count;
-}
-
-static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
-{
-	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
-	int count = 0;
-
-	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
-
-	/* Check and see if this interrupt was caused by the GMX0 block */
-	if (rsl_int_blocks.s.gmx0)
-		count += cmv_oct_rgmii_gmx_interrupt(0);
-
-	/* Check and see if this interrupt was caused by the GMX1 block */
-	if (rsl_int_blocks.s.gmx1)
-		count += cmv_oct_rgmii_gmx_interrupt(1);
-
-	return count ? IRQ_HANDLED : IRQ_NONE;
+	cvm_oct_note_carrier(priv, link_info);
 }
 
 int cvm_oct_rgmii_open(struct net_device *dev)
 {
-	return cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
-}
-
-static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
-{
-	struct octeon_ethernet *priv =
-		container_of(work, struct octeon_ethernet, port_work);
-	cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
-}
-
-int cvm_oct_rgmii_init(struct net_device *dev)
-{
 	struct octeon_ethernet *priv = netdev_priv(dev);
-	int r;
+	int ret;
 
-	cvm_oct_common_init(dev);
-	INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
-	/*
-	 * Due to GMX errata in CN3XXX series chips, it is necessary
-	 * to take the link down immediately when the PHY changes
-	 * state. In order to do this we call the poll function every
-	 * time the RGMII inband status changes.  This may cause
-	 * problems if the PHY doesn't implement inband status
-	 * properly.
-	 */
-	if (number_rgmii_ports == 0) {
-		r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
-				IRQF_SHARED, "RGMII", &number_rgmii_ports);
-		if (r != 0)
-			return r;
-	}
-	number_rgmii_ports++;
+	ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
+	if (ret)
+		return ret;
 
-	/*
-	 * Only true RGMII ports need to be polled. In GMII mode, port
-	 * 0 is really a RGMII port.
-	 */
-	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
-	     && (priv->port == 0))
-	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
-
-		if (!octeon_is_simulation()) {
-
-			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
-			int interface = INTERFACE(priv->port);
-			int index = INDEX(priv->port);
-
-			/*
-			 * Enable interrupts on inband status changes
-			 * for this port.
-			 */
-			gmx_rx_int_en.u64 = 0;
-			gmx_rx_int_en.s.phy_dupx = 1;
-			gmx_rx_int_en.s.phy_link = 1;
-			gmx_rx_int_en.s.phy_spd = 1;
-			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
-				       gmx_rx_int_en.u64);
+	if (priv->phydev) {
+		/*
+		 * In phydev mode, we still need periodic polling for preamble
+		 * error checking, and we also need to call this function on
+		 * every link state change.
+		 *
+		 * Only true RGMII ports need to be polled. In GMII mode, port
+		 * 0 is really a RGMII port.
+		 */
+		if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
+		     priv->port  == 0) ||
+		    (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+			priv->poll = cvm_oct_check_preamble_errors;
+			cvm_oct_check_preamble_errors(dev);
 		}
 	}
 
 	return 0;
 }
-
-void cvm_oct_rgmii_uninit(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-
-	cvm_oct_common_uninit(dev);
-
-	/*
-	 * Only true RGMII ports need to be polled. In GMII mode, port
-	 * 0 is really a RGMII port.
-	 */
-	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
-	     && (priv->port == 0))
-	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
-
-		if (!octeon_is_simulation()) {
-
-			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
-			int interface = INTERFACE(priv->port);
-			int index = INDEX(priv->port);
-
-			/*
-			 * Disable interrupts on inband status changes
-			 * for this port.
-			 */
-			gmx_rx_int_en.u64 =
-			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
-					  (index, interface));
-			gmx_rx_int_en.s.phy_dupx = 0;
-			gmx_rx_int_en.s.phy_link = 0;
-			gmx_rx_int_en.s.phy_spd = 0;
-			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
-				       gmx_rx_int_en.u64);
-		}
-	}
-
-	/* Remove the interrupt handler when the last port is removed. */
-	number_rgmii_ports--;
-	if (number_rgmii_ports == 0)
-		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
-	cancel_work_sync(&priv->port_work);
-}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 6aed3cf..ed55304 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -26,8 +26,6 @@
 #include <net/xfrm.h>
 #endif /* CONFIG_XFRM */
 
-#include <linux/atomic.h>
-
 #include <asm/octeon/octeon.h>
 
 #include "ethernet-defines.h"
@@ -364,17 +362,8 @@
 
 				/* Increment RX stats for virtual ports */
 				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
-#ifdef CONFIG_64BIT
-					atomic64_add(1,
-						     (atomic64_t *)&priv->stats.rx_packets);
-					atomic64_add(skb->len,
-						     (atomic64_t *)&priv->stats.rx_bytes);
-#else
-					atomic_add(1,
-						   (atomic_t *)&priv->stats.rx_packets);
-					atomic_add(skb->len,
-						   (atomic_t *)&priv->stats.rx_bytes);
-#endif
+					priv->stats.rx_packets++;
+					priv->stats.rx_bytes += skb->len;
 				}
 				netif_receive_skb(skb);
 			} else {
@@ -383,13 +372,7 @@
 				  printk_ratelimited("%s: Device not up, packet dropped\n",
 					   dev->name);
 				*/
-#ifdef CONFIG_64BIT
-				atomic64_add(1,
-					     (atomic64_t *)&priv->stats.rx_dropped);
-#else
-				atomic_add(1,
-					   (atomic_t *)&priv->stats.rx_dropped);
-#endif
+				priv->stats.rx_dropped++;
 				dev_kfree_skb_irq(skb);
 			}
 		} else {
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index c053c4a..e229ebf 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -95,10 +95,10 @@
 	for (qos = 0; qos < queues_per_port; qos++) {
 		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
 			continue;
-		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
 						       MAX_SKB_TO_FREE);
 		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
-							 priv->fau+qos*4);
+							 priv->fau + qos * 4);
 
 
 		total_freed += skb_to_free;
@@ -126,7 +126,7 @@
 		}
 		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
 	}
-	if (total_freed >= 0 && netif_queue_stopped(dev))
+	if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
 		netif_wake_queue(dev);
 	if (total_remaining)
 		cvm_oct_kick_tx_poll_watchdog();
@@ -403,7 +403,7 @@
 	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
 	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
 		/* Use hardware checksum calc */
-		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
+		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
 	}
 
 	if (USE_ASYNC_IOBDMA) {
@@ -419,7 +419,8 @@
 		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
 	}
 
-	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+						priv->fau + qos * 4);
 
 	/*
 	 * If we're sending faster than the receive can free them then
@@ -430,7 +431,7 @@
 
 	if (pko_command.s.dontfree) {
 		queue_type = QUEUE_CORE;
-		pko_command.s.reg0 = priv->fau+qos*4;
+		pko_command.s.reg0 = priv->fau + qos * 4;
 	} else {
 		queue_type = QUEUE_HW;
 	}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index f69fb5c..00adc52 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -226,18 +226,7 @@
 		priv->stats.multicast += rx_status.multicast_packets;
 		priv->stats.rx_crc_errors += rx_status.inb_errors;
 		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-
-		/*
-		 * The drop counter must be incremented atomically
-		 * since the RX tasklet also increments it.
-		 */
-#ifdef CONFIG_64BIT
-		atomic64_add(rx_status.dropped_packets,
-			     (atomic64_t *)&priv->stats.rx_dropped);
-#else
-		atomic_add(rx_status.dropped_packets,
-			     (atomic_t *)&priv->stats.rx_dropped);
-#endif
+		priv->stats.rx_dropped += rx_status.dropped_packets;
 	}
 
 	return &priv->stats;
@@ -601,8 +590,8 @@
 #endif
 };
 static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
-	.ndo_init		= cvm_oct_rgmii_init,
-	.ndo_uninit		= cvm_oct_rgmii_uninit,
+	.ndo_init		= cvm_oct_common_init,
+	.ndo_uninit		= cvm_oct_common_uninit,
 	.ndo_open		= cvm_oct_rgmii_open,
 	.ndo_stop		= cvm_oct_common_stop,
 	.ndo_start_xmit		= cvm_oct_xmit,
@@ -790,7 +779,6 @@
 				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
 
 			switch (priv->imode) {
-
 			/* These types don't support ports to IPD/PKO */
 			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
 			case CVMX_HELPER_INTERFACE_MODE_PCIE:
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index fdf24d1..5b4fdd2 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -41,20 +41,18 @@
 	/* Device statistics */
 	struct net_device_stats stats;
 	struct phy_device *phydev;
+	unsigned int last_speed;
 	unsigned int last_link;
 	/* Last negotiated link state */
 	u64 link_info;
 	/* Called periodically to check link status */
 	void (*poll)(struct net_device *dev);
 	struct delayed_work	port_periodic_work;
-	struct work_struct	port_work;	/* may be unused. */
 	struct device_node	*of_node;
 };
 
 int cvm_oct_free_work(void *work_queue_entry);
 
-int cvm_oct_rgmii_init(struct net_device *dev);
-void cvm_oct_rgmii_uninit(struct net_device *dev);
 int cvm_oct_rgmii_open(struct net_device *dev);
 
 int cvm_oct_sgmii_init(struct net_device *dev);
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
deleted file mode 100644
index d277f04..0000000
--- a/drivers/staging/olpc_dcon/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-config FB_OLPC_DCON
-	tristate "One Laptop Per Child Display CONtroller support"
-	depends on OLPC && FB
-	depends on I2C
-	depends on (GPIO_CS5535 || GPIO_CS5535=n)
-	select BACKLIGHT_CLASS_DEVICE
-	---help---
-	  In order to support very low power operation, the XO laptop uses a
-	  secondary Display CONtroller, or DCON.  This secondary controller
-	  is present in the video pipeline between the primary display
-	  controller (integrate into the processor or chipset) and the LCD
-	  panel.  It allows the main processor/display controller to be
-	  completely powered off while still retaining an image on the display.
-	  This controller is only available on OLPC platforms.  Unless you have
-	  one of these platforms, you will want to say 'N'.
-
-config FB_OLPC_DCON_1
-	bool "OLPC XO-1 DCON support"
-	depends on FB_OLPC_DCON && GPIO_CS5535
-	default y
-	---help---
-	  Enable support for the DCON in XO-1 model laptops.  The kernel
-	  communicates with the DCON using model-specific code.  If you
-	  have an XO-1 (or if you're unsure what model you have), you should
-	  say 'Y'.
-
-config FB_OLPC_DCON_1_5
-	bool "OLPC XO-1.5 DCON support"
-	depends on FB_OLPC_DCON && ACPI
-	default y
-	---help---
-	  Enable support for the DCON in XO-1.5 model laptops.  The kernel
-	  communicates with the DCON using model-specific code.  If you
-	  have an XO-1.5 (or if you're unsure what model you have), you
-	  should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
deleted file mode 100644
index 36c7e67..0000000
--- a/drivers/staging/olpc_dcon/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-olpc-dcon-objs += olpc_dcon.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1)	+= olpc_dcon_xo_1.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5)	+= olpc_dcon_xo_1_5.o
-obj-$(CONFIG_FB_OLPC_DCON)	+= olpc-dcon.o
-
-
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
deleted file mode 100644
index 61c2e65..0000000
--- a/drivers/staging/olpc_dcon/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-TODO:
-	- see if vx855 gpio API can be made similar enough to cs5535 so we can
-	  share more code
-	- allow simultaneous XO-1 and XO-1.5 support
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-copy:
-	Daniel Drake <dsd@laptop.org>
-	Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
deleted file mode 100644
index f45b2ef..0000000
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ /dev/null
@@ -1,813 +0,0 @@
-/*
- * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
- *
- * Copyright © 2006-2007  Red Hat, Inc.
- * Copyright © 2006-2007  Advanced Micro Devices, Inc.
- * Copyright © 2009       VIA Technology, Inc.
- * Copyright (c) 2010-2011  Andres Salomon <dilinger@queued.net>
- *
- * This program is free software.  You can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/fb.h>
-#include <linux/console.h>
-#include <linux/i2c.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/backlight.h>
-#include <linux/device.h>
-#include <linux/uaccess.h>
-#include <linux/ctype.h>
-#include <linux/reboot.h>
-#include <linux/olpc-ec.h>
-#include <asm/tsc.h>
-#include <asm/olpc.h>
-
-#include "olpc_dcon.h"
-
-/* Module definitions */
-
-static ushort resumeline = 898;
-module_param(resumeline, ushort, 0444);
-
-static struct dcon_platform_data *pdata;
-
-/* I2C structures */
-
-/* Platform devices */
-static struct platform_device *dcon_device;
-
-static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
-
-static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
-{
-	return i2c_smbus_write_word_data(dcon->client, reg, val);
-}
-
-static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
-{
-	return i2c_smbus_read_word_data(dcon->client, reg);
-}
-
-/* ===== API functions - these are called by a variety of users ==== */
-
-static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
-{
-	u16 ver;
-	int rc = 0;
-
-	ver = dcon_read(dcon, DCON_REG_ID);
-	if ((ver >> 8) != 0xDC) {
-		pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
-		rc = -ENXIO;
-		goto err;
-	}
-
-	if (is_init) {
-		pr_info("Discovered DCON version %x\n", ver & 0xFF);
-		rc = pdata->init(dcon);
-		if (rc != 0) {
-			pr_err("Unable to init.\n");
-			goto err;
-		}
-	}
-
-	if (ver < 0xdc02) {
-		dev_err(&dcon->client->dev,
-				"DCON v1 is unsupported, giving up..\n");
-		rc = -ENODEV;
-		goto err;
-	}
-
-	/* SDRAM setup/hold time */
-	dcon_write(dcon, 0x3a, 0xc040);
-	dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000);  /* clear option bits */
-	dcon_write(dcon, DCON_REG_MEM_OPT_A,
-				MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
-	dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
-
-	/* Colour swizzle, AA, no passthrough, backlight */
-	if (is_init) {
-		dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
-				MODE_CSWIZZLE | MODE_COL_AA;
-	}
-	dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
-
-	/* Set the scanline to interrupt on during resume */
-	dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
-
-err:
-	return rc;
-}
-
-/*
- * The smbus doesn't always come back due to what is believed to be
- * hardware (power rail) bugs.  For older models where this is known to
- * occur, our solution is to attempt to wait for the bus to stabilize;
- * if it doesn't happen, cut power to the dcon, repower it, and wait
- * for the bus to stabilize.  Rinse, repeat until we have a working
- * smbus.  For newer models, we simply BUG(); we want to know if this
- * still happens despite the power fixes that have been made!
- */
-static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
-{
-	unsigned long timeout;
-	u8 pm;
-	int x;
-
-power_up:
-	if (is_powered_down) {
-		pm = 1;
-		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
-		if (x) {
-			pr_warn("unable to force dcon to power up: %d!\n", x);
-			return x;
-		}
-		usleep_range(10000, 11000);  /* we'll be conservative */
-	}
-
-	pdata->bus_stabilize_wiggle();
-
-	for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
-		usleep_range(1000, 1100);
-		x = dcon_read(dcon, DCON_REG_ID);
-	}
-	if (x < 0) {
-		pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
-		BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
-		pm = 0;
-		olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
-		msleep(100);
-		is_powered_down = 1;
-		goto power_up;	/* argh, stupid hardware.. */
-	}
-
-	if (is_powered_down)
-		return dcon_hw_init(dcon, 0);
-	return 0;
-}
-
-static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
-{
-	dcon->bl_val = level;
-	dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
-
-	/* Purposely turn off the backlight when we go to level 0 */
-	if (dcon->bl_val == 0) {
-		dcon->disp_mode &= ~MODE_BL_ENABLE;
-		dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
-	} else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
-		dcon->disp_mode |= MODE_BL_ENABLE;
-		dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
-	}
-}
-
-/* Set the output type to either color or mono */
-static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
-{
-	if (dcon->mono == enable_mono)
-		return 0;
-
-	dcon->mono = enable_mono;
-
-	if (enable_mono) {
-		dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
-		dcon->disp_mode |= MODE_MONO_LUMA;
-	} else {
-		dcon->disp_mode &= ~(MODE_MONO_LUMA);
-		dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
-	}
-
-	dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
-	return 0;
-}
-
-/* For now, this will be really stupid - we need to address how
- * DCONLOAD works in a sleep and account for it accordingly
- */
-
-static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
-{
-	int x;
-
-	/* Turn off the backlight and put the DCON to sleep */
-
-	if (dcon->asleep == sleep)
-		return;
-
-	if (!olpc_board_at_least(olpc_board(0xc2)))
-		return;
-
-	if (sleep) {
-		u8 pm = 0;
-
-		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
-		if (x)
-			pr_warn("unable to force dcon to power down: %d!\n", x);
-		else
-			dcon->asleep = sleep;
-	} else {
-		/* Only re-enable the backlight if the backlight value is set */
-		if (dcon->bl_val != 0)
-			dcon->disp_mode |= MODE_BL_ENABLE;
-		x = dcon_bus_stabilize(dcon, 1);
-		if (x)
-			pr_warn("unable to reinit dcon hardware: %d!\n", x);
-		else
-			dcon->asleep = sleep;
-
-		/* Restore backlight */
-		dcon_set_backlight(dcon, dcon->bl_val);
-	}
-
-	/* We should turn off some stuff in the framebuffer - but what? */
-}
-
-/* the DCON seems to get confused if we change DCONLOAD too
- * frequently -- i.e., approximately faster than frame time.
- * normally we don't change it this fast, so in general we won't
- * delay here.
- */
-static void dcon_load_holdoff(struct dcon_priv *dcon)
-{
-	ktime_t delta_t, now;
-
-	while (1) {
-		now = ktime_get();
-		delta_t = ktime_sub(now, dcon->load_time);
-		if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
-			break;
-		mdelay(4);
-	}
-}
-
-static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
-{
-	int err;
-
-	console_lock();
-	if (!lock_fb_info(dcon->fbinfo)) {
-		console_unlock();
-		dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
-		return false;
-	}
-
-	dcon->ignore_fb_events = true;
-	err = fb_blank(dcon->fbinfo,
-			blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
-	dcon->ignore_fb_events = false;
-	unlock_fb_info(dcon->fbinfo);
-	console_unlock();
-
-	if (err) {
-		dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
-				blank ? "" : "un");
-		return false;
-	}
-	return true;
-}
-
-/* Set the source of the display (CPU or DCON) */
-static void dcon_source_switch(struct work_struct *work)
-{
-	struct dcon_priv *dcon = container_of(work, struct dcon_priv,
-			switch_source);
-	int source = dcon->pending_src;
-
-	if (dcon->curr_src == source)
-		return;
-
-	dcon_load_holdoff(dcon);
-
-	dcon->switched = false;
-
-	switch (source) {
-	case DCON_SOURCE_CPU:
-		pr_info("dcon_source_switch to CPU\n");
-		/* Enable the scanline interrupt bit */
-		if (dcon_write(dcon, DCON_REG_MODE,
-				dcon->disp_mode | MODE_SCAN_INT))
-			pr_err("couldn't enable scanline interrupt!\n");
-		else
-			/* Wait up to one second for the scanline interrupt */
-			wait_event_timeout(dcon->waitq, dcon->switched, HZ);
-
-		if (!dcon->switched)
-			pr_err("Timeout entering CPU mode; expect a screen glitch.\n");
-
-		/* Turn off the scanline interrupt */
-		if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
-			pr_err("couldn't disable scanline interrupt!\n");
-
-		/*
-		 * Ideally we'd like to disable interrupts here so that the
-		 * fb unblanking and DCON turn on happen at a known time value;
-		 * however, we can't do that right now with fb_blank
-		 * messing with semaphores.
-		 *
-		 * For now, we just hope..
-		 */
-		if (!dcon_blank_fb(dcon, false)) {
-			pr_err("Failed to enter CPU mode\n");
-			dcon->pending_src = DCON_SOURCE_DCON;
-			return;
-		}
-
-		/* And turn off the DCON */
-		pdata->set_dconload(1);
-		dcon->load_time = ktime_get();
-
-		pr_info("The CPU has control\n");
-		break;
-	case DCON_SOURCE_DCON:
-	{
-		ktime_t delta_t;
-
-		pr_info("dcon_source_switch to DCON\n");
-
-		/* Clear DCONLOAD - this implies that the DCON is in control */
-		pdata->set_dconload(0);
-		dcon->load_time = ktime_get();
-
-		wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);
-
-		if (!dcon->switched) {
-			pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
-		} else {
-			/* sometimes the DCON doesn't follow its own rules,
-			 * and doesn't wait for two vsync pulses before
-			 * ack'ing the frame load with an IRQ.  the result
-			 * is that the display shows the *previously*
-			 * loaded frame.  we can detect this by looking at
-			 * the time between asserting DCONLOAD and the IRQ --
-			 * if it's less than 20msec, then the DCON couldn't
-			 * have seen two VSYNC pulses.  in that case we
-			 * deassert and reassert, and hope for the best.
-			 * see http://dev.laptop.org/ticket/9664
-			 */
-			delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
-			if (dcon->switched && ktime_to_ns(delta_t)
-			    < NSEC_PER_MSEC * 20) {
-				pr_err("missed loading, retrying\n");
-				pdata->set_dconload(1);
-				mdelay(41);
-				pdata->set_dconload(0);
-				dcon->load_time = ktime_get();
-				mdelay(41);
-			}
-		}
-
-		dcon_blank_fb(dcon, true);
-		pr_info("The DCON has control\n");
-		break;
-	}
-	default:
-		BUG();
-	}
-
-	dcon->curr_src = source;
-}
-
-static void dcon_set_source(struct dcon_priv *dcon, int arg)
-{
-	if (dcon->pending_src == arg)
-		return;
-
-	dcon->pending_src = arg;
-
-	if (dcon->curr_src != arg)
-		schedule_work(&dcon->switch_source);
-}
-
-static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
-{
-	dcon_set_source(dcon, arg);
-	flush_scheduled_work();
-}
-
-static ssize_t dcon_mode_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	struct dcon_priv *dcon = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%4.4X\n", dcon->disp_mode);
-}
-
-static ssize_t dcon_sleep_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	struct dcon_priv *dcon = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%d\n", dcon->asleep);
-}
-
-static ssize_t dcon_freeze_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	struct dcon_priv *dcon = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
-}
-
-static ssize_t dcon_mono_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	struct dcon_priv *dcon = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%d\n", dcon->mono);
-}
-
-static ssize_t dcon_resumeline_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%d\n", resumeline);
-}
-
-static ssize_t dcon_mono_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	unsigned long enable_mono;
-	int rc;
-
-	rc = kstrtoul(buf, 10, &enable_mono);
-	if (rc)
-		return rc;
-
-	dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
-
-	return count;
-}
-
-static ssize_t dcon_freeze_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct dcon_priv *dcon = dev_get_drvdata(dev);
-	unsigned long output;
-	int ret;
-
-	ret = kstrtoul(buf, 10, &output);
-	if (ret)
-		return ret;
-
-	pr_info("dcon_freeze_store: %lu\n", output);
-
-	switch (output) {
-	case 0:
-		dcon_set_source(dcon, DCON_SOURCE_CPU);
-		break;
-	case 1:
-		dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
-		break;
-	case 2:  /* normally unused */
-		dcon_set_source(dcon, DCON_SOURCE_DCON);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return count;
-}
-
-static ssize_t dcon_resumeline_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	unsigned short rl;
-	int rc;
-
-	rc = kstrtou16(buf, 10, &rl);
-	if (rc)
-		return rc;
-
-	resumeline = rl;
-	dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
-
-	return count;
-}
-
-static ssize_t dcon_sleep_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	unsigned long output;
-	int ret;
-
-	ret = kstrtoul(buf, 10, &output);
-	if (ret)
-		return ret;
-
-	dcon_sleep(dev_get_drvdata(dev), output ? true : false);
-	return count;
-}
-
-static struct device_attribute dcon_device_files[] = {
-	__ATTR(mode, 0444, dcon_mode_show, NULL),
-	__ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
-	__ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
-	__ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
-	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
-};
-
-static int dcon_bl_update(struct backlight_device *dev)
-{
-	struct dcon_priv *dcon = bl_get_data(dev);
-	u8 level = dev->props.brightness & 0x0F;
-
-	if (dev->props.power != FB_BLANK_UNBLANK)
-		level = 0;
-
-	if (level != dcon->bl_val)
-		dcon_set_backlight(dcon, level);
-
-	/* power down the DCON when the screen is blanked */
-	if (!dcon->ignore_fb_events)
-		dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
-
-	return 0;
-}
-
-static int dcon_bl_get(struct backlight_device *dev)
-{
-	struct dcon_priv *dcon = bl_get_data(dev);
-
-	return dcon->bl_val;
-}
-
-static const struct backlight_ops dcon_bl_ops = {
-	.update_status = dcon_bl_update,
-	.get_brightness = dcon_bl_get,
-};
-
-static struct backlight_properties dcon_bl_props = {
-	.max_brightness = 15,
-	.type = BACKLIGHT_RAW,
-	.power = FB_BLANK_UNBLANK,
-};
-
-static int dcon_reboot_notify(struct notifier_block *nb,
-			      unsigned long foo, void *bar)
-{
-	struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
-
-	if (!dcon || !dcon->client)
-		return NOTIFY_DONE;
-
-	/* Turn off the DCON. Entirely. */
-	dcon_write(dcon, DCON_REG_MODE, 0x39);
-	dcon_write(dcon, DCON_REG_MODE, 0x32);
-	return NOTIFY_DONE;
-}
-
-static int unfreeze_on_panic(struct notifier_block *nb,
-			     unsigned long e, void *p)
-{
-	pdata->set_dconload(1);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block dcon_panic_nb = {
-	.notifier_call = unfreeze_on_panic,
-};
-
-static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
-	strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
-
-	return 0;
-}
-
-static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
-	struct dcon_priv *dcon;
-	int rc, i, j;
-
-	if (!pdata)
-		return -ENXIO;
-
-	dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
-	if (!dcon)
-		return -ENOMEM;
-
-	dcon->client = client;
-	init_waitqueue_head(&dcon->waitq);
-	INIT_WORK(&dcon->switch_source, dcon_source_switch);
-	dcon->reboot_nb.notifier_call = dcon_reboot_notify;
-	dcon->reboot_nb.priority = -1;
-
-	i2c_set_clientdata(client, dcon);
-
-	if (num_registered_fb < 1) {
-		dev_err(&client->dev, "DCON driver requires a registered fb\n");
-		rc = -EIO;
-		goto einit;
-	}
-	dcon->fbinfo = registered_fb[0];
-
-	rc = dcon_hw_init(dcon, 1);
-	if (rc)
-		goto einit;
-
-	/* Add the DCON device */
-
-	dcon_device = platform_device_alloc("dcon", -1);
-
-	if (!dcon_device) {
-		pr_err("Unable to create the DCON device\n");
-		rc = -ENOMEM;
-		goto eirq;
-	}
-	rc = platform_device_add(dcon_device);
-	platform_set_drvdata(dcon_device, dcon);
-
-	if (rc) {
-		pr_err("Unable to add the DCON device\n");
-		goto edev;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
-		rc = device_create_file(&dcon_device->dev,
-					&dcon_device_files[i]);
-		if (rc) {
-			dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
-			goto ecreate;
-		}
-	}
-
-	dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
-
-	/* Add the backlight device for the DCON */
-	dcon_bl_props.brightness = dcon->bl_val;
-	dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
-		dcon, &dcon_bl_ops, &dcon_bl_props);
-	if (IS_ERR(dcon->bl_dev)) {
-		dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
-				PTR_ERR(dcon->bl_dev));
-		dcon->bl_dev = NULL;
-	}
-
-	register_reboot_notifier(&dcon->reboot_nb);
-	atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
-
-	return 0;
-
- ecreate:
-	for (j = 0; j < i; j++)
-		device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
- edev:
-	platform_device_unregister(dcon_device);
-	dcon_device = NULL;
- eirq:
-	free_irq(DCON_IRQ, dcon);
- einit:
-	kfree(dcon);
-	return rc;
-}
-
-static int dcon_remove(struct i2c_client *client)
-{
-	struct dcon_priv *dcon = i2c_get_clientdata(client);
-
-	unregister_reboot_notifier(&dcon->reboot_nb);
-	atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
-
-	free_irq(DCON_IRQ, dcon);
-
-	backlight_device_unregister(dcon->bl_dev);
-
-	if (dcon_device)
-		platform_device_unregister(dcon_device);
-	cancel_work_sync(&dcon->switch_source);
-
-	kfree(dcon);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int dcon_suspend(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dcon_priv *dcon = i2c_get_clientdata(client);
-
-	if (!dcon->asleep) {
-		/* Set up the DCON to have the source */
-		dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
-	}
-
-	return 0;
-}
-
-static int dcon_resume(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dcon_priv *dcon = i2c_get_clientdata(client);
-
-	if (!dcon->asleep) {
-		dcon_bus_stabilize(dcon, 0);
-		dcon_set_source(dcon, DCON_SOURCE_CPU);
-	}
-
-	return 0;
-}
-
-#else
-
-#define dcon_suspend NULL
-#define dcon_resume NULL
-
-#endif /* CONFIG_PM */
-
-irqreturn_t dcon_interrupt(int irq, void *id)
-{
-	struct dcon_priv *dcon = id;
-	u8 status;
-
-	if (pdata->read_status(&status))
-		return IRQ_NONE;
-
-	switch (status & 3) {
-	case 3:
-		pr_debug("DCONLOAD_MISSED interrupt\n");
-		break;
-
-	case 2:	/* switch to DCON mode */
-	case 1: /* switch to CPU mode */
-		dcon->switched = true;
-		dcon->irq_time = ktime_get();
-		wake_up(&dcon->waitq);
-		break;
-
-	case 0:
-		/* workaround resume case:  the DCON (on 1.5) doesn't
-		 * ever assert status 0x01 when switching to CPU mode
-		 * during resume.  this is because DCONLOAD is de-asserted
-		 * _immediately_ upon exiting S3, so the actual release
-		 * of the DCON happened long before this point.
-		 * see http://dev.laptop.org/ticket/9869
-		 */
-		if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
-			dcon->switched = true;
-			dcon->irq_time = ktime_get();
-			wake_up(&dcon->waitq);
-			pr_debug("switching w/ status 0/0\n");
-		} else {
-			pr_debug("scanline interrupt w/CPU\n");
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
-static const struct dev_pm_ops dcon_pm_ops = {
-	.suspend = dcon_suspend,
-	.resume = dcon_resume,
-};
-
-static const struct i2c_device_id dcon_idtable[] = {
-	{ "olpc_dcon",  0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, dcon_idtable);
-
-static struct i2c_driver dcon_driver = {
-	.driver = {
-		.name	= "olpc_dcon",
-		.pm = &dcon_pm_ops,
-	},
-	.class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
-	.id_table = dcon_idtable,
-	.probe = dcon_probe,
-	.remove = dcon_remove,
-	.detect = dcon_detect,
-	.address_list = normal_i2c,
-};
-
-static int __init olpc_dcon_init(void)
-{
-#ifdef CONFIG_FB_OLPC_DCON_1_5
-	/* XO-1.5 */
-	if (olpc_board_at_least(olpc_board(0xd0)))
-		pdata = &dcon_pdata_xo_1_5;
-#endif
-#ifdef CONFIG_FB_OLPC_DCON_1
-	if (!pdata)
-		pdata = &dcon_pdata_xo_1;
-#endif
-
-	return i2c_add_driver(&dcon_driver);
-}
-
-static void __exit olpc_dcon_exit(void)
-{
-	i2c_del_driver(&dcon_driver);
-}
-
-module_init(olpc_dcon_init);
-module_exit(olpc_dcon_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
deleted file mode 100644
index 215e7ec..0000000
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef OLPC_DCON_H_
-#define OLPC_DCON_H_
-
-#include <linux/notifier.h>
-#include <linux/workqueue.h>
-
-/* DCON registers */
-
-#define DCON_REG_ID		 0
-#define DCON_REG_MODE		 1
-
-#define MODE_PASSTHRU	(1<<0)
-#define MODE_SLEEP	(1<<1)
-#define MODE_SLEEP_AUTO	(1<<2)
-#define MODE_BL_ENABLE	(1<<3)
-#define MODE_BLANK	(1<<4)
-#define MODE_CSWIZZLE	(1<<5)
-#define MODE_COL_AA	(1<<6)
-#define MODE_MONO_LUMA	(1<<7)
-#define MODE_SCAN_INT	(1<<8)
-#define MODE_CLOCKDIV	(1<<9)
-#define MODE_DEBUG	(1<<14)
-#define MODE_SELFTEST	(1<<15)
-
-#define DCON_REG_HRES		0x2
-#define DCON_REG_HTOTAL		0x3
-#define DCON_REG_HSYNC_WIDTH	0x4
-#define DCON_REG_VRES		0x5
-#define DCON_REG_VTOTAL		0x6
-#define DCON_REG_VSYNC_WIDTH	0x7
-#define DCON_REG_TIMEOUT	0x8
-#define DCON_REG_SCAN_INT	0x9
-#define DCON_REG_BRIGHT		0xa
-#define DCON_REG_MEM_OPT_A	0x41
-#define DCON_REG_MEM_OPT_B	0x42
-
-/* Load Delay Locked Loop (DLL) settings for clock delay */
-#define MEM_DLL_CLOCK_DELAY	(1<<0)
-/* Memory controller power down function */
-#define MEM_POWER_DOWN		(1<<8)
-/* Memory controller software reset */
-#define MEM_SOFT_RESET		(1<<0)
-
-/* Status values */
-
-#define DCONSTAT_SCANINT	0
-#define DCONSTAT_SCANINT_DCON	1
-#define DCONSTAT_DISPLAYLOAD	2
-#define DCONSTAT_MISSED		3
-
-/* Source values */
-
-#define DCON_SOURCE_DCON        0
-#define DCON_SOURCE_CPU         1
-
-/* Interrupt */
-#define DCON_IRQ                6
-
-struct dcon_priv {
-	struct i2c_client *client;
-	struct fb_info *fbinfo;
-	struct backlight_device *bl_dev;
-
-	wait_queue_head_t waitq;
-	struct work_struct switch_source;
-	struct notifier_block reboot_nb;
-
-	/* Shadow register for the DCON_REG_MODE register */
-	u8 disp_mode;
-
-	/* The current backlight value - this saves us some smbus traffic */
-	u8 bl_val;
-
-	/* Current source, initialized at probe time */
-	int curr_src;
-
-	/* Desired source */
-	int pending_src;
-
-	/* Variables used during switches */
-	bool switched;
-	ktime_t irq_time;
-	ktime_t load_time;
-
-	/* Current output type; true == mono, false == color */
-	bool mono;
-	bool asleep;
-	/* This get set while controlling fb blank state from the driver */
-	bool ignore_fb_events;
-};
-
-struct dcon_platform_data {
-	int (*init)(struct dcon_priv *);
-	void (*bus_stabilize_wiggle)(void);
-	void (*set_dconload)(int);
-	int (*read_status)(u8 *);
-};
-
-#include <linux/interrupt.h>
-
-irqreturn_t dcon_interrupt(int irq, void *id);
-
-#ifdef CONFIG_FB_OLPC_DCON_1
-extern struct dcon_platform_data dcon_pdata_xo_1;
-#endif
-
-#ifdef CONFIG_FB_OLPC_DCON_1_5
-extern struct dcon_platform_data dcon_pdata_xo_1_5;
-#endif
-
-#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
deleted file mode 100644
index 0c5a10c..0000000
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
- *
- * Copyright © 2006-2007  Red Hat, Inc.
- * Copyright © 2006-2007  Advanced Micro Devices, Inc.
- * Copyright © 2009       VIA Technology, Inc.
- * Copyright (c) 2010  Andres Salomon <dilinger@queued.net>
- *
- * This program is free software.  You can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cs5535.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <asm/olpc.h>
-
-#include "olpc_dcon.h"
-
-static int dcon_init_xo_1(struct dcon_priv *dcon)
-{
-	unsigned char lob;
-
-	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
-		pr_err("failed to request STAT0 GPIO\n");
-		return -EIO;
-	}
-	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
-		pr_err("failed to request STAT1 GPIO\n");
-		goto err_gp_stat1;
-	}
-	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
-		pr_err("failed to request IRQ GPIO\n");
-		goto err_gp_irq;
-	}
-	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
-		pr_err("failed to request LOAD GPIO\n");
-		goto err_gp_load;
-	}
-	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
-		pr_err("failed to request BLANK GPIO\n");
-		goto err_gp_blank;
-	}
-
-	/* Turn off the event enable for GPIO7 just to be safe */
-	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
-
-	/*
-	 * Determine the current state by reading the GPIO bit; earlier
-	 * stages of the boot process have established the state.
-	 *
-	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
-	 * this is because OFW will disable input for the pin and set a value..
-	 * READ_BACK will only contain a valid value if input is enabled and
-	 * then a value is set.  So, future readings of the pin can use
-	 * READ_BACK, but the first one cannot.  Awesome, huh?
-	 */
-	dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
-		? DCON_SOURCE_CPU
-		: DCON_SOURCE_DCON;
-	dcon->pending_src = dcon->curr_src;
-
-	/* Set the directions for the GPIO pins */
-	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
-	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
-	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
-	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
-	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
-			dcon->curr_src == DCON_SOURCE_CPU);
-
-	/* Set up the interrupt mappings */
-
-	/* Set the IRQ to pair 2 */
-	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
-
-	/* Enable group 2 to trigger the DCON interrupt */
-	cs5535_gpio_set_irq(2, DCON_IRQ);
-
-	/* Select edge level for interrupt (in PIC) */
-	lob = inb(0x4d0);
-	lob &= ~(1 << DCON_IRQ);
-	outb(lob, 0x4d0);
-
-	/* Register the interrupt handler */
-	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
-		pr_err("failed to request DCON's irq\n");
-		goto err_req_irq;
-	}
-
-	/* Clear INV_EN for GPIO7 (DCONIRQ) */
-	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
-
-	/* Enable filter for GPIO12 (DCONBLANK) */
-	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
-
-	/* Disable filter for GPIO7 */
-	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
-
-	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
-	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
-
-	/* Add GPIO12 to the Filter Event Pair #7 */
-	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
-
-	/* Turn off negative Edge Enable for GPIO12 */
-	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
-
-	/* Enable negative Edge Enable for GPIO7 */
-	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
-
-	/* Zero the filter amount for Filter Event Pair #7 */
-	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
-
-	/* Clear the negative edge status for GPIO7 and GPIO12 */
-	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
-	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
-
-	/* FIXME:  Clear the positive status as well, just to be sure */
-	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
-	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
-
-	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
-	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
-
-	return 0;
-
-err_req_irq:
-	gpio_free(OLPC_GPIO_DCON_BLANK);
-err_gp_blank:
-	gpio_free(OLPC_GPIO_DCON_LOAD);
-err_gp_load:
-	gpio_free(OLPC_GPIO_DCON_IRQ);
-err_gp_irq:
-	gpio_free(OLPC_GPIO_DCON_STAT1);
-err_gp_stat1:
-	gpio_free(OLPC_GPIO_DCON_STAT0);
-	return -EIO;
-}
-
-static void dcon_wiggle_xo_1(void)
-{
-	int x;
-
-	/*
-	 * According to HiMax, when powering the DCON up we should hold
-	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
-	 * state machine to reset to a (sane) initial state.  Mitch Bradley
-	 * did some testing and discovered that holding for 16 SMB_CLK cycles
-	 * worked a lot more reliably, so that's what we do here.
-	 *
-	 * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
-	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
-	 * GPIO15.
-	 */
-	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
-	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
-	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
-	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
-	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
-	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
-	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
-	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
-	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
-
-	for (x = 0; x < 16; x++) {
-		udelay(5);
-		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
-		udelay(5);
-		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
-	}
-	udelay(5);
-	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
-	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
-	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
-}
-
-static void dcon_set_dconload_1(int val)
-{
-	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
-}
-
-static int dcon_read_status_xo_1(u8 *status)
-{
-	*status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
-	*status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
-
-	/* Clear the negative edge status for GPIO7 */
-	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
-
-	return 0;
-}
-
-struct dcon_platform_data dcon_pdata_xo_1 = {
-	.init = dcon_init_xo_1,
-	.bus_stabilize_wiggle = dcon_wiggle_xo_1,
-	.set_dconload = dcon_set_dconload_1,
-	.read_status = dcon_read_status_xo_1,
-};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
deleted file mode 100644
index 6a4d379..0000000
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2009,2010       One Laptop per Child
- *
- * This program is free software.  You can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <asm/olpc.h>
-
-/* TODO: this eventually belongs in linux/vx855.h */
-#define NR_VX855_GPI    14
-#define NR_VX855_GPO    13
-#define NR_VX855_GPIO   15
-
-#define VX855_GPI(n)    (n)
-#define VX855_GPO(n)    (NR_VX855_GPI + (n))
-#define VX855_GPIO(n)   (NR_VX855_GPI + NR_VX855_GPO + (n))
-
-#include "olpc_dcon.h"
-
-/* Hardware setup on the XO 1.5:
- *	DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
- *	DCONBLANK connects to VX855_GPIO8 (not SSPICLK)  unused in driver
- *	DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
- *	DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
- *	DCONIRQ connects to VX855_GPIO12
- *	DCONSMBDATA connects to VX855 graphics CRTSPD
- *	DCONSMBCLK connects to VX855 graphics CRTSPCLK
- */
-
-#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
-#define VX855_GPI_STATUS_CHG 0x450  /* PMIO_Rx50 */
-#define VX855_GPI_SCI_SMI 0x452  /* PMIO_Rx52 */
-#define BIT_GPIO12 0x40
-
-#define PREFIX "OLPC DCON:"
-
-static void dcon_clear_irq(void)
-{
-	/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
-	outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
-}
-
-static int dcon_was_irq(void)
-{
-	u_int8_t tmp;
-
-	/* irq status will appear in PMIO_Rx50[6] on gpio12 */
-	tmp = inb(VX855_GPI_STATUS_CHG);
-	return !!(tmp & BIT_GPIO12);
-
-	return 0;
-}
-
-static int dcon_init_xo_1_5(struct dcon_priv *dcon)
-{
-	unsigned int irq;
-
-	dcon_clear_irq();
-
-	/* set   PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
-	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
-
-	/* Determine the current state of DCONLOAD, likely set by firmware */
-	/* GPIO1 */
-	dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
-			DCON_SOURCE_CPU : DCON_SOURCE_DCON;
-	dcon->pending_src = dcon->curr_src;
-
-	/* we're sharing the IRQ with ACPI */
-	irq = acpi_gbl_FADT.sci_interrupt;
-	if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
-		pr_err("DCON (IRQ%d) allocation failed\n", irq);
-		return 1;
-	}
-
-	return 0;
-}
-
-static void set_i2c_line(int sda, int scl)
-{
-	unsigned char tmp;
-	unsigned int port = 0x26;
-
-	/* FIXME: This directly accesses the CRT GPIO controller !!! */
-	outb(port, 0x3c4);
-	tmp = inb(0x3c5);
-
-	if (scl)
-		tmp |= 0x20;
-	else
-		tmp &= ~0x20;
-
-	if (sda)
-		tmp |= 0x10;
-	else
-		tmp &= ~0x10;
-
-	tmp |= 0x01;
-
-	outb(port, 0x3c4);
-	outb(tmp, 0x3c5);
-}
-
-
-static void dcon_wiggle_xo_1_5(void)
-{
-	int x;
-
-	/*
-	 * According to HiMax, when powering the DCON up we should hold
-	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
-	 * state machine to reset to a (sane) initial state.  Mitch Bradley
-	 * did some testing and discovered that holding for 16 SMB_CLK cycles
-	 * worked a lot more reliably, so that's what we do here.
-	 */
-	set_i2c_line(1, 1);
-
-	for (x = 0; x < 16; x++) {
-		udelay(5);
-		set_i2c_line(1, 0);
-		udelay(5);
-		set_i2c_line(1, 1);
-	}
-	udelay(5);
-
-	/* set   PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
-	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
-}
-
-static void dcon_set_dconload_xo_1_5(int val)
-{
-	gpio_set_value(VX855_GPIO(1), val);
-}
-
-static int dcon_read_status_xo_1_5(u8 *status)
-{
-	if (!dcon_was_irq())
-		return -1;
-
-	/* i believe this is the same as "inb(0x44b) & 3" */
-	*status = gpio_get_value(VX855_GPI(10));
-	*status |= gpio_get_value(VX855_GPI(11)) << 1;
-
-	dcon_clear_irq();
-
-	return 0;
-}
-
-struct dcon_platform_data dcon_pdata_xo_1_5 = {
-	.init = dcon_init_xo_1_5,
-	.bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
-	.set_dconload = dcon_set_dconload_xo_1_5,
-	.read_status = dcon_read_status_xo_1_5,
-};
diff --git a/drivers/staging/panel/Kconfig b/drivers/staging/panel/Kconfig
deleted file mode 100644
index 3defa01..0000000
--- a/drivers/staging/panel/Kconfig
+++ /dev/null
@@ -1,278 +0,0 @@
-config PANEL
-	tristate "Parallel port LCD/Keypad Panel support"
-	depends on PARPORT
-	---help---
-	  Say Y here if you have an HD44780 or KS-0074 LCD connected to your
-	  parallel port. This driver also features 4 and 6-key keypads. The LCD
-	  is accessible through the /dev/lcd char device (10, 156), and the
-	  keypad through /dev/keypad (10, 185). Both require misc device to be
-	  enabled. This code can either be compiled as a module, or linked into
-	  the kernel and started at boot. If you don't understand what all this
-	  is about, say N.
-
-config PANEL_PARPORT
-	int "Default parallel port number (0=LPT1)"
-	depends on PANEL
-	range 0 255
-	default "0"
-	---help---
-	  This is the index of the parallel port the panel is connected to. One
-	  driver instance only supports one parallel port, so if your keypad
-	  and LCD are connected to two separate ports, you have to start two
-	  modules with different arguments. Numbering starts with '0' for LPT1,
-	  and so on.
-
-config PANEL_PROFILE
-	int "Default panel profile (0-5, 0=custom)"
-	depends on PANEL
-	range 0 5
-	default "5"
-	---help---
-	  To ease configuration, the driver supports different configuration
-	  profiles for past and recent wirings. These profiles can also be
-	  used to define an approximative configuration, completed by a few
-	  other options. Here are the profiles :
-
-	    0 = custom (see further)
-	    1 = 2x16 parallel LCD, old keypad
-	    2 = 2x16 serial LCD (KS-0074), new keypad
-	    3 = 2x16 parallel LCD (Hantronix), no keypad
-	    4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
-	    5 = 2x40 parallel LCD (old one), with old keypad
-
-	  Custom configurations allow you to define how your display is
-	  wired to the parallel port, and how it works. This is only intended
-	  for experts.
-
-config PANEL_KEYPAD
-	depends on PANEL && PANEL_PROFILE="0"
-	int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
-	range 0 3
-	default 0
-	---help---
-	  This enables and configures a keypad connected to the parallel port.
-	  The keys will be read from character device 10,185. Valid values are :
-
-	    0 : do not enable this driver
-	    1 : old 6 keys keypad
-	    2 : new 6 keys keypad, as used on the server at www.ant-computing.com
-	    3 : Nexcom NSA1045's 4 keys keypad
-
-	  New profiles can be described in the driver source. The driver also
-	  supports simultaneous keys pressed when the keypad supports them.
-
-config PANEL_LCD
-	depends on PANEL && PANEL_PROFILE="0"
-	int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
-	range 0 5
-	default 0
-	---help---
-	   This enables and configures an LCD connected to the parallel port.
-	   The driver includes an interpreter for escape codes starting with
-	   '\e[L' which are specific to the LCD, and a few ANSI codes. The
-	   driver will be registered as character device 10,156, usually
-	   under the name '/dev/lcd'. There are a total of 6 supported types :
-
-	     0 : do not enable the driver
-	     1 : custom configuration and wiring (see further)
-	     2 : 2x16 & 2x40 parallel LCD (old wiring)
-	     3 : 2x16 serial LCD (KS-0074 based)
-	     4 : 2x16 parallel LCD (Hantronix wiring)
-	     5 : 2x16 parallel LCD (Nexcom wiring)
-
-	   When type '1' is specified, other options will appear to configure
-	   more precise aspects (wiring, dimensions, protocol, ...). Please note
-	   that those values changed from the 2.4 driver for better consistency.
-
-config PANEL_LCD_HEIGHT
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-	int "Number of lines on the LCD (1-2)"
-	range 1 2
-	default 2
-	---help---
-	  This is the number of visible character lines on the LCD in custom profile.
-	  It can either be 1 or 2.
-
-config PANEL_LCD_WIDTH
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-	int "Number of characters per line on the LCD (1-40)"
-	range 1 40
-	default 40
-	---help---
-	  This is the number of characters per line on the LCD in custom profile.
-	  Common values are 16,20,24,40.
-
-config PANEL_LCD_BWIDTH
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-	int "Internal LCD line width (1-40, 40 by default)"
-	range 1 40
-	default 40
-	---help---
-	  Most LCDs use a standard controller which supports hardware lines of 40
-	  characters, although sometimes only 16, 20 or 24 of them are really wired
-	  to the terminal. This results in some non-visible but addressable characters,
-	  and is the case for most parallel LCDs. Other LCDs, and some serial ones,
-	  however, use the same line width internally as what is visible. The KS0074
-	  for example, uses 16 characters per line for 16 visible characters per line.
-
-	  This option lets you configure the value used by your LCD in 'custom' profile.
-	  If you don't know, put '40' here.
-
-config PANEL_LCD_HWIDTH
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-	int "Hardware LCD line width (1-64, 64 by default)"
-	range 1 64
-	default 64
-	---help---
-	  Most LCDs use a single address bit to differentiate line 0 and line 1. Since
-	  some of them need to be able to address 40 chars with the lower bits, they
-	  often use the immediately superior power of 2, which is 64, to address the
-	  next line.
-
-	  If you don't know what your LCD uses, in doubt let 16 here for a 2x16, and
-	  64 here for a 2x40.
-
-config PANEL_LCD_CHARSET
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-	int "LCD character set (0=normal, 1=KS0074)"
-	range 0 1
-	default 0
-	---help---
-	  Some controllers such as the KS0074 use a somewhat strange character set
-	  where many symbols are at unusual places. The driver knows how to map
-	  'standard' ASCII characters to the character sets used by these controllers.
-	  Valid values are :
-
-	     0 : normal (untranslated) character set
-	     1 : KS0074 character set
-
-	  If you don't know, use the normal one (0).
-
-config PANEL_LCD_PROTO
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-	int "LCD communication mode (0=parallel 8 bits, 1=serial)"
-	range 0 1
-	default 0
-	---help---
-	  This driver now supports any serial or parallel LCD wired to a parallel
-	  port. But before assigning signals, the driver needs to know if it will
-	  be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
-	  (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
-	  (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits
-	  parallel LCD, and 1 for a serial LCD.
-
-config PANEL_LCD_PIN_E
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
-	range -17 17
-	default 14
-	---help---
-	  This describes the number of the parallel port pin to which the LCD 'E'
-	  signal has been connected. It can be :
-
-	          0 : no connection (eg: connected to ground)
-	      1..17 : directly connected to any of these pins on the DB25 plug
-	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
-	  Default for the 'E' pin in custom profile is '14' (AUTOFEED).
-
-config PANEL_LCD_PIN_RS
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
-	range -17 17
-	default 17
-	---help---
-	  This describes the number of the parallel port pin to which the LCD 'RS'
-	  signal has been connected. It can be :
-
-	          0 : no connection (eg: connected to ground)
-	      1..17 : directly connected to any of these pins on the DB25 plug
-	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
-	  Default for the 'RS' pin in custom profile is '17' (SELECT IN).
-
-config PANEL_LCD_PIN_RW
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
-	range -17 17
-	default 16
-	---help---
-	  This describes the number of the parallel port pin to which the LCD 'RW'
-	  signal has been connected. It can be :
-
-	          0 : no connection (eg: connected to ground)
-	      1..17 : directly connected to any of these pins on the DB25 plug
-	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
-	  Default for the 'RW' pin in custom profile is '16' (INIT).
-
-config PANEL_LCD_PIN_SCL
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
-	range -17 17
-	default 1
-	---help---
-	  This describes the number of the parallel port pin to which the serial
-	  LCD 'SCL' signal has been connected. It can be :
-
-	          0 : no connection (eg: connected to ground)
-	      1..17 : directly connected to any of these pins on the DB25 plug
-	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
-	  Default for the 'SCL' pin in custom profile is '1' (STROBE).
-
-config PANEL_LCD_PIN_SDA
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
-	range -17 17
-	default 2
-	---help---
-	  This describes the number of the parallel port pin to which the serial
-	  LCD 'SDA' signal has been connected. It can be :
-
-	          0 : no connection (eg: connected to ground)
-	      1..17 : directly connected to any of these pins on the DB25 plug
-	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
-	  Default for the 'SDA' pin in custom profile is '2' (D0).
-
-config PANEL_LCD_PIN_BL
-	depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
-        int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
-	range -17 17
-	default 0
-	---help---
-	  This describes the number of the parallel port pin to which the LCD 'BL' signal
-          has been connected. It can be :
-
-	          0 : no connection (eg: connected to ground)
-	      1..17 : directly connected to any of these pins on the DB25 plug
-	    -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
-	  Default for the 'BL' pin in custom profile is '0' (uncontrolled).
-
-config PANEL_CHANGE_MESSAGE
-	depends on PANEL
-	bool "Change LCD initialization message ?"
-	default "n"
-	---help---
-	  This allows you to replace the boot message indicating the kernel version
-	  and the driver version with a custom message. This is useful on appliances
-	  where a simple 'Starting system' message can be enough to stop a customer
-	  from worrying.
-
-	  If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
-	  say 'N' and keep the default message with the version.
-
-config PANEL_BOOT_MESSAGE
-	depends on PANEL && PANEL_CHANGE_MESSAGE="y"
-	string "New initialization message"
-	default ""
-	---help---
-	  This allows you to replace the boot message indicating the kernel version
-	  and the driver version with a custom message. This is useful on appliances
-	  where a simple 'Starting system' message can be enough to stop a customer
-	  from worrying.
-
-	  An empty message will only clear the display at driver init time. Any other
-	  printf()-formatted message is valid with newline and escape codes.
diff --git a/drivers/staging/panel/Makefile b/drivers/staging/panel/Makefile
deleted file mode 100644
index 747c238..0000000
--- a/drivers/staging/panel/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_PANEL)		+= panel.o
diff --git a/drivers/staging/panel/TODO b/drivers/staging/panel/TODO
deleted file mode 100644
index 2db3f99..0000000
--- a/drivers/staging/panel/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-TODO:
-	- checkpatch.pl cleanups
-	- review major/minor usages
-	- review userspace api
-	- see if all of this could be easier done in userspace instead.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Willy Tarreau <willy@meta-x.org>
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index bbe5ad8..02ba78f 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -13537,7 +13537,6 @@
 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
 	/*
 	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
-	 * (due to A0 erratum).
 	 */
 	if (!is_ax(dd)) {
 		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h
index 701e9e1..014d7a6 100644
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ b/drivers/staging/rdma/hfi1/chip_registers.h
@@ -551,6 +551,17 @@
 #define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008)
 #define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull
 #define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400)
+#define CCE_PCIE_CTRL (CCE + 0x0000000000C0)
+#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK 0x3ull
+#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT 0
+#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK 0xFull
+#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT 2
+#define CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT 8
+#define CCE_PCIE_CTRL_XMT_MARGIN_SHIFT 9
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK 0x1ull
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT 12
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK 0x7ull
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT 13
 #define CCE_REVISION (CCE + 0x000000000000)
 #define CCE_REVISION2 (CCE + 0x000000000008)
 #define CCE_REVISION2_HFI_ID_MASK 0x1ull
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
index 0c88317..e41159f 100644
--- a/drivers/staging/rdma/hfi1/diag.c
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -257,7 +257,7 @@
 static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
 static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
 
-static struct hfi1_filter_array hfi1_filters[] = {
+static const struct hfi1_filter_array hfi1_filters[] = {
 	{ hfi1_filter_lid },
 	{ hfi1_filter_dlid },
 	{ hfi1_filter_mad_mgmt_class },
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c
index 8485de1..32185206 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/staging/rdma/hfi1/driver.c
@@ -368,7 +368,7 @@
 		if (opcode == IB_OPCODE_CNP) {
 			/*
 			 * Only in pre-B0 h/w is the CNP_OPCODE handled
-			 * via this code path (errata 291394).
+			 * via this code path.
 			 */
 			struct hfi1_qp *qp = NULL;
 			u32 lqpn, rqpn;
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c
index 7dc5bae..47dfe25 100644
--- a/drivers/staging/rdma/hfi1/efivar.c
+++ b/drivers/staging/rdma/hfi1/efivar.c
@@ -83,8 +83,7 @@
 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
 		return -EOPNOTSUPP;
 
-	uni_name = kzalloc(sizeof(efi_char16_t) * (strlen(name) + 1),
-			   GFP_KERNEL);
+	uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL);
 	temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL);
 
 	if (!uni_name || !temp_buffer) {
@@ -128,13 +127,12 @@
 	 * temporary buffer.  Now allocate a correctly sized
 	 * buffer.
 	 */
-	data = kmalloc(temp_size, GFP_KERNEL);
+	data = kmemdup(temp_buffer, temp_size, GFP_KERNEL);
 	if (!data) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	memcpy(data, temp_buffer, temp_size);
 	*size = temp_size;
 	*return_data = data;
 
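The efivar.c hunks above swap a kzalloc() with an open-coded multiplication for kcalloc(), which checks the multiplication for overflow, and a kmalloc()+memcpy() pair for kmemdup(). A minimal stand-alone sketch of the allocate-and-copy pattern in plain C; memdup() and the sample data are local illustrations, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Local stand-in for kmemdup(): allocate and copy in one step. */
static void *memdup(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}

int main(void)
{
	const char payload[] = "efi-variable-data";
	char *copy = memdup(payload, sizeof(payload));
	/* calloc(n, size), like kcalloc(), checks n * size for overflow. */
	unsigned short *table = calloc(16, sizeof(*table));

	printf("%s %p\n", copy ? copy : "(alloc failed)", (void *)table);
	free(copy);
	free(table);
	return 0;
}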
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
index 2611bb2..d4826a9 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/staging/rdma/hfi1/hfi.h
@@ -1730,7 +1730,7 @@
 		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
 
 	if (is_ax(dd))
-		/* turn off send-side job key checks - A0 erratum */
+		/* turn off send-side job key checks - A0 */
 		return base_sc_integrity &
 		       ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
 	return base_sc_integrity;
@@ -1757,7 +1757,7 @@
 	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
 
 	if (is_ax(dd))
-		/* turn off send-side job key checks - A0 erratum */
+		/* turn off send-side job key checks - A0 */
 		return base_sdma_integrity &
 		       ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
 	return base_sdma_integrity;
@@ -1794,6 +1794,10 @@
 	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
 			get_unit_name((dd)->unit), ##__VA_ARGS__)
 
+#define dd_dev_dbg(dd, fmt, ...) \
+	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
+		get_unit_name((dd)->unit), ##__VA_ARGS__)
+
 #define hfi1_dev_porterr(dd, port, fmt, ...) \
 	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
 			get_unit_name((dd)->unit), (dd)->unit, (port), \
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
index 4f5dbd1..bee1c0e 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -2279,17 +2279,23 @@
 {
 	if (!is_bx(ppd->dd)) {
 		unsigned long vl;
-		u64 max_vl_xmit_wait = 0, tmp;
+		u64 sum_vl_xmit_wait = 0;
 		u32 vl_all_mask = VL_MASK_ALL;
 
 		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
 				 8 * sizeof(vl_all_mask)) {
-			tmp = read_port_cntr(ppd, C_TX_WAIT_VL,
-					     idx_from_vl(vl));
-			if (tmp > max_vl_xmit_wait)
-				max_vl_xmit_wait = tmp;
+			u64 tmp = sum_vl_xmit_wait +
+				  read_port_cntr(ppd, C_TX_WAIT_VL,
+						 idx_from_vl(vl));
+			if (tmp < sum_vl_xmit_wait) {
+				/* we wrapped */
+				sum_vl_xmit_wait = (u64)~0;
+				break;
+			}
+			sum_vl_xmit_wait = tmp;
 		}
-		rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait);
+		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
+			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
 	}
 }
 
@@ -2491,18 +2497,19 @@
 	return error_counter_summary;
 }
 
-static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
 			    u32 vl_select_mask)
 {
-	if (!is_bx(dd)) {
+	if (!is_bx(ppd->dd)) {
 		unsigned long vl;
-		int vfi = 0;
 		u64 sum_vl_xmit_wait = 0;
+		u32 vl_all_mask = VL_MASK_ALL;
 
-		for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-				8 * sizeof(vl_select_mask)) {
+		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+				 8 * sizeof(vl_all_mask)) {
 			u64 tmp = sum_vl_xmit_wait +
-				be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait);
+				  read_port_cntr(ppd, C_TX_WAIT_VL,
+						 idx_from_vl(vl));
 			if (tmp < sum_vl_xmit_wait) {
 				/* we wrapped */
 				sum_vl_xmit_wait = (u64) ~0;
@@ -2665,7 +2672,7 @@
 		vfi++;
 	}
 
-	a0_datacounters(dd, rsp, vl_select_mask);
+	a0_datacounters(ppd, rsp, vl_select_mask);
 
 	if (resp_len)
 		*resp_len += response_data_size;
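The mad.c hunks above stop tracking a per-VL maximum and instead accumulate a saturating sum: each addition is checked for wrap-around and the total is pinned to ~0 once it overflows. A minimal stand-alone sketch of that saturating-add pattern in plain C (function and variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Accumulate counters, saturating at UINT64_MAX on overflow. */
static uint64_t saturating_sum(const uint64_t *vals, int n)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint64_t tmp = sum + vals[i];

		if (tmp < sum)		/* we wrapped */
			return UINT64_MAX;
		sum = tmp;
	}
	return sum;
}

int main(void)
{
	uint64_t v[] = { 5, UINT64_MAX - 2, 10 };

	printf("%llu\n", (unsigned long long)saturating_sum(v, 3));
	return 0;
}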
diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c
index a3f8b88..3825321 100644
--- a/drivers/staging/rdma/hfi1/mr.c
+++ b/drivers/staging/rdma/hfi1/mr.c
@@ -70,7 +70,7 @@
 	int m, i = 0;
 	int rval = 0;
 
-	m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+	m = DIV_ROUND_UP(count, HFI1_SEGSZ);
 	for (; i < m; i++) {
 		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
 		if (!mr->map[i])
@@ -159,7 +159,7 @@
 	int m;
 
 	/* Allocate struct plus pointers to first level page tables. */
-	m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+	m = DIV_ROUND_UP(count, HFI1_SEGSZ);
 	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
 	if (!mr)
 		goto bail;
@@ -333,7 +333,7 @@
 	int rval = -ENOMEM;
 
 	/* Allocate struct plus pointers to first level page tables. */
-	m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+	m = DIV_ROUND_UP(fmr_attr->max_pages, HFI1_SEGSZ);
 	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
 	if (!fmr)
 		goto bail;
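The mr.c hunks above replace the open-coded (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ with DIV_ROUND_UP(), which expands to the same ceiling division. A quick stand-alone check of that equivalence in plain C (the SEGSZ value here is made up for illustration):

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SEGSZ			8	/* illustrative segment size */

int main(void)
{
	unsigned int count;

	for (count = 1; count <= 64; count++)
		assert(DIV_ROUND_UP(count, SEGSZ) ==
		       (count + SEGSZ - 1) / SEGSZ);

	printf("DIV_ROUND_UP(13, %d) = %d\n", SEGSZ, DIV_ROUND_UP(13, SEGSZ));
	return 0;
}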
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c
index 8317b07..b2f553d8 100644
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ b/drivers/staging/rdma/hfi1/pcie.c
@@ -867,6 +867,83 @@
 }
 
 /*
+ * CCE_PCIE_CTRL long name helpers
+ * We redefine these shorter macros to use in the code while leaving
+ * chip_registers.h to be autogenerated from the hardware spec.
+ */
+#define LANE_BUNDLE_MASK              CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK
+#define LANE_BUNDLE_SHIFT             CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT
+#define LANE_DELAY_MASK               CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK
+#define LANE_DELAY_SHIFT              CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT
+#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT
+#define MARGIN_SHIFT                  CCE_PCIE_CTRL_XMT_MARGIN_SHIFT
+#define MARGIN_G1_G2_OVERWRITE_MASK   CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
+#define MARGIN_G1_G2_OVERWRITE_SHIFT  CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT
+#define MARGIN_GEN1_GEN2_MASK         CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
+#define MARGIN_GEN1_GEN2_SHIFT        CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT
+
+ /*
+  * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C).
+  */
+static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
+{
+	u64 pcie_ctrl;
+	u64 xmt_margin;
+	u64 xmt_margin_oe;
+	u64 lane_delay;
+	u64 lane_bundle;
+
+	pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);
+
+	/*
+	 * For Discrete, use full-swing.
+	 *  - PCIe TX defaults to full-swing.
+	 *    Leave this register as default.
+	 * For Integrated, use half-swing
+	 *  - Copy xmt_margin and xmt_margin_oe
+	 *    from Gen1/Gen2 to Gen3.
+	 */
+	if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */
+		/* extract initial fields */
+		xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT)
+			      & MARGIN_GEN1_GEN2_MASK;
+		xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT)
+				 & MARGIN_G1_G2_OVERWRITE_MASK;
+		lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK;
+		lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT)
+			       & LANE_BUNDLE_MASK;
+
+		/*
+		 * For A0, EFUSE values are not set.  Override with the
+		 * correct values.
+		 */
+		if (is_ax(dd)) {
+			/*
+			 * xmt_margin and OverwriteEnable should be the
+			 * same for Gen1/Gen2 and Gen3
+			 */
+			xmt_margin = 0x5;
+			xmt_margin_oe = 0x1;
+			lane_delay = 0xF; /* Delay 240ns. */
+			lane_bundle = 0x0; /* Set to 1 lane. */
+		}
+
+		/* overwrite existing values */
+		pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT)
+			| (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT)
+			| (xmt_margin << MARGIN_SHIFT)
+			| (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT)
+			| (lane_delay << LANE_DELAY_SHIFT)
+			| (lane_bundle << LANE_BUNDLE_SHIFT);
+
+		write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
+	}
+
+	dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
+		   fname, pcie_ctrl);
+}
+
+/*
  * Do all the steps needed to transition the PCIe link to Gen3 speed.
  */
 int do_pcie_gen3_transition(struct hfi1_devdata *dd)
@@ -986,7 +1063,7 @@
 	 * PcieCfgRegPl100 - Gen3 Control
 	 *
 	 * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl
-	 * turn on PcieCfgRegPl100.EqEieosCnt (erratum)
+	 * turn on PcieCfgRegPl100.EqEieosCnt
 	 * Everything else zero.
 	 */
 	reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
@@ -1064,11 +1141,8 @@
 
 	/*
 	 * step 5d: program XMT margin
-	 * Right now, leave the default alone.  To change, do a
-	 * read-modify-write of:
-	 *	CcePcieCtrl.XmtMargin
-	 *	CcePcieCtrl.XmitMarginOverwriteEnable
 	 */
+	write_xmt_margin(dd, __func__);
 
 	/* step 5e: disable active state power management (ASPM) */
 	dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
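write_xmt_margin() above reads CcePcieCtrl, pulls fields out with (reg >> SHIFT) & MASK, and reassembles a new register value by shifting them back into place. A minimal stand-alone sketch of that extract/insert pattern in plain C; the field layout below is invented for illustration and is not the real CcePcieCtrl layout:

#include <stdint.h>
#include <stdio.h>

#define FIELD_A_SHIFT	0
#define FIELD_A_MASK	0x3ull
#define FIELD_B_SHIFT	9
#define FIELD_B_MASK	0x7ull

static uint64_t field_get(uint64_t reg, unsigned int shift, uint64_t mask)
{
	return (reg >> shift) & mask;
}

static uint64_t field_set(uint64_t reg, unsigned int shift, uint64_t mask,
			  uint64_t val)
{
	reg &= ~(mask << shift);		/* clear the field */
	return reg | ((val & mask) << shift);	/* insert the new value */
}

int main(void)
{
	uint64_t reg = 0;

	reg = field_set(reg, FIELD_B_SHIFT, FIELD_B_MASK, 0x5);
	reg = field_set(reg, FIELD_A_SHIFT, FIELD_A_MASK, 0x1);
	printf("reg=0x%llx a=%llu b=%llu\n",
	       (unsigned long long)reg,
	       (unsigned long long)field_get(reg, FIELD_A_SHIFT, FIELD_A_MASK),
	       (unsigned long long)field_get(reg, FIELD_B_SHIFT, FIELD_B_MASK));
	return 0;
}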
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index bd1b402..25e6053 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -671,7 +671,7 @@
 	if (unlikely(bth1 & HFI1_BECN_SMASK)) {
 		/*
 		 * In pre-B0 h/w the CNP_OPCODE is handled via an
-		 * error path (errata 291394).
+		 * error path.
 		 */
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 		u32 lqpn =  be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index d3de771..2f48419 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -67,7 +67,6 @@
 #include "hfi.h"
 #include "sdma.h"
 #include "user_sdma.h"
-#include "sdma.h"
 #include "verbs.h"  /* for the headers */
 #include "common.h" /* for struct hfi1_tid_info */
 #include "trace.h"
@@ -468,8 +467,7 @@
 		fd->pq = NULL;
 	}
 	if (fd->cq) {
-		if (fd->cq->comps)
-			vfree(fd->cq->comps);
+		vfree(fd->cq->comps);
 		kfree(fd->cq);
 		fd->cq = NULL;
 	}
@@ -926,8 +924,8 @@
 			unsigned pageidx, len;
 
 			base = (unsigned long)iovec->iov.iov_base;
-			offset = ((base + iovec->offset + iov_offset) &
-				  ~PAGE_MASK);
+			offset = offset_in_page(base + iovec->offset +
+						iov_offset);
 			pageidx = (((iovec->offset + iov_offset +
 				     base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
 			len = offset + req->info.fragsize > PAGE_SIZE ?
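The user_sdma.c hunk above swaps an open-coded (addr & ~PAGE_MASK) for offset_in_page(), which yields the byte offset of an address within its page. A stand-alone sketch in plain C, assuming a 4 KiB page for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Local re-definition: keep only the low PAGE_SHIFT bits of the address. */
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x12345678UL;

	assert(offset_in_page(addr) == addr % PAGE_SIZE);
	printf("offset_in_page(0x%lx) = 0x%lx\n", addr, offset_in_page(addr));
	return 0;
}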
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index ed72358..29b9834 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -53,4 +53,4 @@
 
 obj-$(CONFIG_R8188EU)	:= r8188eu.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include
+ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
index b574b23..ce60f07 100644
--- a/drivers/staging/rtl8188eu/TODO
+++ b/drivers/staging/rtl8188eu/TODO
@@ -15,5 +15,5 @@
 	rcu_read_unlock();
   Perhaps delete it, perhaps assign to some local variable.
 
-Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
 and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index e5d29fe..79d326a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -569,7 +569,7 @@
 
 		psta->ieee8021x_blocked = 0;
 
-		memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+		memset(&psta->sta_stats, 0, sizeof(struct stainfo_stats));
 
 		/* prepare for add_RATid */
 		supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
@@ -692,7 +692,7 @@
 
 	/* todo: init other variables */
 
-	memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+	memset(&psta->sta_stats, 0, sizeof(struct stainfo_stats));
 
 	spin_lock_bh(&psta->lock);
 	psta->state |= _FW_LINKED;
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 433b926..1de7929 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -69,23 +69,17 @@
 	return _SUCCESS;
 }
 
-struct	cmd_obj	*rtw_dequeue_cmd(struct __queue *queue)
+struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue)
 {
 	unsigned long irqL;
 	struct cmd_obj *obj;
 
-
 	spin_lock_irqsave(&queue->lock, irqL);
-	if (list_empty(&(queue->queue))) {
-		obj = NULL;
-	} else {
-		obj = container_of((&queue->queue)->next, struct cmd_obj, list);
+	obj = list_first_entry_or_null(&queue->queue, struct cmd_obj, list);
+	if (obj)
 		list_del_init(&obj->list);
-	}
-
 	spin_unlock_irqrestore(&queue->lock, irqL);
 
-
 	return obj;
 }
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 2c4afb8..93e898d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -149,7 +149,7 @@
 {
 	struct net_device *dev = data;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
 	int len = 0;
 
@@ -184,7 +184,7 @@
 	struct net_device *dev = data;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
 	struct mlme_ext_priv	*pmlmeext = &padapter->mlmeextpriv;
-	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
+	struct mlme_ext_info	*pmlmeinfo = &pmlmeext->mlmext_info;
 
 	int len = 0;
 
@@ -200,7 +200,7 @@
 {
 	struct net_device *dev = data;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
 	int len = 0;
 
@@ -216,7 +216,7 @@
 {
 	struct net_device *dev = data;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
 	int len = 0;
 
@@ -247,9 +247,9 @@
 	struct sta_info *psta;
 	struct net_device *dev = data;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-	struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+	struct wlan_network *cur_network = &pmlmepriv->cur_network;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	int len = 0;
 
@@ -851,7 +851,7 @@
 	spin_lock_bh(&pstapriv->sta_hash_lock);
 
 	for (i = 0; i < NUM_STA; i++) {
-		phead = &(pstapriv->sta_hash[i]);
+		phead = &pstapriv->sta_hash[i];
 		plist = phead->next;
 
 		while (phead != plist) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 2320fb1..fd8cf47 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -394,7 +394,7 @@
 	u8 badworden = 0x0F;
 	u8 tmpdata[8];
 
-	memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
+	memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
 
 	if (!(word_en & BIT(0))) {
 		tmpaddr = start_addr;
@@ -500,8 +500,8 @@
 	if (offset > max_section)
 		return false;
 
-	memset((void *)data, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
-	memset((void *)tmpdata, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
+	memset(data, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
+	memset(tmpdata, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
 
 	/*  <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the end of Efuse by CP. */
 	/*  Skip dummy parts to prevent unexpected data read from Efuse. */
@@ -572,7 +572,7 @@
 	u16	efuse_addr = *pAddr;
 	u32	PgWriteSuccess = 0;
 
-	memset((void *)originaldata, 0xff, 8);
+	memset(originaldata, 0xff, 8);
 
 	if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata)) {
 		/* check if data exist */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 742b29c..aa7f571 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -935,8 +935,8 @@
 		}
 		break;
 	default:
-		DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n",
-			pos[0], pos[1], pos[2], (unsigned long)elen);
+		DBG_88E("unknown vendor specific information element ignored (vendor OUI %3phC len=%lu)\n",
+			pos, (unsigned long)elen);
 		return -1;
 	}
 	return 0;
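The rtw_ieee80211.c hunk above uses the kernel's %3phC format extension to print the three OUI bytes colon-separated instead of passing each byte separately. A plain-C sketch of the same formatting (the buffer size and sample OUI are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t oui[3] = { 0x00, 0x50, 0xf2 };
	char buf[9];	/* "xx:xx:xx" plus NUL */

	snprintf(buf, sizeof(buf), "%02x:%02x:%02x", oui[0], oui[1], oui[2]);
	printf("vendor OUI %s\n", buf);
	return 0;
}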
diff --git a/drivers/staging/rtl8188eu/core/rtw_iol.c b/drivers/staging/rtl8188eu/core/rtw_iol.c
index cdcf0ea..2e2145ca 100644
--- a/drivers/staging/rtl8188eu/core/rtw_iol.c
+++ b/drivers/staging/rtl8188eu/core/rtw_iol.c
@@ -11,21 +11,18 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  *
  ******************************************************************************/
 
-#include<rtw_iol.h>
+#include <rtw_iol.h>
 
-bool rtw_IOL_applied(struct adapter  *adapter)
+bool rtw_IOL_applied(struct adapter *adapter)
 {
-	if (1 == adapter->registrypriv.fw_iol)
+	if (adapter->registrypriv.fw_iol == 1)
 		return true;
 
-	if ((2 == adapter->registrypriv.fw_iol) && (!adapter_to_dvobj(adapter)->ishighspeed))
+	if ((adapter->registrypriv.fw_iol == 2) &&
+	    (!adapter_to_dvobj(adapter)->ishighspeed))
 		return true;
 	return false;
 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index abab854..a645a62 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -122,31 +122,26 @@
 {
 	rtw_free_mlme_priv_ie_data(pmlmepriv);
 
-	if (pmlmepriv) {
-		if (pmlmepriv->free_bss_buf)
-			vfree(pmlmepriv->free_bss_buf);
-	}
+	if (pmlmepriv)
+		vfree(pmlmepriv->free_bss_buf);
 }
 
-struct	wlan_network *_rtw_alloc_network(struct	mlme_priv *pmlmepriv)/* _queue *free_queue) */
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
+					/* _queue *free_queue) */
 {
-	struct	wlan_network	*pnetwork;
+	struct wlan_network *pnetwork;
 	struct __queue *free_queue = &pmlmepriv->free_bss_pool;
-	struct list_head *plist = NULL;
 
 	spin_lock_bh(&free_queue->lock);
-
-	if (list_empty(&free_queue->queue)) {
-		pnetwork = NULL;
+	pnetwork = list_first_entry_or_null(&free_queue->queue,
+					    struct wlan_network, list);
+	if (!pnetwork)
 		goto exit;
-	}
-	plist = free_queue->queue.next;
-
-	pnetwork = container_of(plist, struct wlan_network, list);
 
 	list_del_init(&pnetwork->list);
 
-	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist));
+	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+		 ("_rtw_alloc_network: ptr=%p\n", &pnetwork->list));
 	pnetwork->network_type = 0;
 	pnetwork->fixed = false;
 	pnetwork->last_scanned = jiffies;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 3eca687..e3add486 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -20,6 +20,7 @@
 #define _RTW_MLME_EXT_C_
 
 #include <linux/ieee80211.h>
+#include <asm/unaligned.h>
 
 #include <osdep_service.h>
 #include <drv_types.h>
@@ -1027,7 +1028,6 @@
 	unsigned char		*pframe, *p;
 	struct rtw_ieee80211_hdr	*pwlanhdr;
 	__le16 *fctrl;
-	__le16		le_tmp;
 	unsigned int	i, j, ie_len, index = 0;
 	unsigned char	rf_type, bssrate[NumRates], sta_bssrate[NumRates];
 	struct ndis_802_11_var_ie *pIE;
@@ -1073,8 +1073,7 @@
 
 	/* listen interval */
 	/* todo: listen interval for power saving */
-	le_tmp = cpu_to_le16(3);
-	memcpy(pframe , (unsigned char *)&le_tmp, 2);
+	put_unaligned_le16(3, pframe);
 	pframe += 2;
 	pattrib->pktlen += 2;
 
@@ -1673,7 +1672,6 @@
 	fctrl = &(pwlanhdr->frame_ctl);
 	*(fctrl) = 0;
 
-	/* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
 	memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
 	memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
 	memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
@@ -3653,7 +3651,7 @@
 	struct sta_info *psta = NULL;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	u8 *pframe = precv_frame->rx_data;
-	u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+	u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
 	u8 category;
 	u8 action;
 
@@ -3740,10 +3738,10 @@
 			memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request));
 			process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
 
-			if (pmlmeinfo->bAcceptAddbaReq)
-				issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
-			else
-				issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
+			/* 37 = reject ADDBA Req */
+			issue_action_BA(padapter, addr,
+					RTW_WLAN_ACTION_ADDBA_RESP,
+					pmlmeinfo->accept_addba_req ? 0 : 37);
 			break;
 		case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
 			status = get_unaligned_le16(&frame_body[3]);
@@ -4150,7 +4148,7 @@
 	pmlmeext->padapter = padapter;
 
 	init_mlme_ext_priv_value(padapter);
-	pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+	pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req;
 
 	init_mlme_ext_timer(padapter);
 
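The rtw_mlme_ext.c hunk above drops the cpu_to_le16()-into-a-temporary-plus-memcpy dance in favour of put_unaligned_le16(), which stores a 16-bit value little-endian at a possibly unaligned address. A stand-alone sketch of the equivalent byte-wise store in plain C (put_le16() is a local helper, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value little-endian at an arbitrary byte address. */
static void put_le16(uint8_t *p, uint16_t val)
{
	p[0] = val & 0xff;
	p[1] = val >> 8;
}

int main(void)
{
	uint8_t frame[4] = { 0 };

	put_le16(frame + 1, 3);	/* deliberately unaligned offset */
	printf("%02x %02x %02x %02x\n",
	       frame[0], frame[1], frame[2], frame[3]);
	return 0;
}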
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 110b8c0..40b7a30 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -116,9 +116,8 @@
 
 	rtw_free_uc_swdec_pending_queue(padapter);
 
-	if (precvpriv->pallocated_frame_buf) {
+	if (precvpriv->pallocated_frame_buf)
 		vfree(precvpriv->pallocated_frame_buf);
-	}
 
 	rtw_hal_free_recv_priv(padapter);
 
@@ -127,22 +126,15 @@
 struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
 {
 	struct recv_frame *hdr;
-	struct list_head *plist, *phead;
 	struct adapter *padapter;
 	struct recv_priv *precvpriv;
 
-	if (list_empty(&pfree_recv_queue->queue)) {
-		hdr = NULL;
-	} else {
-		phead = get_list_head(pfree_recv_queue);
-
-		plist = phead->next;
-
-		hdr = container_of(plist, struct recv_frame, list);
-
+	hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
+				       struct recv_frame, list);
+	if (hdr) {
 		list_del_init(&hdr->list);
 		padapter = hdr->adapter;
-		if (padapter != NULL) {
+		if (padapter) {
 			precvpriv = &padapter->recvpriv;
 			if (pfree_recv_queue == &precvpriv->free_recv_queue)
 				precvpriv->free_recvframe_cnt--;
@@ -917,9 +909,8 @@
 
 		process_pwrbit_data(adapter, precv_frame);
 
-		if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+		if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE)
 			process_wmmps_data(adapter, precv_frame);
-		}
 
 		if (GetFrameSubType(ptr) & BIT(6)) {
 			/* No data, will not indicate to upper layer, temporily count it here */
@@ -1274,32 +1265,25 @@
 	/* Dump rx packets */
 	rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
 	if (bDumpRxPkt == 1) {/* dump all rx packets */
-		int i;
-		DBG_88E("#############################\n");
-
-		for (i = 0; i < 64; i += 8)
-			DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
-				*(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
-		DBG_88E("#############################\n");
+		if (_drv_err_ <= GlobalDebugLevel) {
+			pr_info(DRIVER_PREFIX "#############################\n");
+			print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
+					16, 1, ptr, 64, false);
+			pr_info(DRIVER_PREFIX "#############################\n");
+		}
 	} else if (bDumpRxPkt == 2) {
-		if (type == WIFI_MGT_TYPE) {
-			int i;
-			DBG_88E("#############################\n");
-
-			for (i = 0; i < 64; i += 8)
-				DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
-					*(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
-			DBG_88E("#############################\n");
+		if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_MGT_TYPE)) {
+			pr_info(DRIVER_PREFIX "#############################\n");
+			print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
+					16, 1, ptr, 64, false);
+			pr_info(DRIVER_PREFIX "#############################\n");
 		}
 	} else if (bDumpRxPkt == 3) {
-		if (type == WIFI_DATA_TYPE) {
-			int i;
-			DBG_88E("#############################\n");
-
-			for (i = 0; i < 64; i += 8)
-				DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
-					*(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
-			DBG_88E("#############################\n");
+		if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_DATA_TYPE)) {
+			pr_info(DRIVER_PREFIX "#############################\n");
+			print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
+					16, 1, ptr, 64, false);
+			pr_info(DRIVER_PREFIX "#############################\n");
 		}
 	}
 	switch (type) {
@@ -1541,10 +1525,9 @@
 		if (pdefrag_q != NULL) {
 			if (fragnum == 0) {
 				/* the first fragment */
-				if (!list_empty(&pdefrag_q->queue)) {
+				if (!list_empty(&pdefrag_q->queue))
 					/* free current defrag_q */
 					rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
-				}
 			}
 
 			/* Then enqueue the 0~(n-1) fragment into the defrag_q */
@@ -1660,9 +1643,8 @@
 		a_len -= nSubframe_Length;
 		if (a_len != 0) {
 			padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
-			if (padding_len == 4) {
+			if (padding_len == 4)
 				padding_len = 0;
-			}
 
 			if (a_len < padding_len) {
 				goto exit;
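The rtw_recv.c dump hunk above replaces hand-rolled eight-bytes-per-DBG_88E loops with print_hex_dump(), which formats a buffer in fixed-width rows. A stand-alone sketch of that kind of dump in plain C (hex_dump() is a local helper, not the kernel function):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void hex_dump(const char *prefix, const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (i % 16 == 0)
			printf("%s%s", i ? "\n" : "", prefix);
		printf("%02x ", buf[i]);
	}
	printf("\n");
}

int main(void)
{
	uint8_t pkt[64];
	size_t i;

	for (i = 0; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)i;
	hex_dump("R8188EU: ", pkt, sizeof(pkt));
	return 0;
}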
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 22839d5..b781ccf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1081,13 +1081,13 @@
 
 	frsubtype >>= 4;
 
-	memset((void *)mic_iv, 0, 16);
-	memset((void *)mic_header1, 0, 16);
-	memset((void *)mic_header2, 0, 16);
-	memset((void *)ctr_preload, 0, 16);
-	memset((void *)chain_buffer, 0, 16);
-	memset((void *)aes_out, 0, 16);
-	memset((void *)padded_buffer, 0, 16);
+	memset(mic_iv, 0, 16);
+	memset(mic_header1, 0, 16);
+	memset(mic_header2, 0, 16);
+	memset(ctr_preload, 0, 16);
+	memset(chain_buffer, 0, 16);
+	memset(aes_out, 0, 16);
+	memset(padded_buffer, 0, 16);
 
 	if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen ==  WLAN_HDR_A3_QOS_LEN))
 		a4_exists = 0;
@@ -1279,13 +1279,13 @@
 	uint	frsubtype  = GetFrameSubType(pframe);
 	frsubtype >>= 4;
 
-	memset((void *)mic_iv, 0, 16);
-	memset((void *)mic_header1, 0, 16);
-	memset((void *)mic_header2, 0, 16);
-	memset((void *)ctr_preload, 0, 16);
-	memset((void *)chain_buffer, 0, 16);
-	memset((void *)aes_out, 0, 16);
-	memset((void *)padded_buffer, 0, 16);
+	memset(mic_iv, 0, 16);
+	memset(mic_header1, 0, 16);
+	memset(mic_header2, 0, 16);
+	memset(ctr_preload, 0, 16);
+	memset(chain_buffer, 0, 16);
+	memset(aes_out, 0, 16);
+	memset(padded_buffer, 0, 16);
 
 	/* start to decrypt the payload */
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 1beeac4..ce655b6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -179,9 +179,9 @@
 	return _SUCCESS;
 }
 
-struct	sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
 {
-	s32	index;
+	s32 index;
 	struct list_head *phash_list;
 	struct sta_info	*psta;
 	struct __queue *pfree_sta_queue;
@@ -189,17 +189,15 @@
 	int i = 0;
 	u16  wRxSeqInitialValue = 0xffff;
 
-
 	pfree_sta_queue = &pstapriv->free_sta_queue;
 
-	spin_lock_bh(&(pfree_sta_queue->lock));
-
-	if (list_empty(&pfree_sta_queue->queue)) {
+	spin_lock_bh(&pfree_sta_queue->lock);
+	psta = list_first_entry_or_null(&pfree_sta_queue->queue,
+					struct sta_info, list);
+	if (!psta) {
 		spin_unlock_bh(&pfree_sta_queue->lock);
-		psta = NULL;
 	} else {
-		psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list);
-		list_del_init(&(psta->list));
+		list_del_init(&psta->list);
 		spin_unlock_bh(&pfree_sta_queue->lock);
 		_rtw_init_stainfo(psta);
 		memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
@@ -210,14 +208,11 @@
 			psta = NULL;
 			goto exit;
 		}
-		phash_list = &(pstapriv->sta_hash[index]);
+		phash_list = &pstapriv->sta_hash[index];
 
-		spin_lock_bh(&(pstapriv->sta_hash_lock));
-
+		spin_lock_bh(&pstapriv->sta_hash_lock);
 		list_add_tail(&psta->hash_list, phash_list);
-
 		pstapriv->asoc_sta_count++;
-
 		spin_unlock_bh(&pstapriv->sta_hash_lock);
 
 /*  Commented by Albert 2009/08/13 */
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 59b4432..8309669 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1374,7 +1374,7 @@
 				epigram_vendor_flag = 1;
 				if (ralink_vendor_flag) {
 					DBG_88E("link to Tenda W311R AP\n");
-					 return HT_IOT_PEER_TENDA;
+					return HT_IOT_PEER_TENDA;
 				} else {
 					DBG_88E("Capture EPIGRAM_OUI\n");
 				}
@@ -1579,7 +1579,8 @@
 		tid = (param>>2)&0x0f;
 		preorder_ctrl = &psta->recvreorder_ctrl[tid];
 		preorder_ctrl->indicate_seq = 0xffff;
-		preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
+		preorder_ctrl->enable = (pmlmeinfo->accept_addba_req) ? true
+								      : false;
 	}
 }
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index e778132..f2dd7a6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -247,11 +247,8 @@
 		pxmitbuf++;
 	}
 
-	if (pxmitpriv->pallocated_frame_buf)
-		vfree(pxmitpriv->pallocated_frame_buf);
-
-	if (pxmitpriv->pallocated_xmitbuf)
-		vfree(pxmitpriv->pallocated_xmitbuf);
+	vfree(pxmitpriv->pallocated_frame_buf);
+	vfree(pxmitpriv->pallocated_xmitbuf);
 
 	/*  free xmit extension buff */
 	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
@@ -1216,40 +1213,24 @@
 struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
 {
 	unsigned long irql;
-	struct xmit_buf *pxmitbuf =  NULL;
-	struct list_head *plist, *phead;
+	struct xmit_buf *pxmitbuf;
 	struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
 
-
 	spin_lock_irqsave(&pfree_queue->lock, irql);
-
-	if (list_empty(&pfree_queue->queue)) {
-		pxmitbuf = NULL;
-	} else {
-		phead = get_list_head(pfree_queue);
-
-		plist = phead->next;
-
-		pxmitbuf = container_of(plist, struct xmit_buf, list);
-
-		list_del_init(&(pxmitbuf->list));
-	}
-
-	if (pxmitbuf !=  NULL) {
+	pxmitbuf = list_first_entry_or_null(&pfree_queue->queue,
+					    struct xmit_buf, list);
+	if (pxmitbuf) {
+		list_del_init(&pxmitbuf->list);
 		pxmitpriv->free_xmit_extbuf_cnt--;
-
 		pxmitbuf->priv_data = NULL;
 		/* pxmitbuf->ext_tag = true; */
-
 		if (pxmitbuf->sctx) {
 			DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
 		}
 	}
-
 	spin_unlock_irqrestore(&pfree_queue->lock, irql);
 
-
 	return pxmitbuf;
 }
 
@@ -1278,28 +1259,16 @@
 struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
 {
 	unsigned long irql;
-	struct xmit_buf *pxmitbuf =  NULL;
-	struct list_head *plist, *phead;
+	struct xmit_buf *pxmitbuf;
 	struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
 
-
 	/* DBG_88E("+rtw_alloc_xmitbuf\n"); */
 
 	spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql);
-
-	if (list_empty(&pfree_xmitbuf_queue->queue)) {
-		pxmitbuf = NULL;
-	} else {
-		phead = get_list_head(pfree_xmitbuf_queue);
-
-		plist = phead->next;
-
-		pxmitbuf = container_of(plist, struct xmit_buf, list);
-
-		list_del_init(&(pxmitbuf->list));
-	}
-
-	if (pxmitbuf !=  NULL) {
+	pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue,
+					    struct xmit_buf, list);
+	if (pxmitbuf) {
+		list_del_init(&pxmitbuf->list);
 		pxmitpriv->free_xmitbuf_cnt--;
 		pxmitbuf->priv_data = NULL;
 		if (pxmitbuf->sctx) {
@@ -1309,7 +1278,6 @@
 	}
 	spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
 
-
 	return pxmitbuf;
 }
 
@@ -1355,38 +1323,33 @@
 
 */
 
-struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)
+				/* _queue *pfree_xmit_queue) */
 {
 	/*
 		Please remember to use all the osdep_service api,
 		and lock/unlock or _enter/_exit critical to protect
 		pfree_xmit_queue
 	*/
-
-	struct xmit_frame *pxframe = NULL;
-	struct list_head *plist, *phead;
+	struct xmit_frame *pxframe;
 	struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
 
-
 	spin_lock_bh(&pfree_xmit_queue->lock);
-
-	if (list_empty(&pfree_xmit_queue->queue)) {
-		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
-		pxframe =  NULL;
+	pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
+					   struct xmit_frame, list);
+	if (!pxframe) {
+		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+			 ("rtw_alloc_xmitframe:%d\n",
+			 pxmitpriv->free_xmitframe_cnt));
 	} else {
-		phead = get_list_head(pfree_xmit_queue);
+		list_del_init(&pxframe->list);
 
-		plist = phead->next;
-
-		pxframe = container_of(plist, struct xmit_frame, list);
-
-		list_del_init(&(pxframe->list));
-	}
-
-	if (pxframe !=  NULL) { /* default value setting */
+		/* default value setting */
 		pxmitpriv->free_xmitframe_cnt--;
 
-		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
+		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+			 ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n",
+			 pxmitpriv->free_xmitframe_cnt));
 
 		pxframe->buf_addr = NULL;
 		pxframe->pxmitbuf = NULL;
@@ -1402,10 +1365,8 @@
 		pxframe->agg_num = 1;
 		pxframe->ack_report = 0;
 	}
-
 	spin_unlock_bh(&pfree_xmit_queue->lock);
 
-
 	return pxframe;
 }
 
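Several rtl8188eu hunks above (rtw_cmd.c, rtw_mlme.c, rtw_recv.c, rtw_sta_mgt.c, rtw_xmit.c) collapse the list_empty()/container_of() dequeue idiom into list_first_entry_or_null(). A stand-alone sketch of what that helper does, built on a minimal user-space re-implementation of the intrusive list (illustrative only, not the kernel's <linux/list.h>):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_first_entry_or_null(head, type, member) \
	((head)->next != (head) ? \
	 container_of((head)->next, type, member) : NULL)

static void list_init(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct item {
	int val;
	struct list_head list;
};

int main(void)
{
	struct list_head queue;
	struct item a = { .val = 42 };
	struct item *first;

	list_init(&queue);

	/* On an empty queue the helper returns NULL instead of a bogus entry. */
	first = list_first_entry_or_null(&queue, struct item, list);
	printf("empty queue -> %p\n", (void *)first);

	list_add_tail(&a.list, &queue);
	first = list_first_entry_or_null(&queue, struct item, list);
	printf("first entry -> %d\n", first ? first->val : -1);
	return 0;
}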
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index f58a822..c2ad6a3 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -598,18 +598,12 @@
 
 	reg[RF_PATH_A] = &hal_data->PHYRegDef[RF_PATH_A];
 	reg[RF_PATH_B] = &hal_data->PHYRegDef[RF_PATH_B];
-	reg[RF_PATH_C] = &hal_data->PHYRegDef[RF_PATH_C];
-	reg[RF_PATH_D] = &hal_data->PHYRegDef[RF_PATH_D];
 
 	reg[RF_PATH_A]->rfintfs = rFPGA0_XAB_RFInterfaceSW;
 	reg[RF_PATH_B]->rfintfs = rFPGA0_XAB_RFInterfaceSW;
-	reg[RF_PATH_C]->rfintfs = rFPGA0_XCD_RFInterfaceSW;
-	reg[RF_PATH_D]->rfintfs = rFPGA0_XCD_RFInterfaceSW;
 
 	reg[RF_PATH_A]->rfintfi = rFPGA0_XAB_RFInterfaceRB;
 	reg[RF_PATH_B]->rfintfi = rFPGA0_XAB_RFInterfaceRB;
-	reg[RF_PATH_C]->rfintfi = rFPGA0_XCD_RFInterfaceRB;
-	reg[RF_PATH_D]->rfintfi = rFPGA0_XCD_RFInterfaceRB;
 
 	reg[RF_PATH_A]->rfintfo = rFPGA0_XA_RFInterfaceOE;
 	reg[RF_PATH_B]->rfintfo = rFPGA0_XB_RFInterfaceOE;
@@ -622,13 +616,9 @@
 
 	reg[RF_PATH_A]->rfLSSI_Select = rFPGA0_XAB_RFParameter;
 	reg[RF_PATH_B]->rfLSSI_Select = rFPGA0_XAB_RFParameter;
-	reg[RF_PATH_C]->rfLSSI_Select = rFPGA0_XCD_RFParameter;
-	reg[RF_PATH_D]->rfLSSI_Select = rFPGA0_XCD_RFParameter;
 
 	reg[RF_PATH_A]->rfTxGainStage = rFPGA0_TxGainStage;
 	reg[RF_PATH_B]->rfTxGainStage = rFPGA0_TxGainStage;
-	reg[RF_PATH_C]->rfTxGainStage = rFPGA0_TxGainStage;
-	reg[RF_PATH_D]->rfTxGainStage = rFPGA0_TxGainStage;
 
 	reg[RF_PATH_A]->rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
 	reg[RF_PATH_B]->rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
@@ -638,43 +628,27 @@
 
 	reg[RF_PATH_A]->rfSwitchControl = rFPGA0_XAB_SwitchControl;
 	reg[RF_PATH_B]->rfSwitchControl = rFPGA0_XAB_SwitchControl;
-	reg[RF_PATH_C]->rfSwitchControl = rFPGA0_XCD_SwitchControl;
-	reg[RF_PATH_D]->rfSwitchControl = rFPGA0_XCD_SwitchControl;
 
 	reg[RF_PATH_A]->rfAGCControl1 = rOFDM0_XAAGCCore1;
 	reg[RF_PATH_B]->rfAGCControl1 = rOFDM0_XBAGCCore1;
-	reg[RF_PATH_C]->rfAGCControl1 = rOFDM0_XCAGCCore1;
-	reg[RF_PATH_D]->rfAGCControl1 = rOFDM0_XDAGCCore1;
 
 	reg[RF_PATH_A]->rfAGCControl2 = rOFDM0_XAAGCCore2;
 	reg[RF_PATH_B]->rfAGCControl2 = rOFDM0_XBAGCCore2;
-	reg[RF_PATH_C]->rfAGCControl2 = rOFDM0_XCAGCCore2;
-	reg[RF_PATH_D]->rfAGCControl2 = rOFDM0_XDAGCCore2;
 
 	reg[RF_PATH_A]->rfRxIQImbalance = rOFDM0_XARxIQImbalance;
 	reg[RF_PATH_B]->rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
-	reg[RF_PATH_C]->rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
-	reg[RF_PATH_D]->rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
 
 	reg[RF_PATH_A]->rfRxAFE = rOFDM0_XARxAFE;
 	reg[RF_PATH_B]->rfRxAFE = rOFDM0_XBRxAFE;
-	reg[RF_PATH_C]->rfRxAFE = rOFDM0_XCRxAFE;
-	reg[RF_PATH_D]->rfRxAFE = rOFDM0_XDRxAFE;
 
 	reg[RF_PATH_A]->rfTxIQImbalance = rOFDM0_XATxIQImbalance;
 	reg[RF_PATH_B]->rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
-	reg[RF_PATH_C]->rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
-	reg[RF_PATH_D]->rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
 
 	reg[RF_PATH_A]->rfTxAFE = rOFDM0_XATxAFE;
 	reg[RF_PATH_B]->rfTxAFE = rOFDM0_XBTxAFE;
-	reg[RF_PATH_C]->rfTxAFE = rOFDM0_XCTxAFE;
-	reg[RF_PATH_D]->rfTxAFE = rOFDM0_XDTxAFE;
 
 	reg[RF_PATH_A]->rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
 	reg[RF_PATH_B]->rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
-	reg[RF_PATH_C]->rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
-	reg[RF_PATH_D]->rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
 
 	reg[RF_PATH_A]->rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
 	reg[RF_PATH_B]->rfLSSIReadBackPi = TransceiverB_HSPI_Readback;
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 4d72537..656133c 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -75,16 +75,6 @@
 		usb_write8(adapt, write_address, byte_buffer[i]);
 }
 
-static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-	u32 i;
-
-	for (i = *pfwlen; i < roundup(*pfwlen, 4); i++)
-		pfwbuf[i] = 0;
-
-	*pfwlen = i;
-}
-
 static void _rtl88e_fw_page_write(struct adapter *adapt,
 				  u32 page, const u8 *buffer, u32 size)
 {
@@ -103,8 +93,6 @@
 	u32 page_no, remain;
 	u32 page, offset;
 
-	_rtl88e_fill_dummy(buf_ptr, &size);
-
 	page_no = size / FW_8192C_PAGE_SIZE;
 	remain = size % FW_8192C_PAGE_SIZE;
 
@@ -170,14 +158,14 @@
 
 int rtl88eu_download_fw(struct adapter *adapt)
 {
-	struct hal_data_8188e *rtlhal = GET_HAL_DATA(adapt);
 	struct dvobj_priv *dvobj = adapter_to_dvobj(adapt);
 	struct device *device = dvobj_to_dev(dvobj);
 	const struct firmware *fw;
 	const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
 	struct rtl92c_firmware_header *pfwheader = NULL;
-	u8 *pfwdata;
-	u32 fwsize;
+	u8 *download_data, *fw_data;
+	size_t download_size;
+	unsigned int trailing_zeros_length;
 
 	if (request_firmware(&fw, fw_name, device)) {
 		dev_err(device, "Firmware %s not available\n", fw_name);
@@ -186,35 +174,43 @@
 
 	if (fw->size > FW_8188E_SIZE) {
 		dev_err(device, "Firmware size exceed 0x%X. Check it.\n",
-			 FW_8188E_SIZE);
+			FW_8188E_SIZE);
+		release_firmware(fw);
 		return -1;
 	}
 
-	pfwdata = kzalloc(FW_8188E_SIZE, GFP_KERNEL);
-	if (!pfwdata)
+	trailing_zeros_length = (4 - fw->size % 4) % 4;
+
+	fw_data = kmalloc(fw->size + trailing_zeros_length, GFP_KERNEL);
+	if (!fw_data) {
+		release_firmware(fw);
 		return -ENOMEM;
+	}
 
-	rtlhal->pfirmware = pfwdata;
-	memcpy(rtlhal->pfirmware, fw->data, fw->size);
-	rtlhal->fwsize = fw->size;
-	release_firmware(fw);
+	memcpy(fw_data, fw->data, fw->size);
+	memset(fw_data + fw->size, 0, trailing_zeros_length);
 
-	fwsize = rtlhal->fwsize;
-	pfwheader = (struct rtl92c_firmware_header *)pfwdata;
+	pfwheader = (struct rtl92c_firmware_header *)fw_data;
 
 	if (IS_FW_HEADER_EXIST(pfwheader)) {
-		pfwdata = pfwdata + 32;
-		fwsize = fwsize - 32;
+		download_data = fw_data + 32;
+		download_size = fw->size + trailing_zeros_length - 32;
+	} else {
+		download_data = fw_data;
+		download_size = fw->size + trailing_zeros_length;
 	}
 
+	release_firmware(fw);
+
 	if (usb_read8(adapt, REG_MCUFWDL) & RAM_DL_SEL) {
 		usb_write8(adapt, REG_MCUFWDL, 0);
 		rtl88e_firmware_selfreset(adapt);
 	}
 	_rtl88e_enable_fw_download(adapt, true);
 	usb_write8(adapt, REG_MCUFWDL, usb_read8(adapt, REG_MCUFWDL) | FWDL_ChkSum_rpt);
-	_rtl88e_write_fw(adapt, pfwdata, fwsize);
+	_rtl88e_write_fw(adapt, download_data, download_size);
 	_rtl88e_enable_fw_download(adapt, false);
 
+	kfree(fw_data);
 	return _rtl88e_fw_free_to_go(adapt);
 }
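rtl88eu_download_fw() now pads the image to a 4-byte boundary in a local buffer instead of keeping a persistent FW_8188E_SIZE copy in hal_data, and it releases the firmware and frees the buffer on every exit path. A sketch of just the padding step, with invented helper names:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Copy @src (@len bytes) into a freshly allocated buffer whose length is
 * rounded up to a multiple of four, zero-filling the tail.  The caller
 * kfree()s the result; *out_len receives the padded length.
 */
static u8 *copy_fw_padded(const u8 *src, size_t len, size_t *out_len)
{
	size_t pad = (4 - len % 4) % 4;		/* 0..3 trailing zero bytes */
	u8 *buf = kmalloc(len + pad, GFP_KERNEL);

	if (!buf)
		return NULL;

	memcpy(buf, src, len);
	memset(buf + len, 0, pad);
	*out_len = len + pad;

	return buf;
}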
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index d3e8a8e..ae42b44 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -180,32 +180,6 @@
 			hal_data->BW20_24G_Diff[TxCount][RF_PATH_A]+
 			hal_data->BW20_24G_Diff[TxCount][index];
 			bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
-		} else if (TxCount == RF_PATH_C) {
-			cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index];
-			ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_B][index]+
-			hal_data->BW20_24G_Diff[TxCount][index];
-
-			bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_B][index]+
-			hal_data->BW20_24G_Diff[TxCount][index];
-			bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
-		} else if (TxCount == RF_PATH_D) {
-			cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index];
-			ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_B][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_C][index]+
-			hal_data->BW20_24G_Diff[TxCount][index];
-
-			bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_A][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_B][index]+
-			hal_data->BW20_24G_Diff[RF_PATH_C][index]+
-			hal_data->BW20_24G_Diff[TxCount][index];
-			bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
 		}
 	}
 }
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index e058162..2670d6b 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -69,13 +69,11 @@
 enum rf_radio_path {
 	RF_PATH_A = 0,			/* Radio Path A */
 	RF_PATH_B = 1,			/* Radio Path B */
-	RF_PATH_C = 2,			/* Radio Path C */
-	RF_PATH_D = 3,			/* Radio Path D */
 };
 
 #define MAX_PG_GROUP 13
 
-#define	RF_PATH_MAX			3
+#define	RF_PATH_MAX			2
 #define		MAX_RF_PATH		RF_PATH_MAX
 #define		MAX_TX_COUNT		4 /* path numbers */
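Dropping RF_PATH_C/RF_PATH_D and shrinking RF_PATH_MAX to 2 matches the hardware, which has no C/D radio paths, so the bb_cfg.c and phy.c branches removed above were dead configuration. Sizing per-path tables by the enum keeps them in step with it; an illustrative sketch, not the driver's actual types:

enum rf_radio_path {
	RF_PATH_A = 0,
	RF_PATH_B = 1,
	NR_RF_PATHS		/* == 2, plays the role of RF_PATH_MAX */
};

struct path_regs {
	unsigned int rfintfs;
	unsigned int rfintfi;
};

static struct path_regs phy_reg_def[NR_RF_PATHS];

static void init_path_regs(const unsigned int *rfintfs,
			   const unsigned int *rfintfi)
{
	int path;

	/* One loop instead of a copy-pasted block per path. */
	for (path = RF_PATH_A; path < NR_RF_PATHS; path++) {
		phy_reg_def[path].rfintfs = rfintfs[path];
		phy_reg_def[path].rfintfi = rfintfi[path];
	}
}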
 
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 0729bd4..aeaf5f7e 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -110,7 +110,7 @@
 	u8	wifi_spec;/*  !turbo_mode */
 
 	u8	channel_plan;
-	bool	bAcceptAddbaReq;
+	bool	accept_addba_req; /* true = accept AP's Add BA req */
 
 	u8	antdiv_cfg;
 	u8	antdiv_type;
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index 62a0049..ef792bf 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -69,7 +69,7 @@
 };
 
 struct phy_status_rpt {
-	struct phy_rx_agc_info path_agc[3];
+	struct phy_rx_agc_info path_agc[RF_PATH_MAX];
 	u8	ch_corr[2];
 	u8	cck_sig_qual_ofdm_pwdb_all;
 	u8	cck_agc_rpt_ofdm_cfosho_a;
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index cbad364f..9f5050e 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -191,8 +191,6 @@
 struct hal_data_8188e {
 	struct HAL_VERSION	VersionID;
 	u16	CustomerID;
-	u8 *pfirmware;
-	u32 fwsize;
 	u16	FirmwareVersion;
 	u16	FirmwareVersionRev;
 	u16	FirmwareSubVersion;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 9093a5f..4471133 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -348,7 +348,7 @@
 	u8	candidate_tid_bitmap;
 	u8	dialogToken;
 	/*  Accept ADDBA Request */
-	bool bAcceptAddbaReq;
+	bool accept_addba_req;
 	u8	bwmode_updated;
 	u8	hidden_ssid_mode;
 
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index a076ede..5d6a1c9 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1907,7 +1907,7 @@
 	memset(param, 0, param_len);
 
 	param->cmd = IEEE_CMD_SET_ENCRYPTION;
-	memset(param->sta_addr, 0xff, ETH_ALEN);
+	eth_broadcast_addr(param->sta_addr);
 
 	switch (pext->alg) {
 	case IW_ENCODE_ALG_NONE:
@@ -3095,7 +3095,6 @@
 	.get_wireless_stats = rtw_get_wireless_stats,
 };
 
-#include <rtw_android.h>
 int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct iwreq *wrq = (struct iwreq *)rq;
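eth_broadcast_addr() and ether_addr_copy() from <linux/etherdevice.h> replace the open-coded memset/memcpy with ETH_ALEN; the same substitution appears in rtllib_softmac.c further down. Roughly, with a hypothetical header struct:

#include <linux/etherdevice.h>

struct mgmt_hdr {
	u8 addr1[ETH_ALEN];
	u8 addr2[ETH_ALEN];
};

static void fill_probe_addrs(struct mgmt_hdr *hdr, const u8 *own_mac)
{
	eth_broadcast_addr(hdr->addr1);		/* was memset(..., 0xff, ETH_ALEN) */
	ether_addr_copy(hdr->addr2, own_mac);	/* was memcpy(..., ETH_ALEN); both
						 * arguments must be u16-aligned */
}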
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 9201b94..7986e67 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -101,8 +101,6 @@
 static int rtw_low_power;
 static int rtw_wifi_spec;
 static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
-/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
-static int rtw_AcceptAddbaReq = true;
 
 static int rtw_antdiv_cfg = 2; /*  0:OFF , 1:ON, 2:decide by Efuse config */
 
@@ -593,7 +591,7 @@
 	registry_par->low_power = (u8)rtw_low_power;
 	registry_par->wifi_spec = (u8)rtw_wifi_spec;
 	registry_par->channel_plan = (u8)rtw_channel_plan;
-	registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+	registry_par->accept_addba_req = true;
 	registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
 	registry_par->antdiv_type = (u8)rtw_antdiv_type;
 	registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;
@@ -1157,7 +1155,6 @@
 static int netdev_close(struct net_device *pnetdev)
 {
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
-	struct hal_data_8188e *rtlhal = GET_HAL_DATA(padapter);
 
 	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
 
@@ -1190,9 +1187,6 @@
 		rtw_led_control(padapter, LED_CTL_POWER_OFF);
 	}
 
-	kfree(rtlhal->pfirmware);
-	rtlhal->pfirmware = NULL;
-
 	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
 	DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
 	return 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 01d50f7..6a68f17 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -474,8 +474,7 @@
 	pr_debug("+r871xu_dev_remove, hw_init_completed=%d\n",
 		if1->hw_init_completed);
 	rtw_free_drv_sw(if1);
-	if (pnetdev)
-		rtw_free_netdev(pnetdev);
+	rtw_free_netdev(pnetdev);
 }
 
 static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 2c19054c..735a199 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -17,8 +17,6 @@
 
 #include "rtllib.h"
 
-
-
 struct chnl_txpow_triple {
 	u8 FirstChnl;
 	u8  NumChnls;
@@ -42,7 +40,6 @@
  */
 
 struct rt_dot11d_info {
-
 	bool bEnabled;
 
 	u16 CountryIeLen;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index e9c4f97..ba64a4f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -680,7 +680,7 @@
 
 	rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
 	{
-		u32 ratr_value = 0;
+		u32 ratr_value;
 
 		ratr_value = regRATR;
 		if (priv->rf_type == RF_1T2R)
@@ -1000,7 +1000,7 @@
 	_rtl92e_update_msr(dev);
 
 	if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
-		u32 reg = 0;
+		u32 reg;
 
 		reg = rtl92e_readl(dev, RCR);
 		if (priv->rtllib->state == RTLLIB_LINKED) {
@@ -1186,7 +1186,7 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
 			 PCI_DMA_TODEVICE);
-	struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
+	struct tx_fwinfo_8190pci *pTxFwInfo;
 
 	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
 	memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
@@ -2235,7 +2235,7 @@
 
 void rtl92e_clear_irq(struct net_device *dev)
 {
-	u32 tmp = 0;
+	u32 tmp;
 
 	tmp = rtl92e_readl(dev, ISR);
 	rtl92e_writel(dev, ISR, tmp);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 8f989a9..0b06482 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -288,7 +288,7 @@
 
 void rtl92e_irq_enable(struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	priv->irq_enabled = 1;
 
@@ -297,7 +297,7 @@
 
 void rtl92e_irq_disable(struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	priv->ops->irq_disable(dev);
 
@@ -306,7 +306,7 @@
 
 static void _rtl92e_set_chan(struct net_device *dev, short ch)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
 	if (priv->chan_forced)
@@ -1546,14 +1546,14 @@
 *****************************************************************************/
 void rtl92e_rx_enable(struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	priv->ops->rx_enable(dev);
 }
 
 void rtl92e_tx_enable(struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	priv->ops->tx_enable(dev);
 
@@ -1612,7 +1612,7 @@
 static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
 				   int rate)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	int ret;
 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
 				    MAX_DEV_ADDR_SIZE);
@@ -1643,7 +1643,7 @@
 
 static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	int ret;
 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
 				    MAX_DEV_ADDR_SIZE);
@@ -1676,7 +1676,7 @@
 
 static void _rtl92e_tx_isr(struct net_device *dev, int prio)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
 
@@ -1850,7 +1850,7 @@
 static int _rtl92e_alloc_tx_ring(struct net_device *dev, unsigned int prio,
 				 unsigned int entries)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	struct tx_desc *ring;
 	dma_addr_t dma;
 	int i;
@@ -1944,7 +1944,7 @@
 void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
 				    struct rtllib_rx_stats *stats)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 
 	if (stats->bIsAMPDU && !stats->bFirstMPDU)
 		stats->mac_time = priv->LastRxDescTSF;
@@ -2022,7 +2022,7 @@
 
 static void _rtl92e_rx_normal(struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_hdr_1addr *rtllib_hdr = NULL;
 	bool unicast_packet = false;
 	bool bLedBlinking = true;
@@ -2128,7 +2128,7 @@
 
 static void _rtl92e_tx_resume(struct net_device *dev)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
 	struct sk_buff *skb;
 	int queue_index;
@@ -2279,7 +2279,7 @@
 /* based on ipw2200 driver */
 static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	struct iwreq *wrq = (struct iwreq *)rq;
 	int ret = -1;
 	struct rtllib_device *ieee = priv->rtllib;
@@ -2403,7 +2403,7 @@
 static irqreturn_t _rtl92e_irq(int irq, void *netdev)
 {
 	struct net_device *dev = (struct net_device *) netdev;
-	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+	struct r8192_priv *priv = rtllib_priv(dev);
 	unsigned long flags;
 	u32 inta;
 	u32 intb;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index ef03242..b6b714d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1875,7 +1875,7 @@
 				  struct r8192_priv,
 				  rfpath_check_wq);
 	struct net_device *dev = priv->rtllib->dev;
-	u8 rfpath = 0, i;
+	u8 rfpath, i;
 
 	rfpath = rtl92e_readb(dev, 0xc04);
 
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c04a020..c7fd1b1 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -189,7 +189,7 @@
 static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
 				 struct ba_record *pBA)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
 	skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
 
@@ -204,7 +204,7 @@
 static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
 				 struct ba_record *pBA, u16 StatusCode)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
 	skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP);
 	if (skb)
@@ -217,7 +217,7 @@
 			      struct ba_record *pBA, enum tr_select TxRxSelect,
 			      u16 ReasonCode)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
 	skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode);
 	if (skb)
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 563ac12..d99240e 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -76,7 +76,7 @@
 
 #define container_of_work_rsl(x, y, z) container_of(x, y, z)
 #define container_of_dwork_rsl(x, y, z)				\
-	container_of(container_of(x, struct delayed_work, work), y, z)
+	container_of(to_delayed_work(x), y, z)
 
 #define iwe_stream_add_event_rsl(info, start, stop, iwe, len)	\
 	iwe_stream_add_event(info, start, stop, iwe, len)
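container_of_dwork_rsl() now builds on to_delayed_work(), the <linux/workqueue.h> helper that wraps exactly this container_of() step; the open-coded versions in the rtl8192u callbacks below get the same treatment. The usual shape of a delayed-work callback, for reference:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
	struct delayed_work scan_wq;
	int scans_done;
};

static void my_scan_work(struct work_struct *work)
{
	/* to_delayed_work(w) == container_of(w, struct delayed_work, work) */
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_dev *dev = container_of(dwork, struct my_dev, scan_wq);

	dev->scans_done++;
	/* re-arm to run again in 100 ms */
	schedule_delayed_work(&dev->scan_wq, msecs_to_jiffies(100));
}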
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 113fbf7..f4f318a 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -45,15 +45,11 @@
 #include <linux/etherdevice.h>
 #include <linux/uaccess.h>
 #include <net/arp.h>
-
 #include "rtllib.h"
 
-
 u32 rt_global_debug_component = COMP_ERR;
 EXPORT_SYMBOL(rt_global_debug_component);
 
-
-
 static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
 {
 	if (ieee->networks)
@@ -110,7 +106,6 @@
 	}
 	rtllib_networks_initialize(ieee);
 
-
 	/* Default fragmentation threshold is maximum payload size */
 	ieee->fts = DEFAULT_FTS;
 	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 37343ec..af64bd3 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -905,7 +905,7 @@
 {
 	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
 	u16 fc = le16_to_cpu(hdr->frame_ctl);
-	size_t hdrlen = 0;
+	size_t hdrlen;
 
 	hdrlen = rtllib_get_hdrlen(fc);
 	if (HTCCheck(ieee, skb->data)) {
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index d0fedb0..25b5b5e 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -355,9 +355,9 @@
 	req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
 	req->header.duration_id = 0;
 
-	memset(req->header.addr1, 0xff, ETH_ALEN);
+	eth_broadcast_addr(req->header.addr1);
 	ether_addr_copy(req->header.addr2, ieee->dev->dev_addr);
-	memset(req->header.addr3, 0xff, ETH_ALEN);
+	eth_broadcast_addr(req->header.addr3);
 
 	tag = (u8 *) skb_put(skb, len + 2 + rate_len);
 
@@ -776,7 +776,7 @@
 {
 	struct sk_buff *skb;
 	struct rtllib_authentication *auth;
-	int  len = 0;
+	int  len;
 
 	len = sizeof(struct rtllib_authentication) + challengelen +
 		     ieee->tx_headroom + 4;
@@ -3328,7 +3328,7 @@
 			goto done;
 		}
 		new_crypt->ops = ops;
-		if (new_crypt->ops)
+		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
 			new_crypt->priv =
 				new_crypt->ops->init(param->u.crypt.idx);
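Adding try_module_get() before calling ops->init() pins the module that provides the crypto ops, closing a window in which it could be unloaded while its callbacks are still reachable; rtllib_wx.c below gets the same guard. A minimal sketch with a hypothetical ops table:

#include <linux/module.h>

struct crypto_ops {
	struct module *owner;
	void *(*init)(int keyidx);
};

static void *crypt_instantiate(const struct crypto_ops *ops, int keyidx)
{
	void *priv;

	if (!ops || !try_module_get(ops->owner))
		return NULL;		/* provider gone or being unloaded */

	priv = ops->init(keyidx);
	if (!priv)
		module_put(ops->owner);	/* init failed: drop the reference */

	return priv;
}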
 
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 86f52ac..01a75bd 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -243,7 +243,7 @@
 			     struct iw_request_info *info,
 			     union iwreq_data *wrqu, char *extra)
 {
-	u32 tmp_rate = 0;
+	u32 tmp_rate;
 
 	tmp_rate = TxCountToDataRate(ieee,
 				     ieee->softmac_stats.CurrentShowTxate);
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 80f7a09..84e6272 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -623,7 +623,7 @@
 			goto done;
 		}
 		new_crypt->ops = ops;
-		if (new_crypt->ops)
+		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
 			new_crypt->priv = new_crypt->ops->init(idx);
 
 		if (new_crypt->priv == NULL) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 967ef9a..68931e5 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -238,8 +238,6 @@
 
 #define ieee80211_tkip_null		ieee80211_tkip_null_rsl
 
-#define ieee80211_wep_null		ieee80211_wep_null_rsl
-
 #define free_ieee80211			free_ieee80211_rsl
 #define alloc_ieee80211			alloc_ieee80211_rsl
 
@@ -329,9 +327,6 @@
 
 
 // linux under 2.6.9 release may not support it, so modify it for common use
-#define MSECS(t) msecs_to_jiffies(t)
-#define msleep_interruptible_rsl  msleep_interruptible
-
 #define IEEE80211_DATA_LEN		2304
 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
    6.2.1.1.2.
@@ -2260,7 +2255,6 @@
 
 /* ieee80211_crypt_ccmp&tkip&wep.c */
 void ieee80211_tkip_null(void);
-void ieee80211_wep_null(void);
 void ieee80211_ccmp_null(void);
 
 int ieee80211_crypto_init(void);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 681611d..cde8e2b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -274,6 +274,3 @@
 	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
 }
 
-void ieee80211_wep_null(void)
-{
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 425b2dd..30fff6c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -177,7 +177,6 @@
 
 /* These function were added to load crypte module autoly */
 	ieee80211_tkip_null();
-	ieee80211_wep_null();
 	ieee80211_ccmp_null();
 
 	return dev;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 130c852..9dfde07 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -594,12 +594,18 @@
 {
 	PRT_HIGH_THROUGHPUT	pHTInfo = ieee->pHTInfo;
 	PRX_REORDER_ENTRY	pReorderEntry = NULL;
-	struct ieee80211_rxb *prxbIndicateArray[REORDER_WIN_SIZE];
+	struct ieee80211_rxb **prxbIndicateArray;
 	u8			WinSize = pHTInfo->RxReorderWinSize;
 	u16			WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
 	u8			index = 0;
 	bool			bMatchWinStart = false, bPktInBuf = false;
 	IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__func__,SeqNum,pTS->RxIndicateSeq,WinSize);
+
+	prxbIndicateArray = kmalloc(sizeof(struct ieee80211_rxb *) *
+			REORDER_WIN_SIZE, GFP_KERNEL);
+	if (!prxbIndicateArray)
+		return;
+
 	/* Rx Reorder initialize condition.*/
 	if (pTS->RxIndicateSeq == 0xffff) {
 		pTS->RxIndicateSeq = SeqNum;
@@ -618,6 +624,8 @@
 			kfree(prxb);
 			prxb = NULL;
 		}
+
+		kfree(prxbIndicateArray);
 		return;
 	}
 
@@ -741,6 +749,7 @@
 		// Indicate packets
 		if(index>REORDER_WIN_SIZE){
 			IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+			kfree(prxbIndicateArray);
 			return;
 		}
 		ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
@@ -752,9 +761,12 @@
 		pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
 		if(timer_pending(&pTS->RxPktPendingTimer))
 			del_timer_sync(&pTS->RxPktPendingTimer);
-		pTS->RxPktPendingTimer.expires = jiffies + MSECS(pHTInfo->RxReorderPendingTime);
+		pTS->RxPktPendingTimer.expires = jiffies +
+				msecs_to_jiffies(pHTInfo->RxReorderPendingTime);
 		add_timer(&pTS->RxPktPendingTimer);
 	}
+
+	kfree(prxbIndicateArray);
 }
 
 static u8 parse_subframe(struct sk_buff *skb,
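Moving prxbIndicateArray off the stack removes a REORDER_WIN_SIZE pointer array from an RX-path stack frame, at the cost of a kfree() on every early return (three of them above). A sketch of the shape; kmalloc_array() would express the sizing a bit more idiomatically, and GFP_KERNEL assumes this path never runs in atomic context:

#include <linux/slab.h>

#define WIN_SIZE 128			/* stand-in for REORDER_WIN_SIZE */

struct rxb;				/* opaque payload type */

static void indicate_window(struct rxb *(*get_next)(void),
			    void (*indicate)(struct rxb **bufs, int n))
{
	struct rxb **arr;
	struct rxb *r;
	int n = 0;

	arr = kmalloc_array(WIN_SIZE, sizeof(*arr), GFP_KERNEL);
	if (!arr)
		return;

	while (n < WIN_SIZE && (r = get_next()) != NULL)
		arr[n++] = r;

	if (n)
		indicate(arr, n);

	kfree(arr);			/* every exit path must free the buffer */
}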
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 38c3eb7..f8041f9d6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -103,12 +103,12 @@
 {
 	u8 *tag = *tag_p;
 
-	*tag++ = MFIE_TYPE_GENERIC; //0
+	*tag++ = MFIE_TYPE_GENERIC; /* 0 */
 	*tag++ = 7;
 	*tag++ = 0x00;
 	*tag++ = 0x50;
 	*tag++ = 0xf2;
-	*tag++ = 0x02;//5
+	*tag++ = 0x02;	/* 5 */
 	*tag++ = 0x00;
 	*tag++ = 0x01;
 #ifdef SUPPORT_USPD
@@ -128,12 +128,12 @@
 {
 	u8 *tag = *tag_p;
 
-	*tag++ = MFIE_TYPE_GENERIC; //0
+	*tag++ = MFIE_TYPE_GENERIC; /* 0 */
 	*tag++ = 7;
 	*tag++ = 0x00;
 	*tag++ = 0xe0;
 	*tag++ = 0x4c;
-	*tag++ = 0x01;//5
+	*tag++ = 0x01;	/* 5 */
 	*tag++ = 0x02;
 	*tag++ = 0x11;
 	*tag++ = 0x00;
@@ -186,14 +186,14 @@
 	PRT_HIGH_THROUGHPUT      pHTInfo = ieee->pHTInfo;
 	u8 rate;
 
-	// 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M.
+	/* 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. */
 	if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
 		rate = 0x0c;
 	else
 		rate = ieee->basic_rate & 0x7f;
 
 	if (rate == 0) {
-		// 2005.01.26, by rcnjko.
+		/* 2005.01.26, by rcnjko. */
 		if(ieee->mode == IEEE_A||
 		   ieee->mode== IEEE_N_5G||
 		   (ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK))
@@ -340,7 +340,7 @@
 
 	req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
 	req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-	req->header.duration_id = 0; //FIXME: is this OK ?
+	req->header.duration_id = 0; /* FIXME: is this OK? */
 
 	memset(req->header.addr1, 0xff, ETH_ALEN);
 	memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -380,7 +380,8 @@
 	if (ieee->beacon_txing && ieee->ieee_up) {
 //		if(!timer_pending(&ieee->beacon_timer))
 //			add_timer(&ieee->beacon_timer);
-		mod_timer(&ieee->beacon_timer,jiffies+(MSECS(ieee->current_network.beacon_interval-5)));
+		mod_timer(&ieee->beacon_timer,
+			  jiffies + msecs_to_jiffies(ieee->current_network.beacon_interval-5));
 	}
 	//spin_unlock_irqrestore(&ieee->beacon_lock,flags);
 }
@@ -468,7 +469,7 @@
 		if (ieee->state >= IEEE80211_LINKED && ieee->sync_scan_hurryup)
 			goto out;
 
-		msleep_interruptible_rsl(IEEE80211_SOFTMAC_SCAN_TIME);
+		msleep_interruptible(IEEE80211_SOFTMAC_SCAN_TIME);
 
 	}
 out:
@@ -487,7 +488,7 @@
 
 static void ieee80211_softmac_scan_wq(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
 	static short watchdog;
 	u8 channel_map[MAX_CHANNEL_NUMBER+1];
@@ -514,7 +515,7 @@
 		ieee80211_send_probe_requests(ieee);
 
 
-	queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
+	schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
 
 	up(&ieee->scan_sem);
 	return;
@@ -613,7 +614,7 @@
 	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
 		if (ieee->scanning == 0) {
 			ieee->scanning = 1;
-			queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
+			schedule_delayed_work(&ieee->softmac_scan_wq, 0);
 		}
 	}else
 		ieee->start_scan(ieee->dev);
@@ -672,7 +673,7 @@
 	else if(ieee->auth_mode == 1)
 		auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
 	else if(ieee->auth_mode == 2)
-		auth->algorithm = WLAN_AUTH_OPEN;//0x80;
+		auth->algorithm = WLAN_AUTH_OPEN; /* 0x80; */
 	printk("=================>%s():auth->algorithm is %d\n",__func__,auth->algorithm);
 	auth->transaction = cpu_to_le16(ieee->associate_seq);
 	ieee->associate_seq++;
@@ -727,7 +728,7 @@
 
 	encrypt = ieee->host_encrypt && crypt && crypt->ops &&
 		((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
-	//HT ralated element
+	/* HT related element */
 	tmp_ht_cap_buf =(u8 *) &(ieee->pHTInfo->SelfHTCap);
 	tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
 	tmp_ht_info_buf =(u8 *) &(ieee->pHTInfo->SelfHTInfo);
@@ -765,13 +766,13 @@
 	memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
 	memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
 
-	beacon_buf->header.duration_id = 0; //FIXME
+	beacon_buf->header.duration_id = 0; /* FIXME */
 	beacon_buf->beacon_interval =
 		cpu_to_le16(ieee->current_network.beacon_interval);
 	beacon_buf->capability =
 		cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
 	beacon_buf->capability |=
-		cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here
+		cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); /* add short preamble here */
 
 	if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
 		beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
@@ -1012,7 +1013,7 @@
 	crypt = ieee->crypt[ieee->tx_keyidx];
 	encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name,"WEP") || wpa_ie_len));
 
-	//Include High Throuput capability && Realtek proprietary
+	/* Include High Throughput capability && Realtek proprietary */
 	if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
 	{
 		ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
@@ -1044,8 +1045,8 @@
 
 #ifdef THOMAS_TURBO
 	len = sizeof(struct ieee80211_assoc_request_frame)+ 2
-		+ beacon->ssid_len//essid tagged val
-		+ rate_len//rates tagged val
+		+ beacon->ssid_len	/* essid tagged val */
+		+ rate_len	/* rates tagged val */
 		+ wpa_ie_len
 		+ wmm_info_len
 		+ turbo_info_len
@@ -1057,8 +1058,8 @@
 		+ ieee->tx_headroom;
 #else
 	len = sizeof(struct ieee80211_assoc_request_frame)+ 2
-		+ beacon->ssid_len//essid tagged val
-		+ rate_len//rates tagged val
+		+ beacon->ssid_len	/* essid tagged val */
+		+ rate_len	/* rates tagged val */
 		+ wpa_ie_len
 		+ wmm_info_len
 		+ ht_cap_len
@@ -1240,7 +1241,7 @@
 
 	ieee->state = IEEE80211_ASSOCIATING_RETRY;
 
-	queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \
+	schedule_delayed_work(&ieee->associate_retry_wq,
 			   IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
 
 	spin_unlock_irqrestore(&ieee->lock, flags);
@@ -1381,7 +1382,7 @@
 
 	ieee->state = IEEE80211_LINKED;
 	//ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet);
-	queue_work(ieee->wq, &ieee->associate_complete_wq);
+	schedule_work(&ieee->associate_complete_wq);
 }
 
 static void ieee80211_associate_procedure_wq(struct work_struct *work)
@@ -1482,7 +1483,7 @@
 					}
 
 					ieee->state = IEEE80211_ASSOCIATING;
-					queue_work(ieee->wq, &ieee->associate_procedure_wq);
+					schedule_work(&ieee->associate_procedure_wq);
 				}else{
 					if(ieee80211_is_54g(&ieee->current_network) &&
 						(ieee->modulation & IEEE80211_OFDM_MODULATION)){
@@ -1735,10 +1736,12 @@
 	if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps))
 		return 2;
 
-	if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
+	if(!time_after(jiffies,
+		       ieee->dev->trans_start + msecs_to_jiffies(timeout)))
 		return 0;
 
-	if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
+	if(!time_after(jiffies,
+		       ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
 		return 0;
 
 	if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
@@ -2041,7 +2044,7 @@
 					"Association response status code 0x%x\n",
 					errcode);
 				if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
-					queue_work(ieee->wq, &ieee->associate_procedure_wq);
+					schedule_work(&ieee->associate_procedure_wq);
 				} else {
 					ieee80211_associate_abort(ieee);
 				}
@@ -2097,7 +2100,7 @@
 			notify_wx_assoc_event(ieee);
 			//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
 			RemovePeerTS(ieee, header->addr2);
-			queue_work(ieee->wq, &ieee->associate_procedure_wq);
+			schedule_work(&ieee->associate_procedure_wq);
 		}
 		break;
 	case IEEE80211_STYPE_MANAGE_ACT:
@@ -2330,7 +2333,7 @@
 static void ieee80211_start_ibss_wq(struct work_struct *work)
 {
 
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
 	/* iwconfig mode ad-hoc will schedule this and return
 	 * on the other hand this will block further iwconfig SET
@@ -2439,7 +2442,7 @@
 
 inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
 {
-	queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150);
+	schedule_delayed_work(&ieee->start_ibss_wq, 150);
 }
 
 /* this is called only in user context, with wx_sem held */
@@ -2504,7 +2507,7 @@
 
 static void ieee80211_associate_retry_wq(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
 	unsigned long flags;
 
@@ -2722,7 +2725,6 @@
 	setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
 		    (unsigned long)ieee);
 
-	ieee->wq = create_workqueue(DRV_NAME);
 
 	INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
 	INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
@@ -2752,7 +2754,6 @@
 	del_timer_sync(&ieee->associate_timer);
 
 	cancel_delayed_work(&ieee->associate_retry_wq);
-	destroy_workqueue(ieee->wq);
 
 	up(&ieee->wx_sem);
 }
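With ieee->wq gone, the softmac work items run on the shared system workqueue via schedule_work()/schedule_delayed_work(), which is appropriate as long as the items are short and never relied on a private queue for flush ordering; r8192U_core.c below drops priv->priv_wq the same way. Roughly:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct softmac {
	struct work_struct assoc_wq;
	struct delayed_work scan_wq;
};

static void softmac_kick(struct softmac *sm)
{
	/* was: queue_work(private_wq, &sm->assoc_wq); */
	schedule_work(&sm->assoc_wq);

	/* was: queue_delayed_work(private_wq, &sm->scan_wq, delay); */
	schedule_delayed_work(&sm->scan_wq, msecs_to_jiffies(100));
}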
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 3bde744..789b7c9 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -19,7 +19,7 @@
 {
 	pBA->bValid = true;
 	if(Time != 0)
-		mod_timer(&pBA->Timer, jiffies + MSECS(Time));
+		mod_timer(&pBA->Timer, jiffies + msecs_to_jiffies(Time));
 }
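MSECS() was only a wrapper around msecs_to_jiffies() (the ieee80211.h hunk above deletes the macro), so the timer code now calls the helper directly. The rearm pattern used here and in rtl819x_TSProc.c, sketched:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Re-arm @t to fire @ms milliseconds from now; a zero @ms leaves it alone. */
static void rearm_timer_ms(struct timer_list *t, unsigned int ms)
{
	if (ms)
		mod_timer(t, jiffies + msecs_to_jiffies(ms));
}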
 
 /********************************************************************************************************************
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index f33c743..148d0d45 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -35,9 +35,7 @@
 	u8 index = 0;
 	bool bPktInBuf = false;
 
-
 	spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
-	//PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
 	IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__func__);
 	if(pRxTs->RxTimeoutIndicateSeq != 0xffff)
 	{
@@ -87,10 +85,10 @@
 	if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
 	{
 		pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
-		mod_timer(&pRxTs->RxPktPendingTimer,  jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime));
+		mod_timer(&pRxTs->RxPktPendingTimer,
+			  jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
 	}
 	spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
-	//PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
 }
 
 /********************************************************************************************************************
@@ -212,7 +210,8 @@
 	del_timer_sync(&pTsCommonInfo->InactTimer);
 
 	if(InactTime!=0)
-		mod_timer(&pTsCommonInfo->InactTimer, jiffies + MSECS(InactTime));
+		mod_timer(&pTsCommonInfo->InactTimer,
+			  jiffies + msecs_to_jiffies(InactTime));
 }
 
 
@@ -469,7 +468,6 @@
 
 		while(!list_empty(&pRxTS->RxPendingPktList))
 		{
-		//      PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
 			spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
 			//pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
 			pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
@@ -489,7 +487,6 @@
 				prxb = NULL;
 			}
 			list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
-			//PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
 			spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
 		}
 
@@ -590,7 +587,8 @@
 		if(pTxTS->bAddBaReqDelayed)
 		{
 			IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
-			mod_timer(&pTxTS->TsAddBaTimer, jiffies + MSECS(TS_ADDBA_DELAY));
+			mod_timer(&pTxTS->TsAddBaTimer,
+				  jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
 		}
 		else
 		{
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index e000329..5c3bb3b 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -1,12 +1,11 @@
 /*
-  This is part of the rtl8192 driver
-  released under the GPL (See file COPYING for details).
-
-  This files contains programming code for the rtl8256
-  radio frontend.
-
-  *Many* thanks to Realtek Corp. for their great support!
-
+* This is part of the rtl8192 driver
+* released under the GPL (See file COPYING for details).
+*
+* This file contains programming code for the rtl8256
+* radio frontend.
+*
+* *Many* thanks to Realtek Corp. for their great support!
 */
 
 #include "r8192U.h"
@@ -22,7 +21,8 @@
  * Output:      NONE
  * Return:      NONE
  * Note:	8226 support both 20M  and 40 MHz
- *---------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
 void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
 {
 	u8	eRFPath;
@@ -83,7 +83,8 @@
  * Input:       struct net_device*	dev
  * Output:      NONE
  * Return:      NONE
- *---------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
 void PHY_RF8256_Config(struct net_device *dev)
 {
 	struct r8192_priv *priv = ieee80211_priv(dev);
@@ -100,7 +101,8 @@
  * Input:       struct net_device*	dev
  * Output:      NONE
  * Return:      NONE
- *---------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
 void phy_RF8256_Config_ParaFile(struct net_device *dev)
 {
 	u32	u4RegValue = 0;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index f4a4eae..3a93218 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1092,10 +1092,17 @@
 static void rtl8192_tx_isr(struct urb *tx_urb)
 {
 	struct sk_buff *skb = (struct sk_buff *)tx_urb->context;
-	struct net_device *dev = (struct net_device *)(skb->cb);
+	struct net_device *dev;
 	struct r8192_priv *priv = NULL;
-	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-	u8  queue_index = tcb_desc->queue_index;
+	cb_desc *tcb_desc;
+	u8  queue_index;
+
+	if (!skb)
+		return;
+
+	dev = (struct net_device *)(skb->cb);
+	tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+	queue_index = tcb_desc->queue_index;
 
 	priv = ieee80211_priv(dev);
 
@@ -1113,11 +1120,9 @@
 	}
 
 	/* free skb and tx_urb */
-	if (skb != NULL) {
-		dev_kfree_skb_any(skb);
-		usb_free_urb(tx_urb);
-		atomic_dec(&priv->tx_pending[queue_index]);
-	}
+	dev_kfree_skb_any(skb);
+	usb_free_urb(tx_urb);
+	atomic_dec(&priv->tx_pending[queue_index]);
 
 	/*
 	 * Handle HW Beacon:
@@ -1371,7 +1376,7 @@
 */
 static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
 {
-	u8 QueueSelect = 0x0;       /* defualt set to */
+	u8 QueueSelect = 0x0;       /* default set to */
 
 	switch (QueueID) {
 	case BE_QUEUE:
@@ -1957,7 +1962,7 @@
 		     network->qos_data.param_count)) {
 			network->qos_data.old_param_count =
 				network->qos_data.param_count;
-			queue_work(priv->priv_wq, &priv->qos_activate);
+			schedule_work(&priv->qos_activate);
 			RT_TRACE(COMP_QOS,
 				 "QoS parameters change call qos_activate\n");
 		}
@@ -1966,7 +1971,7 @@
 		       &def_qos_parameters, size);
 
 		if ((network->qos_data.active == 1) && (active_network == 1)) {
-			queue_work(priv->priv_wq, &priv->qos_activate);
+			schedule_work(&priv->qos_activate);
 			RT_TRACE(COMP_QOS,
 				 "QoS was disabled call qos_activate\n");
 		}
@@ -1985,7 +1990,7 @@
 	struct r8192_priv *priv = ieee80211_priv(dev);
 
 	rtl8192_qos_handle_probe_response(priv, 1, network);
-	queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0);
+	schedule_delayed_work(&priv->update_beacon_wq, 0);
 	return 0;
 
 }
@@ -2037,7 +2042,7 @@
 		 network->flags,
 		 priv->ieee80211->current_network.qos_data.active);
 	if (set_qos_param == 1)
-		queue_work(priv->priv_wq, &priv->qos_activate);
+		schedule_work(&priv->qos_activate);
 
 
 	return 0;
@@ -2382,7 +2387,6 @@
 {
 	struct r8192_priv *priv = ieee80211_priv(dev);
 
-	priv->priv_wq = create_workqueue(DRV_NAME);
 
 	INIT_WORK(&priv->reset_wq, rtl8192_restart);
 
@@ -3436,8 +3440,7 @@
 
 static void rtl819x_watchdog_wqcallback(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work,
-						  struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct r8192_priv *priv = container_of(dwork,
 					       struct r8192_priv, watch_dog_wq);
 	struct net_device *dev = priv->ieee80211->dev;
@@ -3514,7 +3517,7 @@
 {
 	struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
 
-	queue_delayed_work(priv->priv_wq, &priv->watch_dog_wq, 0);
+	schedule_delayed_work(&priv->watch_dog_wq, 0);
 	mod_timer(&priv->watch_dog_timer,
 		  jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
 }
@@ -4297,7 +4300,7 @@
 	if (is_cck_rate) {
 		/* (1)Hardware does not provide RSSI for CCK */
 
-		/* (2)PWDB, Average PWDB cacluated by hardware
+		/* (2)PWDB, Average PWDB calculated by hardware
 		 * (for rate adaptive)
 		 */
 		u8 report;
@@ -4398,7 +4401,7 @@
 		}
 
 
-		/* (2)PWDB, Average PWDB cacluated by hardware
+		/* (2)PWDB, Average PWDB calculated by hardware
 		 * (for rate adaptive)
 		 */
 		rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
@@ -5018,7 +5021,6 @@
 	kfree(priv->pFirmware);
 	priv->pFirmware = NULL;
 	rtl8192_usb_deleteendpoints(dev);
-	destroy_workqueue(priv->priv_wq);
 	mdelay(10);
 fail:
 	free_ieee80211(dev);
@@ -5056,7 +5058,6 @@
 		kfree(priv->pFirmware);
 		priv->pFirmware = NULL;
 		rtl8192_usb_deleteendpoints(dev);
-		destroy_workqueue(priv->priv_wq);
 		mdelay(10);
 	}
 	free_ieee80211(dev);
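rtl8192_tx_isr() now bails out before dereferencing a NULL URB context instead of reading skb->cb first and checking skb only when freeing, and the skb/urb cleanup no longer sits inside a redundant NULL test. The reordered shape, reduced to its essentials:

#include <linux/usb.h>
#include <linux/skbuff.h>

static void tx_complete(struct urb *tx_urb)
{
	struct sk_buff *skb = tx_urb->context;

	if (!skb)
		return;		/* nothing attached; mirrors the driver's early return */

	/* ...account for the completed frame using skb->cb here... */

	dev_kfree_skb_any(skb);	/* safe from interrupt context */
	usb_free_urb(tx_urb);
}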
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 375ec96..1e0e53c 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -767,7 +767,7 @@
 
 void dm_txpower_trackingcallback(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct r8192_priv *priv = container_of(dwork, struct r8192_priv, txpower_tracking_wq);
 	struct net_device *dev = priv->ieee80211->dev;
 
@@ -1628,47 +1628,75 @@
 void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
 				       u32 dm_value)
 {
-	if (dm_type == DIG_TYPE_THRESH_HIGH) {
+	switch (dm_type) {
+	case DIG_TYPE_THRESH_HIGH:
 		dm_digtable.rssi_high_thresh = dm_value;
-	} else if (dm_type == DIG_TYPE_THRESH_LOW) {
+		break;
+
+	case DIG_TYPE_THRESH_LOW:
 		dm_digtable.rssi_low_thresh = dm_value;
-	} else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
+		break;
+
+	case DIG_TYPE_THRESH_HIGHPWR_HIGH:
 		dm_digtable.rssi_high_power_highthresh = dm_value;
-	} else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_LOW) {
+		break;
+
+	case DIG_TYPE_THRESH_HIGHPWR_LOW:
 		dm_digtable.rssi_high_power_lowthresh = dm_value;
-	} else if (dm_type == DIG_TYPE_ENABLE) {
+		break;
+
+	case DIG_TYPE_ENABLE:
 		dm_digtable.dig_state		= DM_STA_DIG_MAX;
 		dm_digtable.dig_enable_flag	= true;
-	} else if (dm_type == DIG_TYPE_DISABLE) {
+		break;
+
+	case DIG_TYPE_DISABLE:
 		dm_digtable.dig_state		= DM_STA_DIG_MAX;
 		dm_digtable.dig_enable_flag	= false;
-	} else if (dm_type == DIG_TYPE_DBG_MODE) {
+		break;
+
+	case DIG_TYPE_DBG_MODE:
 		if (dm_value >= DM_DBG_MAX)
 			dm_value = DM_DBG_OFF;
 		dm_digtable.dbg_mode		= (u8)dm_value;
-	} else if (dm_type == DIG_TYPE_RSSI) {
+		break;
+
+	case DIG_TYPE_RSSI:
 		if (dm_value > 100)
 			dm_value = 30;
 		dm_digtable.rssi_val			= (long)dm_value;
-	} else if (dm_type == DIG_TYPE_ALGORITHM) {
+		break;
+
+	case DIG_TYPE_ALGORITHM:
 		if (dm_value >= DIG_ALGO_MAX)
 			dm_value = DIG_ALGO_BY_FALSE_ALARM;
 		if (dm_digtable.dig_algorithm != (u8)dm_value)
 			dm_digtable.dig_algorithm_switch = 1;
 		dm_digtable.dig_algorithm	= (u8)dm_value;
-	} else if (dm_type == DIG_TYPE_BACKOFF) {
+		break;
+
+	case DIG_TYPE_BACKOFF:
 		if (dm_value > 30)
 			dm_value = 30;
 		dm_digtable.backoff_val		= (u8)dm_value;
-	} else if (dm_type == DIG_TYPE_RX_GAIN_MIN) {
+		break;
+
+	case DIG_TYPE_RX_GAIN_MIN:
 		if (dm_value == 0)
 			dm_value = 0x1;
 		dm_digtable.rx_gain_range_min = (u8)dm_value;
-	} else if (dm_type == DIG_TYPE_RX_GAIN_MAX) {
+		break;
+
+	case DIG_TYPE_RX_GAIN_MAX:
 		if (dm_value > 0x50)
 			dm_value = 0x50;
 		dm_digtable.rx_gain_range_max = (u8)dm_value;
+		break;
+
+	default:
+		break;
 	}
+
 }	/* DM_ChangeDynamicInitGainThresh */
 
 /*-----------------------------------------------------------------------------
@@ -2412,7 +2440,7 @@
  *---------------------------------------------------------------------------*/
 void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct r8192_priv *priv = container_of(dwork, struct r8192_priv, rfpath_check_wq);
 	struct net_device *dev = priv->ieee80211->dev;
 	/*bool bactually_set = false;*/
@@ -2769,12 +2797,14 @@
 		if (bDoubleTimeInterval) {
 			if (timer_pending(&priv->fsync_timer))
 				del_timer_sync(&priv->fsync_timer);
-			priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
+			priv->fsync_timer.expires = jiffies +
+				msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
 			add_timer(&priv->fsync_timer);
 		} else {
 			if (timer_pending(&priv->fsync_timer))
 				del_timer_sync(&priv->fsync_timer);
-			priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
+			priv->fsync_timer.expires = jiffies +
+				msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
 			add_timer(&priv->fsync_timer);
 		}
 	} else {
@@ -2847,7 +2877,8 @@
 	}
 	if (timer_pending(&priv->fsync_timer))
 		del_timer_sync(&priv->fsync_timer);
-	priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
+	priv->fsync_timer.expires = jiffies +
+			msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
 	add_timer(&priv->fsync_timer);
 
 	write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
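dm_change_dynamic_initgain_thresh() becomes a switch over dm_type, which reads more clearly than the long else-if ladder and keeps each per-case clamp next to its assignment. The transformation in miniature, with made-up names:

enum dig_type { DIG_THRESH_HIGH, DIG_THRESH_LOW, DIG_RSSI };

struct dig_table {
	unsigned int rssi_high_thresh;
	unsigned int rssi_low_thresh;
	long rssi_val;
};

static void dig_set(struct dig_table *t, enum dig_type type, unsigned int val)
{
	switch (type) {
	case DIG_THRESH_HIGH:
		t->rssi_high_thresh = val;
		break;
	case DIG_THRESH_LOW:
		t->rssi_low_thresh = val;
		break;
	case DIG_RSSI:
		if (val > 100)
			val = 30;	/* out-of-range input falls back to a default */
		t->rssi_val = val;
		break;
	default:
		break;
	}
}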
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 4911fef..f828e64 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -1,21 +1,23 @@
-/*
-   This file contains wireless extension handlers.
-
-   This is part of rtl8180 OpenSource driver.
-   Copyright (C) Andrea Merello 2004-2005  <andrea.merello@gmail.com>
-   Released under the terms of GPL (General Public Licence)
-
-   Parts of this driver are based on the GPL part
-   of the official realtek driver.
-
-   Parts of this driver are based on the rtl8180 driver skeleton
-   from Patric Schenke & Andres Salomon.
-
-   Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
-   We want to thank the Authors of those projects and the Ndiswrapper
-   project Authors.
-*/
+/******************************************************************************
+ *
+ * This file contains wireless extension handlers.
+ *
+ * This is part of rtl8180 OpenSource driver.
+ * Copyright (C) Andrea Merello 2004-2005  <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public Licence)
+ *
+ * Parts of this driver are based on the GPL part
+ * of the official realtek driver.
+ *
+ * Parts of this driver are based on the rtl8180 driver skeleton
+ * from Patric Schenke & Andres Salomon.
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
+ *
+ * We want to thank the Authors of those projects and the Ndiswrapper
+ * project Authors.
+ *
+ *****************************************************************************/
 
 #include <linux/string.h>
 #include "r8192U.h"
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index f264d88..696df34 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1683,8 +1683,7 @@
 
 void InitialGainOperateWorkItemCallBack(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work,
-						  work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct r8192_priv *priv = container_of(dwork, struct r8192_priv,
 					       initialgain_operate_wq);
 	struct net_device *dev = priv->ieee80211->dev;
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 3d64fee..29e47e1 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -159,6 +159,7 @@
 	struct mp_priv  mppriv;
 	s32	bDriverStopped;
 	s32	bSurpriseRemoved;
+	s32	bSuspended;
 	u32	IsrContent;
 	u32	ImrContent;
 	u8	EepromAddressSize;
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index d374824c..67ab580 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -12,8 +12,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Modifications for inclusion into the Linux staging tree are
  * Copyright(c) 2010 Larry Finger. All rights reserved.
@@ -61,7 +60,6 @@
 #define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED	6
 #define IEEE_CRYPT_ERR_CARD_CONF_FAILED		7
 
-
 #define	IEEE_CRYPT_ALG_NAME_LEN			16
 
 #define WPA_CIPHER_NONE				BIT(0)
@@ -70,8 +68,6 @@
 #define WPA_CIPHER_TKIP				BIT(3)
 #define WPA_CIPHER_CCMP				BIT(4)
 
-
-
 #define WPA_SELECTOR_LEN			4
 #define RSN_HEADER_LEN				4
 
@@ -88,7 +84,6 @@
 	WIRELESS_11BGN		= (WIRELESS_11B | WIRELESS_11G | WIRELESS_11N),
 };
 
-
 struct ieee_param {
 	u32 cmd;
 	u8 sta_addr[ETH_ALEN];
@@ -161,7 +156,6 @@
 	u16 seq_ctl;
 } __packed;
 
-
 struct	ieee80211_hdr_qos {
 	u16 frame_ctl;
 	u16 duration_id;
@@ -191,7 +185,6 @@
 	u16 length;
 } __packed;
 
-
 enum eap_type {
 	EAP_PACKET = 0,
 	EAPOL_START,
@@ -255,7 +248,6 @@
 #define IEEE80211_STYPE_CFPOLL		0x0060
 #define IEEE80211_STYPE_CFACKPOLL	0x0070
 #define IEEE80211_QOS_DATAGRP		0x0080
-#define IEEE80211_QoS_DATAGRP		IEEE80211_QOS_DATAGRP
 
 #define IEEE80211_SCTL_FRAG		0x000F
 #define IEEE80211_SCTL_SEQ		0xFFF0
@@ -305,15 +297,15 @@
 
 #define WLAN_AUTH_CHALLENGE_LEN 128
 
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
+#define WLAN_CAPABILITY_BSS BIT(0)
+#define WLAN_CAPABILITY_IBSS BIT(1)
+#define WLAN_CAPABILITY_CF_POLLABLE BIT(2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST BIT(3)
+#define WLAN_CAPABILITY_PRIVACY BIT(4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE BIT(5)
+#define WLAN_CAPABILITY_PBCC BIT(6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY BIT(7)
+#define WLAN_CAPABILITY_SHORT_SLOT BIT(10)
 
 /* Information Element IDs */
 #define WLAN_EID_SSID 0
@@ -331,24 +323,21 @@
 #define IEEE80211_DATA_HDR3_LEN 24
 #define IEEE80211_DATA_HDR4_LEN 30
 
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
+#define IEEE80211_STATMASK_SIGNAL BIT(0)
+#define IEEE80211_STATMASK_RSSI BIT(1)
+#define IEEE80211_STATMASK_NOISE BIT(2)
+#define IEEE80211_STATMASK_RATE BIT(3)
 #define IEEE80211_STATMASK_WEMASK 0x7
 
+#define IEEE80211_CCK_MODULATION    BIT(0)
+#define IEEE80211_OFDM_MODULATION   BIT(1)
 
-#define IEEE80211_CCK_MODULATION    (1<<0)
-#define IEEE80211_OFDM_MODULATION   (1<<1)
-
-#define IEEE80211_24GHZ_BAND     (1<<0)
-#define IEEE80211_52GHZ_BAND     (1<<1)
+#define IEEE80211_24GHZ_BAND     BIT(0)
+#define IEEE80211_52GHZ_BAND     BIT(1)
 
 #define IEEE80211_CCK_RATE_LEN			4
 #define IEEE80211_NUM_OFDM_RATESLEN	8
 
-
 #define IEEE80211_CCK_RATE_1MB		        0x02
 #define IEEE80211_CCK_RATE_2MB		        0x04
 #define IEEE80211_CCK_RATE_5MB		        0x0B
@@ -364,18 +353,18 @@
 #define IEEE80211_OFDM_RATE_54MB		0x6C
 #define IEEE80211_BASIC_RATE_MASK		0x80
 
-#define IEEE80211_CCK_RATE_1MB_MASK		(1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK		(1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK		(1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK		(1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK		(1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK		(1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK		(1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK		(1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK		(1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK		(1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK		(1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK		(1<<11)
+#define IEEE80211_CCK_RATE_1MB_MASK		BIT(0)
+#define IEEE80211_CCK_RATE_2MB_MASK		BIT(1)
+#define IEEE80211_CCK_RATE_5MB_MASK		BIT(2)
+#define IEEE80211_CCK_RATE_11MB_MASK		BIT(3)
+#define IEEE80211_OFDM_RATE_6MB_MASK		BIT(4)
+#define IEEE80211_OFDM_RATE_9MB_MASK		BIT(5)
+#define IEEE80211_OFDM_RATE_12MB_MASK		BIT(6)
+#define IEEE80211_OFDM_RATE_18MB_MASK		BIT(7)
+#define IEEE80211_OFDM_RATE_24MB_MASK		BIT(8)
+#define IEEE80211_OFDM_RATE_36MB_MASK		BIT(9)
+#define IEEE80211_OFDM_RATE_48MB_MASK		BIT(10)
+#define IEEE80211_OFDM_RATE_54MB_MASK		BIT(11)
 
 #define IEEE80211_CCK_RATES_MASK	        0x0000000F
 #define IEEE80211_CCK_BASIC_RATES_MASK		(IEEE80211_CCK_RATE_1MB_MASK | \
@@ -401,9 +390,6 @@
 #define IEEE80211_NUM_CCK_RATES	            4
 #define IEEE80211_OFDM_SHIFT_MASK_A         4
 
-
-
-
 /* NOTE: This data is for statistical purposes; not all hardware provides this
  *       information for frames received.  Not setting these will not cause
  *       any adverse affects.
@@ -481,15 +467,15 @@
 	uint swtxawake;
 };
 
-#define SEC_KEY_1         (1<<0)
-#define SEC_KEY_2         (1<<1)
-#define SEC_KEY_3         (1<<2)
-#define SEC_KEY_4         (1<<3)
-#define SEC_ACTIVE_KEY    (1<<4)
-#define SEC_AUTH_MODE     (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL         (1<<7)
-#define SEC_ENABLED       (1<<8)
+#define SEC_KEY_1         BIT(0)
+#define SEC_KEY_2         BIT(1)
+#define SEC_KEY_3         BIT(2)
+#define SEC_KEY_4         BIT(3)
+#define SEC_ACTIVE_KEY    BIT(4)
+#define SEC_AUTH_MODE     BIT(5)
+#define SEC_UNICAST_GROUP BIT(6)
+#define SEC_LEVEL         BIT(7)
+#define SEC_ENABLED       BIT(8)
 
 #define SEC_LEVEL_0      0 /* None */
 #define SEC_LEVEL_1      1 /* WEP 40 and 104 bit */
@@ -645,9 +631,9 @@
 
 #define MAX_WPA_IE_LEN 128
 
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM    (1<<1)
-#define NETWORK_HAS_CCK     (1<<2)
+#define NETWORK_EMPTY_ESSID BIT(0)
+#define NETWORK_HAS_OFDM    BIT(1)
+#define NETWORK_HAS_CCK     BIT(2)
 
 #define IEEE80211_DTIM_MBCAST 4
 #define IEEE80211_DTIM_UCAST 2
@@ -699,15 +685,15 @@
 #define DEFAULT_MAX_SCAN_AGE (15 * HZ)
 #define DEFAULT_FTS 2346
 
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+#define CFG_IEEE80211_RESERVE_FCS BIT(0)
+#define CFG_IEEE80211_COMPUTE_FCS BIT(1)
 
 #define MAXTID	16
 
-#define IEEE_A            (1<<0)
-#define IEEE_B            (1<<1)
-#define IEEE_G            (1<<2)
-#define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_A            BIT(0)
+#define IEEE_B            BIT(1)
+#define IEEE_G            BIT(2)
+#define IEEE_MODE_MASK    (IEEE_A | IEEE_B | IEEE_G)
 
 static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
 {
@@ -757,7 +743,7 @@
 unsigned char *r8712_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len,
 				 int limit);
 int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
-			int *pairwise_cipher);
+		       int *pairwise_cipher);
 int r8712_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
 			int *pairwise_cipher);
 int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index b89e2d3..ab19112 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -269,7 +269,6 @@
 
 static u8 init_default_value(struct _adapter *padapter)
 {
-	u8 ret  = _SUCCESS;
 	struct registry_priv *pregistrypriv = &padapter->registrypriv;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -302,7 +301,7 @@
 	r8712_init_registrypriv_dev_network(padapter);
 	r8712_update_registrypriv_dev_network(padapter);
 	/*misc.*/
-	return ret;
+	return _SUCCESS;
 }
 
 u8 r8712_init_drv_sw(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 9b91609..86a9089 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -293,7 +293,7 @@
 
 	r8712_write32(pAdapter, IOCMD_CTRL_REG, cmd);
 	msleep(100);
-	while ((0 != r8712_read32(pAdapter, IOCMD_CTRL_REG)) &&
+	while ((r8712_read32(pAdapter, IOCMD_CTRL_REG) != 0) &&
 	       (pollingcnts > 0)) {
 		pollingcnts--;
 		msleep(20);
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index eaa93fb..76f60ba 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -161,7 +161,7 @@
 
 	/* read one byte to check if E-Fuse is empty */
 	if (efuse_one_byte_rw(padapter, true, 0, &value)) {
-		if (0xFF == value)
+		if (value == 0xFF)
 			*empty = true;
 		else
 			*empty = false;
@@ -345,7 +345,7 @@
 				ret = false;
 			} else if (pkt.data[i * 2] != value) {
 				ret = false;
-				if (0xFF == value) /* write again */
+				if (value == 0xFF) /* write again */
 					efuse_one_byte_write(padapter, addr,
 							pkt.data[i * 2]);
 			}
@@ -353,7 +353,7 @@
 				ret = false;
 			} else if (pkt.data[i * 2 + 1] != value) {
 				ret = false;
-				if (0xFF == value) /* write again */
+				if (value == 0xFF) /* write again */
 					efuse_one_byte_write(padapter, addr + 1,
 							     pkt.data[i * 2 +
 								      1]);
@@ -420,7 +420,7 @@
 		}
 		/* write header fail */
 		bResult = false;
-		if (0xFF == efuse_data)
+		if (efuse_data == 0xFF)
 			return bResult; /* nothing damaged. */
 		/* call rescue procedure */
 		if (!fix_header(padapter, efuse_data, efuse_addr))
diff --git a/drivers/staging/rtl8712/rtl8712_io.c b/drivers/staging/rtl8712/rtl8712_io.c
index 4148d48..391eff3 100644
--- a/drivers/staging/rtl8712/rtl8712_io.c
+++ b/drivers/staging/rtl8712/rtl8712_io.c
@@ -36,109 +36,76 @@
 
 u8 r8712_read8(struct _adapter *adapter, u32 addr)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
-	u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	_read8 = pintfhdl->io_ops._read8;
-	return  _read8(pintfhdl, addr);
+	return hdl->io_ops._read8(hdl, addr);
 }
 
 u16 r8712_read16(struct _adapter *adapter, u32 addr)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
-	u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	_read16 = pintfhdl->io_ops._read16;
-	return _read16(pintfhdl, addr);
+	return hdl->io_ops._read16(hdl, addr);
 }
 
 u32 r8712_read32(struct _adapter *adapter, u32 addr)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
-	u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	_read32 = pintfhdl->io_ops._read32;
-	return _read32(pintfhdl, addr);
+	return hdl->io_ops._read32(hdl, addr);
 }
 
 void r8712_write8(struct _adapter *adapter, u32 addr, u8 val)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
-	void (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	_write8 = pintfhdl->io_ops._write8;
-	_write8(pintfhdl, addr, val);
+	hdl->io_ops._write8(hdl, addr, val);
 }
 
 void r8712_write16(struct _adapter *adapter, u32 addr, u16 val)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
-	void (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	_write16 = pintfhdl->io_ops._write16;
-	_write16(pintfhdl, addr, val);
+	hdl->io_ops._write16(hdl, addr, val);
 }
 
 void r8712_write32(struct _adapter *adapter, u32 addr, u32 val)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl  = &(pio_queue->intf);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	void (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
-
-	_write32 = pintfhdl->io_ops._write32;
-	_write32(pintfhdl, addr, val);
+	hdl->io_ops._write32(hdl, addr, val);
 }
 
 void r8712_read_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
-			  u8 *pmem);
 	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
 		return;
-	_read_mem = pintfhdl->io_ops._read_mem;
-	_read_mem(pintfhdl, addr, cnt, pmem);
+
+	hdl->io_ops._read_mem(hdl, addr, cnt, pmem);
 }
 
 void r8712_write_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
-	void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
-			   u8 *pmem);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	_write_mem = pintfhdl->io_ops._write_mem;
-	_write_mem(pintfhdl, addr, cnt, pmem);
+	hdl->io_ops._write_mem(hdl, addr, cnt, pmem);
 }
 
 void r8712_read_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl	*pintfhdl = &(pio_queue->intf);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
-			  u8 *pmem);
 	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
 		return;
-	_read_port = pintfhdl->io_ops._read_port;
-	_read_port(pintfhdl, addr, cnt, pmem);
+
+	hdl->io_ops._read_port(hdl, addr, cnt, pmem);
 }
 
 void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
 {
-	struct io_queue *pio_queue = adapter->pio_queue;
-	struct intf_hdl *pintfhdl = &(pio_queue->intf);
+	struct intf_hdl *hdl = &adapter->pio_queue->intf;
 
-	u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
-			   u8 *pmem);
-	_write_port = pintfhdl->io_ops._write_port;
-	_write_port(pintfhdl, addr, cnt, pmem);
+	hdl->io_ops._write_port(hdl, addr, cnt, pmem);
 }
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 562a102..5875677 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -136,15 +136,12 @@
 	unsigned long irqL;
 	struct cmd_obj *obj;
 
-	spin_lock_irqsave(&(queue->lock), irqL);
-	if (list_empty(&(queue->queue))) {
-		obj = NULL;
-	} else {
-		obj = LIST_CONTAINOR(queue->queue.next,
-				     struct cmd_obj, list);
+	spin_lock_irqsave(&queue->lock, irqL);
+	obj = list_first_entry_or_null(&queue->queue,
+				       struct cmd_obj, list);
+	if (obj)
 		list_del_init(&obj->list);
-	}
-	spin_unlock_irqrestore(&(queue->lock), irqL);
+	spin_unlock_irqrestore(&queue->lock, irqL);
 	return obj;
 }
 
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index edfc680..a15f3ce 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -137,7 +137,7 @@
 	}
 }
 
-static inline char *translate_scan(struct _adapter *padapter,
+static noinline_for_stack char *translate_scan(struct _adapter *padapter,
 				   struct iw_request_info *info,
 				   struct wlan_network *pnetwork,
 				   char *start, char *stop)
@@ -398,12 +398,9 @@
 			wep_key_idx = 0;
 		if (wep_key_len > 0) {
 			wep_key_len = wep_key_len <= 5 ? 5 : 13;
-			pwep = kmalloc((u32)(wep_key_len +
-				FIELD_OFFSET(struct NDIS_802_11_WEP,
-				KeyMaterial)), GFP_ATOMIC);
+			pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
 			if (pwep == NULL)
 				return -ENOMEM;
-			memset(pwep, 0, sizeof(struct NDIS_802_11_WEP));
 			pwep->KeyLength = wep_key_len;
 			pwep->Length = wep_key_len +
 				 FIELD_OFFSET(struct NDIS_802_11_WEP,
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index 7c346a4..c7f2e51 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -49,8 +49,7 @@
 
 uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -66,8 +65,7 @@
 
 uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -83,8 +81,7 @@
 
 uint oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -115,8 +112,7 @@
 
 uint oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -147,8 +143,7 @@
 
 uint oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -172,8 +167,7 @@
 
 uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 	u32 preamblemode = 0;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
@@ -202,8 +196,7 @@
 
 uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 	struct eeprom_priv *peeprompriv = &padapter->eeprompriv;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
@@ -216,8 +209,7 @@
 uint oid_rt_set_channelplan_hdl(struct oid_par_priv
 				       *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 	struct eeprom_priv *peeprompriv = &padapter->eeprompriv;
 
 	if (poid_par_priv->type_of_oid != SET_OID)
@@ -229,8 +221,7 @@
 uint oid_rt_set_preamble_mode_hdl(struct oid_par_priv
 					 *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 	u32 preamblemode = 0;
 
 	if (poid_par_priv->type_of_oid != SET_OID)
@@ -267,8 +258,7 @@
 uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv
 					  *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -285,8 +275,7 @@
 uint oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv
 					  *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != QUERY_OID)
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -325,8 +314,7 @@
 
 uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				    (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct NDIS_802_11_CONFIGURATION *pnic_Config;
 	u32   channelnum;
@@ -449,8 +437,7 @@
 					     poid_par_priv)
 {
 	uint status = RNDIS_STATUS_SUCCESS;
-	struct _adapter *Adapter = (struct _adapter *)
-			(poid_par_priv->adapter_context);
+	struct _adapter *Adapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -470,8 +457,7 @@
 uint oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv)
 {
 	uint status = RNDIS_STATUS_SUCCESS;
-	struct _adapter *Adapter = (struct _adapter *)
-			(poid_par_priv->adapter_context);
+	struct _adapter *Adapter = poid_par_priv->adapter_context;
 
 	if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */
 		return RNDIS_STATUS_NOT_ACCEPTED;
@@ -516,8 +502,7 @@
 
 uint oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv)
 {
-	struct _adapter *padapter = (struct _adapter *)
-				     (poid_par_priv->adapter_context);
+	struct _adapter *padapter = poid_par_priv->adapter_context;
 	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
 	u32 ulInfo;
 
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 04f727f..1b022dbd 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -87,16 +87,15 @@
 	unsigned long irqL;
 	struct wlan_network *pnetwork;
 	struct  __queue *free_queue = &pmlmepriv->free_bss_pool;
-	struct list_head *plist = NULL;
 
-	if (list_empty(&free_queue->queue))
-		return NULL;
 	spin_lock_irqsave(&free_queue->lock, irqL);
-	plist = free_queue->queue.next;
-	pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
-	list_del_init(&pnetwork->list);
-	pnetwork->last_scanned = jiffies;
-	pmlmepriv->num_of_scanned++;
+	pnetwork = list_first_entry_or_null(&free_queue->queue,
+					    struct wlan_network, list);
+	if (pnetwork) {
+		list_del_init(&pnetwork->list);
+		pnetwork->last_scanned = jiffies;
+		pmlmepriv->num_of_scanned++;
+	}
 	spin_unlock_irqrestore(&free_queue->lock, irqL);
 	return pnetwork;
 }
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 4ff5301..616ca39 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -72,14 +72,12 @@
 	_init_queue(&precvpriv->recv_pending_queue);
 	precvpriv->adapter = padapter;
 	precvpriv->free_recvframe_cnt = NR_RECVFRAME;
-	precvpriv->pallocated_frame_buf = kmalloc(NR_RECVFRAME *
+	precvpriv->pallocated_frame_buf = kzalloc(NR_RECVFRAME *
 				sizeof(union recv_frame) + RXFRAME_ALIGN_SZ,
 				GFP_ATOMIC);
 	if (precvpriv->pallocated_frame_buf == NULL)
 		return _FAIL;
 	kmemleak_not_leak(precvpriv->pallocated_frame_buf);
-	memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME *
-		sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
 	precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf +
 				    RXFRAME_ALIGN_SZ -
 				    ((addr_t)(precvpriv->pallocated_frame_buf) &
@@ -103,21 +101,17 @@
 	r8712_free_recv_priv(precvpriv);
 }
 
-union recv_frame *r8712_alloc_recvframe(struct  __queue *pfree_recv_queue)
+union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
 {
 	unsigned long irqL;
 	union recv_frame  *precvframe;
-	struct list_head *plist, *phead;
 	struct _adapter *padapter;
 	struct recv_priv *precvpriv;
 
 	spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
-	if (list_empty(&pfree_recv_queue->queue)) {
-		precvframe = NULL;
-	} else {
-		phead = &pfree_recv_queue->queue;
-		plist = phead->next;
-		precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+	precvframe = list_first_entry_or_null(&pfree_recv_queue->queue,
+					      union recv_frame, u.hdr.list);
+	if (precvframe) {
 		list_del_init(&precvframe->u.hdr.list);
 		padapter = precvframe->u.hdr.adapter;
 		if (padapter != NULL) {
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 162e61c..3fe45b6 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -89,16 +89,11 @@
 	spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
 }
 
-
-static void mfree_sta_priv_lock(struct	sta_priv *pstapriv)
-{
-	 mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
-}
-
 u32 _r8712_free_sta_priv(struct sta_priv *pstapriv)
 {
 	if (pstapriv) {
-		mfree_sta_priv_lock(pstapriv);
+		/* must be done before freeing sta_hash_lock */
+		mfree_all_stainfo(pstapriv);
 		kfree(pstapriv->pallocated_stainfo_buf);
 	}
 	return _SUCCESS;
@@ -116,13 +111,11 @@
 	unsigned long flags;
 
 	pfree_sta_queue = &pstapriv->free_sta_queue;
-	spin_lock_irqsave(&(pfree_sta_queue->lock), flags);
-	if (list_empty(&pfree_sta_queue->queue)) {
-		psta = NULL;
-	} else {
-		psta = LIST_CONTAINOR(pfree_sta_queue->queue.next,
-				      struct sta_info, list);
-		list_del_init(&(psta->list));
+	spin_lock_irqsave(&pfree_sta_queue->lock, flags);
+	psta = list_first_entry_or_null(&pfree_sta_queue->queue,
+					struct sta_info, list);
+	if (psta) {
+		list_del_init(&psta->list);
 		_init_stainfo(psta);
 		memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
 		index = wifi_mac_hash(hwaddr);
@@ -130,7 +123,7 @@
 			psta = NULL;
 			goto exit;
 		}
-		phash_list = &(pstapriv->sta_hash[index]);
+		phash_list = &pstapriv->sta_hash[index];
 		list_add_tail(&psta->hash_list, phash_list);
 		pstapriv->asoc_sta_count++;
 
@@ -154,7 +147,7 @@
 		}
 	}
 exit:
-	spin_unlock_irqrestore(&(pfree_sta_queue->lock), flags);
+	spin_unlock_irqrestore(&pfree_sta_queue->lock, flags);
 	return psta;
 }
 
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 68d65d2..7089ebd 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -741,21 +741,16 @@
 struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
 {
 	unsigned long irqL;
-	struct xmit_buf *pxmitbuf =  NULL;
-	struct list_head *plist, *phead;
+	struct xmit_buf *pxmitbuf;
 	struct  __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
 
 	spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
-	if (list_empty(&pfree_xmitbuf_queue->queue)) {
-		pxmitbuf = NULL;
-	} else {
-		phead = &pfree_xmitbuf_queue->queue;
-		plist = phead->next;
-		pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
-		list_del_init(&(pxmitbuf->list));
-	}
-	if (pxmitbuf !=  NULL)
+	pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue,
+					    struct xmit_buf, list);
+	if (pxmitbuf) {
+		list_del_init(&pxmitbuf->list);
 		pxmitpriv->free_xmitbuf_cnt--;
+	}
 	spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
 	return pxmitbuf;
 }
@@ -795,20 +790,14 @@
 		pfree_xmit_queue
 	*/
 	unsigned long irqL;
-	struct xmit_frame *pxframe = NULL;
-	struct list_head *plist, *phead;
+	struct xmit_frame *pxframe;
 	struct  __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
 
 	spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
-	if (list_empty(&pfree_xmit_queue->queue)) {
-		pxframe =  NULL;
-	} else {
-		phead = &pfree_xmit_queue->queue;
-		plist = phead->next;
-		pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
-		list_del_init(&(pxframe->list));
-	}
-	if (pxframe !=  NULL) {
+	pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
+					   struct xmit_frame, list);
+	if (pxframe) {
+		list_del_init(&pxframe->list);
 		pxmitpriv->free_xmitframe_cnt--;
 		pxframe->buf_addr = NULL;
 		pxframe->pxmitbuf = NULL;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index c71333f..f1d3d70 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -205,12 +205,15 @@
 static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state)
 {
 	struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
+	struct _adapter *padapter = netdev_priv(pnetdev);
 
 	netdev_info(pnetdev, "Suspending...\n");
 	if (!pnetdev || !netif_running(pnetdev)) {
 		netdev_info(pnetdev, "Unable to suspend\n");
 		return 0;
 	}
+	padapter->bSuspended = true;
+	rtl871x_intf_stop(padapter);
 	if (pnetdev->netdev_ops->ndo_stop)
 		pnetdev->netdev_ops->ndo_stop(pnetdev);
 	mdelay(10);
@@ -218,9 +221,16 @@
 	return 0;
 }
 
+static void rtl871x_intf_resume(struct _adapter *padapter)
+{
+	if (padapter->dvobjpriv.inirp_init)
+		padapter->dvobjpriv.inirp_init(padapter);
+}
+
 static int r871x_resume(struct usb_interface *pusb_intf)
 {
 	struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
+	struct _adapter *padapter = netdev_priv(pnetdev);
 
 	netdev_info(pnetdev,  "Resuming...\n");
 	if (!pnetdev || !netif_running(pnetdev)) {
@@ -230,6 +240,8 @@
 	netif_device_attach(pnetdev);
 	if (pnetdev->netdev_ops->ndo_open)
 		pnetdev->netdev_ops->ndo_open(pnetdev);
+	padapter->bSuspended = false;
+	rtl871x_intf_resume(padapter);
 	return 0;
 }
 
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 489a9e6..c2ac581 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -232,9 +232,14 @@
 		case -EPIPE:
 		case -ENODEV:
 		case -ESHUTDOWN:
-		case -ENOENT:
 			padapter->bDriverStopped = true;
 			break;
+		case -ENOENT:
+			if (!padapter->bSuspended) {
+				padapter->bDriverStopped = true;
+				break;
+			}
+			/* Fall through. */
 		case -EPROTO:
 			precvbuf->reuse = true;
 			r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index d398183..f8866d7 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -131,7 +131,7 @@
 
 	for (i = 0; i < 8; i++) {
 		pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
-		if (pxmitbuf->pxmit_urb[i] == NULL) {
+		if (!pxmitbuf->pxmit_urb[i]) {
 			netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
 			return _FAIL;
 		}
@@ -171,7 +171,7 @@
 		goto _xmit_entry_drop;
 	}
 	pxmitframe = r8712_alloc_xmitframe(pxmitpriv);
-	if (pxmitframe == NULL) {
+	if (!pxmitframe) {
 		ret = 0;
 		goto _xmit_entry_drop;
 	}
diff --git a/drivers/staging/rtl8723au/TODO b/drivers/staging/rtl8723au/TODO
index 175a0ce..f5d57d3 100644
--- a/drivers/staging/rtl8723au/TODO
+++ b/drivers/staging/rtl8723au/TODO
@@ -9,5 +9,5 @@
 - merge Realtek's bugfixes and new features into the driver
 - switch to use MAC80211
 
-Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
 Jes Sorensen <Jes.Sorensen@redhat.com>, and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index 1aa9b26..ce4b589 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -171,24 +171,20 @@
 	return ret;
 }
 
-void	expire_timeout_chk23a(struct rtw_adapter *padapter)
+void expire_timeout_chk23a(struct rtw_adapter *padapter)
 {
-	struct list_head *phead, *plist, *ptmp;
+	struct list_head *phead;
 	u8 updated = 0;
-	struct sta_info *psta;
+	struct sta_info *psta, *ptmp;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	u8 chk_alive_num = 0;
 	struct sta_info *chk_alive_list[NUM_STA];
 	int i;
 
 	spin_lock_bh(&pstapriv->auth_list_lock);
-
 	phead = &pstapriv->auth_list;
-
 	/* check auth_queue */
-	list_for_each_safe(plist, ptmp, phead) {
-		psta = container_of(plist, struct sta_info, auth_list);
-
+	list_for_each_entry_safe(psta, ptmp, phead, auth_list) {
 		if (psta->expire_to > 0) {
 			psta->expire_to--;
 			if (psta->expire_to == 0) {
@@ -206,19 +202,13 @@
 				spin_lock_bh(&pstapriv->auth_list_lock);
 			}
 		}
-
 	}
-
 	spin_unlock_bh(&pstapriv->auth_list_lock);
 
 	spin_lock_bh(&pstapriv->asoc_list_lock);
-
 	phead = &pstapriv->asoc_list;
-
 	/* check asoc_queue */
-	list_for_each_safe(plist, ptmp, phead) {
-		psta = container_of(plist, struct sta_info, asoc_list);
-
+	list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
 		if (chk_sta_is_alive(psta) || !psta->expire_to) {
 			psta->expire_to = pstapriv->expire_to;
 			psta->keep_alive_trycnt = 0;
@@ -283,7 +273,6 @@
 			}
 		}
 	}
-
 	spin_unlock_bh(&pstapriv->asoc_list_lock);
 
 	if (chk_alive_num) {
@@ -1059,7 +1048,7 @@
 
 int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr)
 {
-	struct list_head *plist, *phead;
+	struct list_head *phead;
 	u8 added = false;
 	int i, ret = 0;
 	struct rtw_wlan_acl_node *paclnode;
@@ -1073,12 +1062,8 @@
 		return -1;
 
 	spin_lock_bh(&pacl_node_q->lock);
-
 	phead = get_list_head(pacl_node_q);
-
-	list_for_each(plist, phead) {
-		paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
+	list_for_each_entry(paclnode, phead, list) {
 		if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
 			if (paclnode->valid == true) {
 				added = true;
@@ -1087,7 +1072,6 @@
 			}
 		}
 	}
-
 	spin_unlock_bh(&pacl_node_q->lock);
 
 	if (added)
@@ -1121,8 +1105,8 @@
 
 int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
 {
-	struct list_head *plist, *phead, *ptmp;
-	struct rtw_wlan_acl_node *paclnode;
+	struct list_head *phead;
+	struct rtw_wlan_acl_node *paclnode, *ptmp;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
 	struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -1130,12 +1114,8 @@
 	DBG_8723A("%s(acl_num =%d) = %pM\n", __func__, pacl_list->num, addr);
 
 	spin_lock_bh(&pacl_node_q->lock);
-
 	phead = get_list_head(pacl_node_q);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
+	list_for_each_entry_safe(paclnode, ptmp, phead, list) {
 		if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
 			if (paclnode->valid) {
 				paclnode->valid = false;
@@ -1146,7 +1126,6 @@
 			}
 		}
 	}
-
 	spin_unlock_bh(&pacl_node_q->lock);
 
 	DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
@@ -1354,20 +1333,14 @@
 {
 	/* update associated stations cap. */
 	if (updated == true) {
-		struct list_head *phead, *plist, *ptmp;
-		struct sta_info *psta;
+		struct list_head *phead;
+		struct sta_info *psta, *ptmp;
 		struct sta_priv *pstapriv = &padapter->stapriv;
 
 		spin_lock_bh(&pstapriv->asoc_list_lock);
-
 		phead = &pstapriv->asoc_list;
-
-		list_for_each_safe(plist, ptmp, phead) {
-			psta = container_of(plist, struct sta_info, asoc_list);
-
+		list_for_each_entry_safe(psta, ptmp, phead, asoc_list)
 			VCS_update23a(padapter, psta);
-		}
-
 		spin_unlock_bh(&pstapriv->asoc_list_lock);
 	}
 }
@@ -1627,7 +1600,7 @@
 
 int rtw_ap_inform_ch_switch23a(struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset)
 {
-	struct list_head *phead, *plist;
+	struct list_head *phead;
 	struct sta_info *psta = NULL;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1642,10 +1615,7 @@
 
 	spin_lock_bh(&pstapriv->asoc_list_lock);
 	phead = &pstapriv->asoc_list;
-
-	list_for_each(plist, phead) {
-		psta = container_of(plist, struct sta_info, asoc_list);
-
+	list_for_each_entry(psta, phead, asoc_list) {
 		issue_action_spct_ch_switch23a(padapter, psta->hwaddr, new_ch, ch_offset);
 		psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
 	}
@@ -1658,8 +1628,8 @@
 
 int rtw_sta_flush23a(struct rtw_adapter *padapter)
 {
-	struct list_head *phead, *plist, *ptmp;
-	struct sta_info *psta;
+	struct list_head *phead;
+	struct sta_info *psta, *ptmp;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
 	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -1675,10 +1645,7 @@
 
 	spin_lock_bh(&pstapriv->asoc_list_lock);
 	phead = &pstapriv->asoc_list;
-
-	list_for_each_safe(plist, ptmp, phead) {
-		psta = container_of(plist, struct sta_info, asoc_list);
-
+	list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
 		/* Remove sta from asoc_list */
 		list_del_init(&psta->asoc_list);
 		pstapriv->asoc_list_cnt--;
@@ -1744,9 +1711,9 @@
 	struct mlme_priv *mlmepriv = &padapter->mlmepriv;
 	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
 	struct sta_priv *pstapriv = &padapter->stapriv;
-	struct sta_info *psta;
+	struct sta_info *psta, *ptmp;
 	struct security_priv *psecuritypriv = &padapter->securitypriv;
-	struct list_head *phead, *plist, *ptmp;
+	struct list_head *phead;
 	u8 chk_alive_num = 0;
 	struct sta_info *chk_alive_list[NUM_STA];
 	int i;
@@ -1775,15 +1742,9 @@
 	}
 
 	spin_lock_bh(&pstapriv->asoc_list_lock);
-
 	phead = &pstapriv->asoc_list;
-
-	list_for_each_safe(plist, ptmp, phead) {
-		psta = container_of(plist, struct sta_info, asoc_list);
-
+	list_for_each_entry_safe(psta, ptmp, phead, asoc_list)
 		chk_alive_list[chk_alive_num++] = psta;
-	}
-
 	spin_unlock_bh(&pstapriv->asoc_list_lock);
 
 	for (i = 0; i < chk_alive_num; i++) {
@@ -1841,8 +1802,8 @@
 
 void stop_ap_mode23a(struct rtw_adapter *padapter)
 {
-	struct list_head *phead, *plist, *ptmp;
-	struct rtw_wlan_acl_node *paclnode;
+	struct list_head *phead;
+	struct rtw_wlan_acl_node *paclnode, *ptmp;
 	struct sta_info *psta = NULL;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1864,15 +1825,10 @@
 	/* for ACL */
 	spin_lock_bh(&pacl_node_q->lock);
 	phead = get_list_head(pacl_node_q);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
+	list_for_each_entry_safe(paclnode, ptmp, phead, list) {
 		if (paclnode->valid == true) {
 			paclnode->valid = false;
-
 			list_del_init(&paclnode->list);
-
 			pacl_list->num--;
 		}
 	}
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
index 3035bb8..cd4e0f0 100644
--- a/drivers/staging/rtl8723au/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723au/core/rtw_cmd.c
@@ -295,8 +295,7 @@
 
 post_process:
 	/* call callback function for post-processed */
-	if (pcmd->cmdcode < (sizeof(rtw_cmd_callback) /
-			     sizeof(struct _cmd_callback))) {
+	if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
 		pcmd_callback =	rtw_cmd_callback[pcmd->cmdcode].callback;
 		if (!pcmd_callback) {
 			RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c
index 3c09ea9..3adda55 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme.c
@@ -171,21 +171,15 @@
 
 void rtw_free_network_queue23a(struct rtw_adapter *padapter)
 {
-	struct list_head *phead, *plist, *ptmp;
-	struct wlan_network *pnetwork;
+	struct list_head *phead;
+	struct wlan_network *pnetwork, *ptmp;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue;
 
 	spin_lock_bh(&scanned_queue->lock);
-
 	phead = get_list_head(scanned_queue);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pnetwork = container_of(plist, struct wlan_network, list);
-
+	list_for_each_entry_safe(pnetwork, ptmp, phead, list)
 		_rtw_free_network23a(pmlmepriv, pnetwork);
-	}
-
 	spin_unlock_bh(&scanned_queue->lock);
 }
 
@@ -329,15 +323,12 @@
 struct wlan_network *
 rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue)
 {
-	struct list_head *plist, *phead;
+	struct list_head *phead;
 	struct wlan_network *pwlan;
 	struct wlan_network *oldest = NULL;
 
 	phead = get_list_head(scanned_queue);
-
-	list_for_each(plist, phead) {
-		pwlan = container_of(plist, struct wlan_network, list);
-
+	list_for_each_entry(pwlan, phead, list) {
 		if (pwlan->fixed != true) {
 			if (!oldest || time_after(oldest->last_scanned,
 						  pwlan->last_scanned))
@@ -445,7 +436,6 @@
 
 	spin_lock_bh(&queue->lock);
 	phead = get_list_head(queue);
-
 	list_for_each(plist, phead) {
 		pnetwork = container_of(plist, struct wlan_network, list);
 
@@ -710,21 +700,17 @@
 
 static void free_scanqueue(struct mlme_priv *pmlmepriv)
 {
-	struct wlan_network *pnetwork;
+	struct wlan_network *pnetwork, *ptemp;
 	struct rtw_queue *scan_queue = &pmlmepriv->scanned_queue;
-	struct list_head *plist, *phead, *ptemp;
+	struct list_head *phead;
 
 	RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+free_scanqueue\n");
 	spin_lock_bh(&scan_queue->lock);
-
 	phead = get_list_head(scan_queue);
-
-	list_for_each_safe(plist, ptemp, phead) {
-		pnetwork = container_of(plist, struct wlan_network, list);
+	list_for_each_entry_safe(pnetwork, ptemp, phead, list) {
 		pnetwork->fixed = false;
 		_rtw_free_network23a(pmlmepriv, pnetwork);
 	}
-
 	spin_unlock_bh(&scan_queue->lock);
 }
 
@@ -1625,15 +1611,13 @@
 static struct wlan_network *
 rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv)
 {
-	struct wlan_network *pnetwork, *candidate = NULL;
+	struct wlan_network *pnetwork, *ptmp, *candidate = NULL;
 	struct rtw_queue *queue = &pmlmepriv->scanned_queue;
-	struct list_head *phead, *plist, *ptmp;
+	struct list_head *phead;
 
 	spin_lock_bh(&pmlmepriv->scanned_queue.lock);
 	phead = get_list_head(queue);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pnetwork = container_of(plist, struct wlan_network, list);
+	list_for_each_entry_safe(pnetwork, ptmp, phead, list) {
 		if (!pnetwork) {
 			RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
 				 "%s: return _FAIL:(pnetwork == NULL)\n",
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index d28f29a..f4fff38 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -2154,8 +2154,7 @@
 
 	category = mgmt->u.action.category;
 
-	for (i = 0;
-	     i < sizeof(OnAction23a_tbl) / sizeof(struct action_handler); i++) {
+	for (i = 0; i < ARRAY_SIZE(OnAction23a_tbl); i++) {
 		ptable = &OnAction23a_tbl[i];
 
 		if (category == ptable->num)
@@ -2656,8 +2655,6 @@
 	pattrib->last_txcmdsz = pattrib->pktlen;
 
 	dump_mgntframe23a(padapter, pmgntframe);
-
-	return;
 }
 
 static int _issue_probereq(struct rtw_adapter *padapter,
@@ -2957,8 +2954,6 @@
 	rtw_wep_encrypt23a(padapter, pmgntframe);
 	DBG_8723A("%s\n", __func__);
 	dump_mgntframe23a(padapter, pmgntframe);
-
-	return;
 }
 
 #ifdef CONFIG_8723AU_AP_MODE
@@ -3338,8 +3333,6 @@
 		}
 	} else
 		kfree(pmlmepriv->assoc_req);
-
-	return;
 }
 
 /* when wait_ack is true, this function should be called at process context */
@@ -4102,8 +4095,6 @@
 		pmlmeext->chan_scan_time = SURVEY_TO;
 		pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
 	}
-
-	return;
 }
 
 /* collect bss info from Beacon and Probe request/response frames. */
@@ -4759,8 +4750,6 @@
 	rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
 
 	pmlmeext->sitesurvey_res.bss_cnt++;
-
-	return;
 }
 
 void report_surveydone_event23a(struct rtw_adapter *padapter)
@@ -4802,8 +4791,6 @@
 	DBG_8723A("survey done event(%x)\n", psurveydone_evt->bss_cnt);
 
 	rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
-	return;
 }
 
 void report_join_res23a(struct rtw_adapter *padapter, int res)
@@ -4850,8 +4837,6 @@
 	rtw_joinbss_event_prehandle23a(padapter, (u8 *)&pjoinbss_evt->network);
 
 	rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
-	return;
 }
 
 void report_del_sta_event23a(struct rtw_adapter *padapter,
@@ -4906,8 +4891,6 @@
 	DBG_8723A("report_del_sta_event23a: delete STA, mac_id =%d\n", mac_id);
 
 	rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
-	return;
 }
 
 void report_add_sta_event23a(struct rtw_adapter *padapter,
@@ -4951,8 +4934,6 @@
 	DBG_8723A("report_add_sta_event23a: add STA\n");
 
 	rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
-	return;
 }
 
 /****************************************************************************
@@ -5394,8 +5375,6 @@
 		issue_assocreq(padapter);
 		set_link_timer(pmlmeext, REASSOC_TO);
 	}
-
-	return;
 }
 
 static void addba_timer_hdl(unsigned long data)
@@ -6082,10 +6061,10 @@
 #ifdef CONFIG_8723AU_AP_MODE
 	else { /* tx bc/mc frames after update TIM */
 		struct sta_info *psta_bmc;
-		struct list_head *plist, *phead, *ptmp;
-		struct xmit_frame *pxmitframe;
+		struct list_head *phead;
+		struct xmit_frame *pxmitframe, *ptmp;
 		struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-		struct sta_priv  *pstapriv = &padapter->stapriv;
+		struct sta_priv *pstapriv = &padapter->stapriv;
 
 		/* for BC/MC Frames */
 		psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
@@ -6099,10 +6078,8 @@
 
 			phead = get_list_head(&psta_bmc->sleep_q);
 
-			list_for_each_safe(plist, ptmp, phead) {
-				pxmitframe = container_of(plist,
-							  struct xmit_frame,
-							  list);
+			list_for_each_entry_safe(pxmitframe, ptmp,
+						 phead, list) {
 
 				list_del_init(&pxmitframe->list);
 
@@ -6119,7 +6096,6 @@
 				rtl8723au_hal_xmitframe_enqueue(padapter,
 								pxmitframe);
 			}
-
 			/* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
 			spin_unlock_bh(&pxmitpriv->lock);
 		}
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 404b618..0a7741c 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -85,16 +85,15 @@
 	return res;
 }
 
-void _rtw_free_recv_priv23a (struct recv_priv *precvpriv)
+void _rtw_free_recv_priv23a(struct recv_priv *precvpriv)
 {
 	struct rtw_adapter *padapter = precvpriv->adapter;
-	struct recv_frame *precvframe;
-	struct list_head *plist, *ptmp;
+	struct recv_frame *precvframe, *ptmp;
 
 	rtw_free_uc_swdec_pending_queue23a(padapter);
 
-	list_for_each_safe(plist, ptmp, &precvpriv->free_recv_queue.queue) {
-		precvframe = container_of(plist, struct recv_frame, list);
+	list_for_each_entry_safe(precvframe, ptmp,
+				 &precvpriv->free_recv_queue.queue, list) {
 		list_del_init(&precvframe->list);
 		kfree(precvframe);
 	}
@@ -195,19 +194,13 @@
 
 static void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue)
 {
-	struct recv_frame *hdr;
-	struct list_head *plist, *phead, *ptmp;
+	struct recv_frame *hdr, *ptmp;
+	struct list_head *phead;
 
 	spin_lock(&pframequeue->lock);
-
 	phead = get_list_head(pframequeue);
-	plist = phead->next;
-
-	list_for_each_safe(plist, ptmp, phead) {
-		hdr = container_of(plist, struct recv_frame, list);
+	list_for_each_entry_safe(hdr, ptmp, phead, list)
 		rtw_free_recvframe23a(hdr);
-	}
-
 	spin_unlock(&pframequeue->lock);
 }
 
@@ -361,33 +354,19 @@
 				int i;
 
 				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
-					 "*(pframemic-8)-*(pframemic-1) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
-					 *(pframemic - 8), *(pframemic - 7),
-					 *(pframemic - 6), *(pframemic - 5),
-					 *(pframemic - 4), *(pframemic - 3),
-					 *(pframemic - 2), *(pframemic - 1));
+					 "*(pframemic-8)-*(pframemic-1) =%*phC\n",
+					 8, pframemic - 8);
 				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
-					 "*(pframemic-16)-*(pframemic-9) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
-					 *(pframemic - 16), *(pframemic - 15),
-					 *(pframemic - 14), *(pframemic - 13),
-					 *(pframemic - 12), *(pframemic - 11),
-					 *(pframemic - 10), *(pframemic - 9));
+					 "*(pframemic-16)-*(pframemic-9) =%*phC\n",
+					 8, pframemic - 16);
 
 				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
 					 "====== demp packet (len =%d) ======\n",
 					 precvframe->pkt->len);
 				for (i = 0; i < precvframe->pkt->len; i = i + 8) {
 					RT_TRACE(_module_rtl871x_recv_c_,
-						 _drv_err_,
-						 "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
-						 *(precvframe->pkt->data+i),
-						 *(precvframe->pkt->data+i+1),
-						 *(precvframe->pkt->data+i+2),
-						 *(precvframe->pkt->data+i+3),
-						 *(precvframe->pkt->data+i+4),
-						 *(precvframe->pkt->data+i+5),
-						 *(precvframe->pkt->data+i+6),
-						 *(precvframe->pkt->data+i+7));
+						 _drv_err_, "%*phC\n",
+						 8, precvframe->pkt->data + i);
 				}
 				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
 					 "====== demp packet end [len =%d]======\n",
@@ -1402,11 +1381,7 @@
 		DBG_8723A("#############################\n");
 
 		for (i = 0; i < 64; i = i + 8)
-			DBG_8723A("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n",
-				  *(ptr + i), *(ptr + i + 1), *(ptr + i + 2),
-				  *(ptr + i + 3), *(ptr + i + 4),
-				  *(ptr + i + 5), *(ptr + i + 6),
-				  *(ptr + i + 7));
+			DBG_8723A("%*phC:\n", 8, ptr + i);
 		DBG_8723A("#############################\n");
 	}
 }
@@ -1567,16 +1542,14 @@
 struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
 				    struct rtw_queue *defrag_q)
 {
-	struct list_head *plist, *phead, *ptmp;
-	u8	*data, wlanhdr_offset;
-	u8	curfragnum;
-	struct recv_frame *pnfhdr;
+	struct list_head *plist, *phead;
+	u8 wlanhdr_offset;
+	u8 curfragnum;
+	struct recv_frame *pnfhdr, *ptmp;
 	struct recv_frame *prframe, *pnextrframe;
-	struct rtw_queue	*pfree_recv_queue;
+	struct rtw_queue *pfree_recv_queue;
 	struct sk_buff *skb;
 
-
-
 	curfragnum = 0;
 	pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
 
@@ -1597,12 +1570,7 @@
 
 	curfragnum++;
 
-	phead = get_list_head(defrag_q);
-
-	data = prframe->pkt->data;
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pnfhdr = container_of(plist, struct recv_frame, list);
+	list_for_each_entry_safe(pnfhdr, ptmp, phead, list) {
 		pnextrframe = (struct recv_frame *)pnfhdr;
 		/* check the fragment sequence  (2nd ~n fragment frame) */
 
@@ -1644,8 +1612,6 @@
 	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
 		 "Performance defrag!!!!!\n");
 
-
-
 	return prframe;
 }
 
diff --git a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
index b06bff7..22d857b 100644
--- a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
@@ -83,8 +83,8 @@
 
 int _rtw_free_sta_priv23a(struct sta_priv *pstapriv)
 {
-	struct list_head *phead, *plist, *ptmp;
-	struct sta_info *psta;
+	struct list_head *phead;
+	struct sta_info *psta, *ptmp;
 	struct recv_reorder_ctrl *preorder_ctrl;
 	int index;
 
@@ -93,12 +93,9 @@
 		spin_lock_bh(&pstapriv->sta_hash_lock);
 		for (index = 0; index < NUM_STA; index++) {
 			phead = &pstapriv->sta_hash[index];
-
-			list_for_each_safe(plist, ptmp, phead) {
+			list_for_each_entry_safe(psta, ptmp, phead, hash_list) {
 				int i;
 
-				psta = container_of(plist, struct sta_info,
-						    hash_list);
 				for (i = 0; i < 16 ; i++) {
 					preorder_ctrl = &psta->recvreorder_ctrl[i];
 					del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
@@ -325,8 +322,8 @@
 /*  free all stainfo which in sta_hash[all] */
 void rtw_free_all_stainfo23a(struct rtw_adapter *padapter)
 {
-	struct list_head *plist, *phead, *ptmp;
-	struct sta_info *psta;
+	struct list_head *phead;
+	struct sta_info *psta, *ptmp;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo23a(padapter);
 	s32 index;
@@ -335,13 +332,9 @@
 		return;
 
 	spin_lock_bh(&pstapriv->sta_hash_lock);
-
 	for (index = 0; index < NUM_STA; index++) {
 		phead = &pstapriv->sta_hash[index];
-
-		list_for_each_safe(plist, ptmp, phead) {
-			psta = container_of(plist, struct sta_info, hash_list);
-
+		list_for_each_entry_safe(psta, ptmp, phead, hash_list) {
 			if (pbcmc_stainfo != psta)
 				rtw_free_stainfo23a(padapter, psta);
 		}
@@ -352,9 +345,9 @@
 /* any station allocated can be searched by hash list */
 struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr)
 {
-	struct list_head *plist, *phead;
+	struct list_head *phead;
 	struct sta_info *psta = NULL;
-	u32	index;
+	u32 index;
 	const u8 *addr;
 
 	if (hwaddr == NULL)
@@ -368,12 +361,8 @@
 	index = wifi_mac_hash(addr);
 
 	spin_lock_bh(&pstapriv->sta_hash_lock);
-
 	phead = &pstapriv->sta_hash[index];
-
-	list_for_each(plist, phead) {
-		psta = container_of(plist, struct sta_info, hash_list);
-
+	list_for_each_entry(psta, phead, hash_list) {
 		/*  if found the matched address */
 		if (ether_addr_equal(psta->hwaddr, addr))
 			break;
@@ -418,7 +407,7 @@
 {
 	bool res = true;
 #ifdef CONFIG_8723AU_AP_MODE
-	struct list_head *plist, *phead;
+	struct list_head *phead;
 	struct rtw_wlan_acl_node *paclnode;
 	bool match = false;
 	struct sta_priv *pstapriv = &padapter->stapriv;
@@ -427,10 +416,7 @@
 
 	spin_lock_bh(&pacl_node_q->lock);
 	phead = get_list_head(pacl_node_q);
-
-	list_for_each(plist, phead) {
-		paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
+	list_for_each_entry(paclnode, phead, list) {
 		if (ether_addr_equal(paclnode->addr, mac_addr)) {
 			if (paclnode->valid) {
 				match = true;
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
index a4b6bb6..b82b182 100644
--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
@@ -193,39 +193,38 @@
 	goto exit;
 }
 
-void _rtw_free_xmit_priv23a (struct xmit_priv *pxmitpriv)
+void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv)
 {
 	struct rtw_adapter *padapter = pxmitpriv->adapter;
-	struct xmit_frame *pxframe;
-	struct xmit_buf *pxmitbuf;
-	struct list_head *plist, *ptmp;
+	struct xmit_frame *pxframe, *ptmp;
+	struct xmit_buf *pxmitbuf, *ptmp2;
 
-	list_for_each_safe(plist, ptmp, &pxmitpriv->free_xmit_queue.queue) {
-		pxframe = container_of(plist, struct xmit_frame, list);
+	list_for_each_entry_safe(pxframe, ptmp,
+				 &pxmitpriv->free_xmit_queue.queue, list) {
 		list_del_init(&pxframe->list);
 		rtw_os_xmit_complete23a(padapter, pxframe);
 		kfree(pxframe);
 	}
 
-	list_for_each_safe(plist, ptmp, &pxmitpriv->xmitbuf_list) {
-		pxmitbuf = container_of(plist, struct xmit_buf, list2);
+	list_for_each_entry_safe(pxmitbuf, ptmp2,
+				 &pxmitpriv->xmitbuf_list, list2) {
 		list_del_init(&pxmitbuf->list2);
 		rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
 		kfree(pxmitbuf);
 	}
 
 	/* free xframe_ext queue,  the same count as extbuf  */
-	list_for_each_safe(plist, ptmp,
-			   &pxmitpriv->free_xframe_ext_queue.queue) {
-		pxframe = container_of(plist, struct xmit_frame, list);
+	list_for_each_entry_safe(pxframe, ptmp,
+				 &pxmitpriv->free_xframe_ext_queue.queue,
+				 list) {
 		list_del_init(&pxframe->list);
 		rtw_os_xmit_complete23a(padapter, pxframe);
 		kfree(pxframe);
 	}
 
 	/*  free xmit extension buff */
-	list_for_each_safe(plist, ptmp, &pxmitpriv->xmitextbuf_list) {
-		pxmitbuf = container_of(plist, struct xmit_buf, list2);
+	list_for_each_entry_safe(pxmitbuf, ptmp2,
+				 &pxmitpriv->xmitextbuf_list, list2) {
 		list_del_init(&pxmitbuf->list2);
 		rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
 		kfree(pxmitbuf);
@@ -1563,18 +1562,13 @@
 void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv,
 				 struct rtw_queue *pframequeue)
 {
-	struct list_head *plist, *phead, *ptmp;
-	struct	xmit_frame *pxmitframe;
+	struct list_head *phead;
+	struct xmit_frame *pxmitframe, *ptmp;
 
 	spin_lock_bh(&pframequeue->lock);
-
 	phead = get_list_head(pframequeue);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pxmitframe = container_of(plist, struct xmit_frame, list);
-
+	list_for_each_entry_safe(pxmitframe, ptmp, phead, list)
 		rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
-	}
 	spin_unlock_bh(&pframequeue->lock);
 
 }
@@ -1612,9 +1606,9 @@
 rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i,
 		   int entry)
 {
-	struct list_head *sta_plist, *sta_phead, *ptmp;
+	struct list_head *sta_phead;
 	struct hw_xmit *phwxmit;
-	struct tx_servq *ptxservq = NULL;
+	struct tx_servq *ptxservq = NULL, *ptmp;
 	struct rtw_queue *pframe_queue = NULL;
 	struct xmit_frame *pxmitframe = NULL;
 	struct rtw_adapter *padapter = pxmitpriv->adapter;
@@ -1638,11 +1632,8 @@
 		phwxmit = phwxmit_i + inx[i];
 
 		sta_phead = get_list_head(phwxmit->sta_queue);
-
-		list_for_each_safe(sta_plist, ptmp, sta_phead) {
-			ptxservq = container_of(sta_plist, struct tx_servq,
-						tx_pending);
-
+		list_for_each_entry_safe(ptxservq, ptmp, sta_phead,
+					 tx_pending) {
 			pframe_queue = &ptxservq->sta_pending;
 
 			pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
@@ -2052,18 +2043,15 @@
 				     struct rtw_queue *pframequeue)
 {
 	int ret;
-	struct list_head *plist, *phead, *ptmp;
-	u8	ac_index;
+	struct list_head *phead;
+	u8 ac_index;
 	struct tx_servq	*ptxservq;
-	struct pkt_attrib	*pattrib;
-	struct xmit_frame	*pxmitframe;
-	struct hw_xmit *phwxmits =  padapter->xmitpriv.hwxmits;
+	struct pkt_attrib *pattrib;
+	struct xmit_frame *pxmitframe, *ptmp;
+	struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
 
 	phead = get_list_head(pframequeue);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pxmitframe = container_of(plist, struct xmit_frame, list);
-
+	list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
 		ret = xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe);
 
 		if (ret == true) {
@@ -2124,17 +2112,14 @@
 {
 	u8 update_mask = 0, wmmps_ac = 0;
 	struct sta_info *psta_bmc;
-	struct list_head *plist, *phead, *ptmp;
-	struct xmit_frame *pxmitframe = NULL;
+	struct list_head *phead;
+	struct xmit_frame *pxmitframe = NULL, *ptmp;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 
 	spin_lock_bh(&pxmitpriv->lock);
-
 	phead = get_list_head(&psta->sleep_q);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pxmitframe = container_of(plist, struct xmit_frame, list);
+	list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
 		list_del_init(&pxmitframe->list);
 
 		switch (pxmitframe->attrib.priority) {
@@ -2194,7 +2179,6 @@
 
 		pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid);
 	}
-
 	/* spin_unlock_bh(&psta->sleep_q.lock); */
 	spin_unlock_bh(&pxmitpriv->lock);
 
@@ -2206,13 +2190,8 @@
 	if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) {
 		/* no any sta in ps mode */
 		spin_lock_bh(&pxmitpriv->lock);
-
 		phead = get_list_head(&psta_bmc->sleep_q);
-
-		list_for_each_safe(plist, ptmp, phead) {
-			pxmitframe = container_of(plist, struct xmit_frame,
-						  list);
-
+		list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
 			list_del_init(&pxmitframe->list);
 
 			psta_bmc->sleepq_len--;
@@ -2232,7 +2211,6 @@
 			/* update_BCNTIM(padapter); */
 			update_mask |= BIT(1);
 		}
-
 		/* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
 		spin_unlock_bh(&pxmitpriv->lock);
 	}
@@ -2245,19 +2223,15 @@
 				  struct sta_info *psta)
 {
 	u8 wmmps_ac = 0;
-	struct list_head *plist, *phead, *ptmp;
-	struct xmit_frame *pxmitframe;
+	struct list_head *phead;
+	struct xmit_frame *pxmitframe, *ptmp;
 	struct sta_priv *pstapriv = &padapter->stapriv;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 
 	/* spin_lock_bh(&psta->sleep_q.lock); */
 	spin_lock_bh(&pxmitpriv->lock);
-
 	phead = get_list_head(&psta->sleep_q);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pxmitframe = container_of(plist, struct xmit_frame, list);
-
+	list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
 		switch (pxmitframe->attrib.priority) {
 		case 1:
 		case 2:
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
index e8cab9e..8d3ea6c 100644
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
@@ -219,7 +219,7 @@
 	u32 i;
 	u8 platform = 0x04;
 	u8 board = pDM_Odm->BoardType;
-	u32 ArrayLen = sizeof(Array_AGC_TAB_1T_8723A)/sizeof(u32);
+	u32 ArrayLen = ARRAY_SIZE(Array_AGC_TAB_1T_8723A);
 	u32 *Array = Array_AGC_TAB_1T_8723A;
 
 	hex = board;
@@ -467,7 +467,7 @@
 	u32 i = 0;
 	u8  platform = 0x04;
 	u8  board = pDM_Odm->BoardType;
-	u32 ArrayLen = sizeof(Array_PHY_REG_1T_8723A)/sizeof(u32);
+	u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_1T_8723A);
 	u32 *Array = Array_PHY_REG_1T_8723A;
 
 	hex += board;
@@ -523,7 +523,7 @@
 	u32 i;
 	u8 platform = 0x04;
 	u8 board = pDM_Odm->BoardType;
-	u32 ArrayLen = sizeof(Array_PHY_REG_MP_8723A)/sizeof(u32);
+	u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_MP_8723A);
 	u32 *Array = Array_PHY_REG_MP_8723A;
 
 	hex += board;
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
index 93b2d18..9bf6859 100644
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
@@ -145,7 +145,7 @@
 	u32     i           = 0;
 	u8     platform    = 0x04;
 	u8     board       = pDM_Odm->BoardType;
-	u32     ArrayLen    = sizeof(Array_MAC_REG_8723A)/sizeof(u32);
+	u32     ArrayLen    = ARRAY_SIZE(Array_MAC_REG_8723A);
 	u32 *Array       = Array_MAC_REG_8723A;
 
 	hex += board;
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
index dbf571e..286f3ea 100644
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
@@ -215,7 +215,7 @@
 	u32     i           = 0;
 	u8     platform    = 0x04;
 	u8     board       = pDM_Odm->BoardType;
-	u32     ArrayLen    = sizeof(Array_RadioA_1T_8723A)/sizeof(u32);
+	u32     ArrayLen    = ARRAY_SIZE(Array_RadioA_1T_8723A);
 	u32 *Array = Array_RadioA_1T_8723A;
 
 	hex += board;
diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c
index 530db57..9d7b11b 100644
--- a/drivers/staging/rtl8723au/hal/hal_com.c
+++ b/drivers/staging/rtl8723au/hal/hal_com.c
@@ -328,7 +328,7 @@
 
 	if (trigger == C2H_EVT_HOST_CLOSE)
 		goto exit;	/* Not ready */
-	else if (trigger != C2H_EVT_FW_CLOSE)
+	if (trigger != C2H_EVT_FW_CLOSE)
 		goto clear_evt;	/* Not a valid value */
 
 	c2h_evt = (struct c2h_evt_hdr *)buf;
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
index 6b9dbef..45d70fd 100644
--- a/drivers/staging/rtl8723au/hal/odm.c
+++ b/drivers/staging/rtl8723au/hal/odm.c
@@ -1274,7 +1274,7 @@
 
 	for (i = 0; i < sta_cnt; i++) {
 		if (PWDB_rssi[i] != (0))
-			rtl8723a_set_rssi_cmd(Adapter, (u8 *)&PWDB_rssi[i]);
+			rtl8723a_set_rssi_cmd(Adapter, PWDB_rssi[i]);
 	}
 
 	pdmpriv->EntryMaxUndecoratedSmoothedPWDB = MaxDB;
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
index d5c48a5..1688f663 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
@@ -77,11 +77,7 @@
 
 #define PlatformZeroMemory(ptr, sz)	memset(ptr, 0, sz)
 
-#define PlatformProcessHCICommands(...)
-#define PlatformTxBTQueuedPackets(...)
 #define PlatformIndicateBTACLData(...)	(RT_STATUS_SUCCESS)
-#define PlatformAcquireSpinLock(padapter, type)
-#define PlatformReleaseSpinLock(padapter, type)
 
 #define GET_UNDECORATED_AVERAGE_RSSI(padapter)	\
 			(GET_HAL_DATA(padapter)->dmpriv.EntryMinUndecoratedSmoothedPWDB)
@@ -1454,21 +1450,11 @@
 	}
 
 	if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) {
-		snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x",
-		padapter->eeprompriv.mac_addr[0],
-		padapter->eeprompriv.mac_addr[1],
-		padapter->eeprompriv.mac_addr[2],
-		padapter->eeprompriv.mac_addr[3],
-		padapter->eeprompriv.mac_addr[4],
-		padapter->eeprompriv.mac_addr[5]);
+		snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32,
+			 "AMP-%pMF", padapter->eeprompriv.mac_addr);
 	} else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) {
-		snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x",
-		pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[0],
-		pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[1],
-		pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[2],
-		pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[3],
-		pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[4],
-		pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[5]);
+		snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32,
+			 "AMP-%pMF", pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr);
 	}
 
 	FillOctetString(pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid, pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 21);
@@ -9475,10 +9461,8 @@
 			psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm1Ant.curPsTdma;
 		else
 			psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm2Ant.curPsTdma;
-		snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA(0x3a)", \
-			pHalData->bt_coexist.fw3aVal[0], pHalData->bt_coexist.fw3aVal[1],
-			pHalData->bt_coexist.fw3aVal[2], pHalData->bt_coexist.fw3aVal[3],
-			pHalData->bt_coexist.fw3aVal[4], psTdmaCase);
+		snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %*ph case-%d",
+			 "PS TDMA(0x3a)", 5, pHalData->bt_coexist.fw3aVal, psTdmaCase);
 		DCMD_Printf(btCoexDbgBuf);
 
 		snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Decrease Bt Power", \
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
index 1662c03c..2230f4c 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
@@ -113,11 +113,11 @@
 	return ret;
 }
 
-int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param)
+int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param)
 {
-	*((u32 *)param) = cpu_to_le32(*((u32 *)param));
+	__le32 cmd = cpu_to_le32(param);
 
-	FillH2CCmd(padapter, RSSI_SETTING_EID, 3, param);
+	FillH2CCmd(padapter, RSSI_SETTING_EID, 3, (void *)&cmd);
 
 	return _SUCCESS;
 }
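
Passing the RSSI as a plain u32 and converting into a local __le32 avoids byte-swapping the caller's buffer in place, which was both a surprise for the caller and a sparse endianness warning. A minimal sketch of the idea, with a hypothetical push() callback standing in for the firmware command helper:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hand a CPU-order value to firmware that expects little-endian,
 * without modifying the caller's storage. push() is a placeholder. */
static void send_le32(void (*push)(const void *buf, size_t len), u32 val)
{
	__le32 wire = cpu_to_le32(val);

	push(&wire, sizeof(wire));
}
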
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index ecf54ee..e3dc889 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1044,7 +1044,7 @@
 	u8 val;
 
 	val = rtl8723au_read8(padapter, REG_LEDCFG2);
-	/*  Let 8051 take control antenna settting */
+	/*  Let 8051 take control antenna setting */
 	val |= BIT(7);		/*  DPDT_SEL_EN, 0x4C[23] */
 	rtl8723au_write8(padapter, REG_LEDCFG2, val);
 }
@@ -1054,7 +1054,7 @@
 	u8 val;
 
 	val = rtl8723au_read8(padapter, REG_LEDCFG2);
-	/*  Let 8051 take control antenna settting */
+	/*  Let 8051 take control antenna setting */
 	if (!(val & BIT(7))) {
 		val |= BIT(7);	/*  DPDT_SEL_EN, 0x4C[23] */
 		rtl8723au_write8(padapter, REG_LEDCFG2, val);
@@ -1066,7 +1066,7 @@
 	u8 val;
 
 	val = rtl8723au_read8(padapter, REG_LEDCFG2);
-	/*  Let 8051 take control antenna settting */
+	/*  Let 8051 take control antenna setting */
 	val &= ~BIT(7);		/*  DPDT_SEL_EN, clear 0x4C[23] */
 	rtl8723au_write8(padapter, REG_LEDCFG2, val);
 }
@@ -1297,7 +1297,7 @@
 		/*  If we want to SS mode, we can not reset 8051. */
 		if ((val8 & BIT(1)) && padapter->bFWReady) {
 			/* IF fw in RAM code, do reset */
-			/*  2010/08/25 MH Accordign to RD alfred's
+			/*  2010/08/25 MH According to RD alfred's
 			    suggestion, we need to disable other */
 			/*  HRCV INT to influence 8051 reset. */
 			rtl8723au_write8(padapter, REG_FWIMR, 0x20);
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
index 9926b079..3c2d03e 100644
--- a/drivers/staging/rtl8723au/hal/usb_halinit.c
+++ b/drivers/staging/rtl8723au/hal/usb_halinit.c
@@ -1021,10 +1021,8 @@
 	}
 
 	RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
-		 "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%02x:%02x:%02x:%02x:%02x:%02x\n",
-		 pEEPROM->mac_addr[0], pEEPROM->mac_addr[1],
-		 pEEPROM->mac_addr[2], pEEPROM->mac_addr[3],
-		 pEEPROM->mac_addr[4], pEEPROM->mac_addr[5]);
+		 "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%pM\n",
+		 pEEPROM->mac_addr);
 }
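
These snprintf/RT_TRACE changes lean on the printk pointer extensions: %pM prints a six-byte MAC colon-separated, %pMF prints it dash-separated (the form the AMP SSID strings above want), and %*ph hex-dumps a short buffer whose length is passed as the field width. A small sketch with placeholder buffers:

#include <linux/printk.h>
#include <linux/types.h>

static void dump_ids(const u8 *mac, const u8 *fw3a)
{
	pr_info("addr %pM (dashed: %pMF)\n", mac, mac);
	pr_info("fw 0x3a bytes: %*ph\n", 5, fw3a);	/* "xx xx xx xx xx" */
}
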
 
 static void readAdapterInfo(struct rtw_adapter *padapter)
diff --git a/drivers/staging/rtl8723au/include/osdep_service.h b/drivers/staging/rtl8723au/include/osdep_service.h
index dedb418..98250b1 100644
--- a/drivers/staging/rtl8723au/include/osdep_service.h
+++ b/drivers/staging/rtl8723au/include/osdep_service.h
@@ -29,10 +29,10 @@
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
+#include <linux/atomic.h>
+#include <linux/io.h>
 #include <linux/semaphore.h>
 #include <linux/sem.h>
 #include <linux/sched.h>
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
index 014c02e..f95535a 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
@@ -149,7 +149,7 @@
 #else
 #define rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(padapter) do {} while(0)
 #endif
-int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param);
+int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param);
 int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg);
 void rtl8723a_add_rateatid(struct rtw_adapter *padapter, u32 bitmap, u8 arg, u8 rssi_level);
 
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 0ae2180..908b84c 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -1270,18 +1270,14 @@
 
 void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter)
 {
-	struct list_head *plist, *phead, *ptmp;
+	struct list_head *phead;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct rtw_queue *queue = &pmlmepriv->scanned_queue;
-	struct wlan_network *pnetwork;
+	struct wlan_network *pnetwork, *ptmp;
 
 	spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
 	phead = get_list_head(queue);
-
-	list_for_each_safe(plist, ptmp, phead) {
-		pnetwork = container_of(plist, struct wlan_network, list);
-
+	list_for_each_entry_safe(pnetwork, ptmp, phead, list) {
 		/* report network only if the current channel set
 		   contains the channel to which this network belongs */
 		if (rtw_ch_set_search_ch23a
@@ -1289,7 +1285,6 @@
 		     pnetwork->network.DSConfig) >= 0)
 			rtw_cfg80211_inform_bss(padapter, pnetwork);
 	}
-
 	spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
 
 	/* call this after other things have been done */
@@ -2850,9 +2845,9 @@
 {
 	const u8 *mac = params->mac;
 	int ret = 0;
-	struct list_head *phead, *plist, *ptmp;
+	struct list_head *phead;
 	u8 updated = 0;
-	struct sta_info *psta;
+	struct sta_info *psta, *ptmp;
 	struct rtw_adapter *padapter = netdev_priv(ndev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2881,13 +2876,9 @@
 		return -EINVAL;
 
 	spin_lock_bh(&pstapriv->asoc_list_lock);
-
 	phead = &pstapriv->asoc_list;
-
 	/* check asoc_queue */
-	list_for_each_safe(plist, ptmp, phead) {
-		psta = container_of(plist, struct sta_info, asoc_list);
-
+	list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
 		if (ether_addr_equal(mac, psta->hwaddr)) {
 			if (psta->dot8021xalg == 1 &&
 			    psta->bpairwise_key_installed == false) {
@@ -2912,7 +2903,6 @@
 			}
 		}
 	}
-
 	spin_unlock_bh(&pstapriv->asoc_list_lock);
 
 	associated_clients_update23a(padapter, updated);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index 0cdaef0..cf4a506 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -210,22 +210,21 @@
 void rtl8723au_write_port_cancel(struct rtw_adapter *padapter)
 {
 	struct xmit_buf *pxmitbuf;
-	struct list_head *plist;
 	int j;
 
 	DBG_8723A("%s\n", __func__);
 
 	padapter->bWritePortCancel = true;
 
-	list_for_each(plist, &padapter->xmitpriv.xmitbuf_list) {
-		pxmitbuf = container_of(plist, struct xmit_buf, list2);
+	list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitbuf_list,
+			    list2) {
 		for (j = 0; j < 8; j++) {
 			if (pxmitbuf->pxmit_urb[j])
 				usb_kill_urb(pxmitbuf->pxmit_urb[j]);
 		}
 	}
-	list_for_each(plist, &padapter->xmitpriv.xmitextbuf_list) {
-		pxmitbuf = container_of(plist, struct xmit_buf, list2);
+	list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitextbuf_list,
+			    list2) {
 		for (j = 0; j < 8; j++) {
 			if (pxmitbuf->pxmit_urb[j])
 				usb_kill_urb(pxmitbuf->pxmit_urb[j]);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 1fe8e3e..5dfcdfb 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -320,7 +320,6 @@
 	rtsx_do_before_power_down(chip, PM_S3);
 
 	if (dev->irq >= 0) {
-		synchronize_irq(dev->irq);
 		free_irq(dev->irq, (void *)dev);
 		dev->irq = -1;
 	}
@@ -398,7 +397,6 @@
 	rtsx_do_before_power_down(chip, PM_S1);
 
 	if (dev->irq >= 0) {
-		synchronize_irq(dev->irq);
 		free_irq(dev->irq, (void *)dev);
 		dev->irq = -1;
 	}
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index f27491e..f564a74 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -1,4 +1,5 @@
-/* Driver for Realtek PCI-Express card reader
+/*
+ * Driver for Realtek PCI-Express card reader
  *
  * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
@@ -30,74 +31,76 @@
  * Scatter-gather transfer buffer access routines
  ***********************************************************************/
 
-/* Copy a buffer of length buflen to/from the srb's transfer buffer.
+/*
+ * Copy a buffer of length buflen to/from the srb's transfer buffer.
  * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
  * points to a list of s-g entries and we ignore srb->request_bufflen.
  * For non-scatter-gather transfers, srb->request_buffer points to the
  * transfer buffer itself and srb->request_bufflen is the buffer's length.)
  * Update the *index and *offset variables so that the next copy will
- * pick up from where this one left off. */
+ * pick up from where this one left off.
+ */
 
 unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
-	unsigned int *offset, enum xfer_buf_dir dir)
+				       unsigned int buflen,
+				       struct scsi_cmnd *srb,
+				       unsigned int *index,
+				       unsigned int *offset,
+				       enum xfer_buf_dir dir)
 {
 	unsigned int cnt;
 
-	/* If not using scatter-gather, just transfer the data directly.
-	 * Make certain it will fit in the available buffer space. */
+	/* If not using scatter-gather, just transfer the data directly. */
 	if (scsi_sg_count(srb) == 0) {
+		unsigned char *sgbuffer;
+
 		if (*offset >= scsi_bufflen(srb))
 			return 0;
 		cnt = min(buflen, scsi_bufflen(srb) - *offset);
+
+		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;
+
 		if (dir == TO_XFER_BUF)
-			memcpy((unsigned char *) scsi_sglist(srb) + *offset,
-					buffer, cnt);
+			memcpy(sgbuffer, buffer, cnt);
 		else
-			memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
-					*offset, cnt);
+			memcpy(buffer, sgbuffer, cnt);
 		*offset += cnt;
 
-	/* Using scatter-gather.  We have to go through the list one entry
+	/*
+	 * Using scatter-gather.  We have to go through the list one entry
 	 * at a time.  Each s-g entry contains some number of pages, and
-	 * each page has to be kmap()'ed separately.  If the page is already
-	 * in kernel-addressable memory then kmap() will return its address.
-	 * If the page is not directly accessible -- such as a user buffer
-	 * located in high memory -- then kmap() will map it to a temporary
-	 * position in the kernel's virtual address space. */
+	 * each page has to be kmap()'ed separately.
+	 */
 	} else {
 		struct scatterlist *sg =
-				(struct scatterlist *) scsi_sglist(srb)
+				(struct scatterlist *)scsi_sglist(srb)
 				+ *index;
 
-		/* This loop handles a single s-g list entry, which may
+		/*
+		 * This loop handles a single s-g list entry, which may
 		 * include multiple pages.  Find the initial page structure
 		 * and the starting offset within the page, and update
-		 * the *offset and *index values for the next loop. */
+		 * the *offset and *index values for the next loop.
+		 */
 		cnt = 0;
 		while (cnt < buflen && *index < scsi_sg_count(srb)) {
 			struct page *page = sg_page(sg) +
 					((sg->offset + *offset) >> PAGE_SHIFT);
-			unsigned int poff =
-					(sg->offset + *offset) & (PAGE_SIZE-1);
+			unsigned int poff = (sg->offset + *offset) &
+					    (PAGE_SIZE - 1);
 			unsigned int sglen = sg->length - *offset;
 
 			if (sglen > buflen - cnt) {
-
 				/* Transfer ends within this s-g entry */
 				sglen = buflen - cnt;
 				*offset += sglen;
 			} else {
-
 				/* Transfer continues to next s-g entry */
 				*offset = 0;
 				++*index;
 				++sg;
 			}
 
-			/* Transfer the data for all the pages in this
-			 * s-g entry.  For each page: call kmap(), do the
-			 * transfer, and call kunmap() immediately after. */
 			while (sglen > 0) {
 				unsigned int plen = min(sglen, (unsigned int)
 						PAGE_SIZE - poff);
@@ -122,10 +125,12 @@
 	return cnt;
 }
 
-/* Store the contents of buffer into srb's transfer buffer and set the
-* SCSI residue. */
+/*
+ * Store the contents of buffer into srb's transfer buffer and set the
+ * SCSI residue.
+ */
 void rtsx_stor_set_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb)
+			    unsigned int buflen, struct scsi_cmnd *srb)
 {
 	unsigned int index = 0, offset = 0;
 
@@ -136,7 +141,7 @@
 }
 
 void rtsx_stor_get_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb)
+			    unsigned int buflen, struct scsi_cmnd *srb)
 {
 	unsigned int index = 0, offset = 0;
 
@@ -146,12 +151,12 @@
 		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
 }
 
-
 /***********************************************************************
  * Transport routines
  ***********************************************************************/
 
-/* Invoke the transport and basic error-handling/recovery methods
+/*
+ * Invoke the transport and basic error-handling/recovery methods
  *
  * This is used to send the message to the device and receive the response.
  */
@@ -161,20 +166,21 @@
 
 	result = rtsx_scsi_handler(srb, chip);
 
-	/* if the command gets aborted by the higher layers, we need to
-	 * short-circuit all other processing
+	/*
+	 * if the command gets aborted by the higher layers, we need to
+	 * short-circuit all other processing.
 	 */
 	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
 		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
 		srb->result = DID_ABORT << 16;
-		goto Handle_Errors;
+		goto handle_errors;
 	}
 
 	/* if there is a transport error, reset and don't auto-sense */
 	if (result == TRANSPORT_ERROR) {
 		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
 		srb->result = DID_ERROR << 16;
-		goto Handle_Errors;
+		goto handle_errors;
 	}
 
 	srb->result = SAM_STAT_GOOD;
@@ -188,21 +194,18 @@
 		/* set the result so the higher layers expect this data */
 		srb->result = SAM_STAT_CHECK_CONDITION;
 		memcpy(srb->sense_buffer,
-			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
-			sizeof(struct sense_data_t));
+		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
+		       sizeof(struct sense_data_t));
 	}
 
 	return;
 
-	/* Error and abort processing: try to resynchronize with the device
-	 * by issuing a port reset.  If that fails, try a class-specific
-	 * device reset. */
-Handle_Errors:
+handle_errors:
 	return;
 }
 
 void rtsx_add_cmd(struct rtsx_chip *chip,
-		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
 {
 	u32 *cb = (u32 *)(chip->host_cmds_ptr);
 	u32 val = 0;
@@ -321,9 +324,11 @@
 }
 
 static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
-		struct scatterlist *sg, int num_sg, unsigned int *index,
-		unsigned int *offset, int size,
-		enum dma_data_direction dma_dir, int timeout)
+					     struct scatterlist *sg, int num_sg,
+					     unsigned int *index,
+					     unsigned int *offset, int size,
+					     enum dma_data_direction dma_dir,
+					     int timeout)
 {
 	struct rtsx_dev *rtsx = chip->rtsx;
 	struct completion trans_done;
@@ -334,7 +339,7 @@
 	struct scatterlist *sg_ptr;
 	u32 val = TRIG_DMA;
 
-	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
+	if (!sg || (num_sg <= 0) || !offset || !index)
 		return -EIO;
 
 	if (dma_dir == DMA_TO_DEVICE)
@@ -363,15 +368,16 @@
 
 	spin_unlock_irq(&rtsx->reg_lock);
 
-	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
 
 	resid = size;
 	sg_ptr = sg;
 	chip->sgi = 0;
-	/* Usually the next entry will be @sg@ + 1, but if this sg element
+	/*
+	 * Usually the next entry will be @sg@ + 1, but if this sg element
 	 * is part of a chained scatterlist, it could jump to the start of
 	 * a new scatterlist array. So here we use sg_next to move to
-	 * the proper sg
+	 * the proper sg.
 	 */
 	for (i = 0; i < *index; i++)
 		sg_ptr = sg_next(sg_ptr);
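
The comment above is the point of this hunk: in a chained scatterlist, entry N+1 is not necessarily at sg + 1, because the last slot of each array is a chain link. sg_next() follows that link, so skipping ahead has to be done one hop at a time. A minimal sketch:

#include <linux/scatterlist.h>

/* Advance 'skip' entries from the start of a possibly chained table. */
static struct scatterlist *sg_skip(struct scatterlist *sg, unsigned int skip)
{
	while (sg && skip--)
		sg = sg_next(sg);	/* follows chain links, unlike sg + 1 */

	return sg;
}
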
@@ -476,7 +482,7 @@
 out:
 	rtsx->done = NULL;
 	rtsx->trans_state = STATE_TRANS_NONE;
-	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
 
 	if (err < 0)
 		rtsx_stop_cmd(chip, card);
@@ -485,8 +491,9 @@
 }
 
 static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
-		struct scatterlist *sg, int num_sg,
-		enum dma_data_direction dma_dir, int timeout)
+				     struct scatterlist *sg, int num_sg,
+				     enum dma_data_direction dma_dir,
+				     int timeout)
 {
 	struct rtsx_dev *rtsx = chip->rtsx;
 	struct completion trans_done;
@@ -496,7 +503,7 @@
 	long timeleft;
 	struct scatterlist *sg_ptr;
 
-	if ((sg == NULL) || (num_sg <= 0))
+	if (!sg || (num_sg <= 0))
 		return -EIO;
 
 	if (dma_dir == DMA_TO_DEVICE)
@@ -525,7 +532,7 @@
 
 	spin_unlock_irq(&rtsx->reg_lock);
 
-	buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
 
 	sg_ptr = sg;
 
@@ -623,7 +630,7 @@
 out:
 	rtsx->done = NULL;
 	rtsx->trans_state = STATE_TRANS_NONE;
-	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
 
 	if (err < 0)
 		rtsx_stop_cmd(chip, card);
@@ -632,7 +639,8 @@
 }
 
 static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
-		size_t len, enum dma_data_direction dma_dir, int timeout)
+			     size_t len, enum dma_data_direction dma_dir,
+			     int timeout)
 {
 	struct rtsx_dev *rtsx = chip->rtsx;
 	struct completion trans_done;
@@ -642,7 +650,7 @@
 	u32 val = 1 << 31;
 	long timeleft;
 
-	if ((buf == NULL) || (len <= 0))
+	if (!buf || (len <= 0))
 		return -EIO;
 
 	if (dma_dir == DMA_TO_DEVICE)
@@ -652,8 +660,8 @@
 	else
 		return -ENXIO;
 
-	addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
-	if (!addr)
+	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
+	if (dma_mapping_error(&rtsx->pci->dev, addr))
 		return -ENOMEM;
 
 	if (card == SD_CARD)
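
dma_map_single() can fail with a cookie that is not numerically zero, so the only portable check is dma_mapping_error() against the same device, which is what the hunk above switches to. A minimal sketch of the pattern:

#include <linux/dma-mapping.h>

static int map_one(struct device *dev, void *buf, size_t len,
		   enum dma_data_direction dir, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, dir);

	/* never test the cookie against 0/NULL directly */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}
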
@@ -706,7 +714,7 @@
 out:
 	rtsx->done = NULL;
 	rtsx->trans_state = STATE_TRANS_NONE;
-	dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);
+	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);
 
 	if (err < 0)
 		rtsx_stop_cmd(chip, card);
@@ -715,9 +723,9 @@
 }
 
 int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
-		void *buf, size_t len, int use_sg, unsigned int *index,
-		unsigned int *offset, enum dma_data_direction dma_dir,
-		int timeout)
+			       void *buf, size_t len, int use_sg,
+			       unsigned int *index, unsigned int *offset,
+			       enum dma_data_direction dma_dir, int timeout)
 {
 	int err = 0;
 
@@ -725,13 +733,16 @@
 	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
 		return -EIO;
 
-	if (use_sg)
-		err = rtsx_transfer_sglist_adma_partial(chip, card,
-				(struct scatterlist *)buf, use_sg,
-				index, offset, (int)len, dma_dir, timeout);
-	else
+	if (use_sg) {
+		struct scatterlist *sg = (struct scatterlist *)buf;
+
+		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
+							index, offset, (int)len,
+							dma_dir, timeout);
+	} else {
 		err = rtsx_transfer_buf(chip, card,
 					buf, len, dma_dir, timeout);
+	}
 	if (err < 0) {
 		if (RTSX_TST_DELINK(chip)) {
 			RTSX_CLR_DELINK(chip);
@@ -744,7 +755,7 @@
 }
 
 int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
-		int use_sg, enum dma_data_direction dma_dir, int timeout)
+		       int use_sg, enum dma_data_direction dma_dir, int timeout)
 {
 	int err = 0;
 
@@ -756,8 +767,8 @@
 
 	if (use_sg) {
 		err = rtsx_transfer_sglist_adma(chip, card,
-				(struct scatterlist *)buf,
-				use_sg, dma_dir, timeout);
+						(struct scatterlist *)buf,
+						use_sg, dma_dir, timeout);
 	} else {
 		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
 	}
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index d6c4982..244d589 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4260,10 +4260,10 @@
 		return TRANSPORT_FAILED;
 	}
 
-	if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
-		(0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
-		(0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
-		(0x64 != srb->cmnd[8])) {
+	if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
+		(srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+		(srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+		(srb->cmnd[8] != 0x64)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -4284,7 +4284,7 @@
 		return TRANSPORT_FAILED;
 	}
 
-	buf[5] = (1 == CHK_SD(sd_card)) ?  0x01 : 0x02;
+	buf[5] = (CHK_SD(sd_card) == 1) ? 0x01 : 0x02;
 	if (chip->card_wp & SD_CARD)
 		buf[5] |= 0x80;
 
@@ -5176,10 +5176,10 @@
 		return TRANSPORT_FAILED;
 	}
 
-	if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
-		(0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
-		(0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
-		(0x64 != srb->cmnd[8])) {
+	if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
+		(srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+		(srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+		(srb->cmnd[8] != 0x64)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -5188,7 +5188,7 @@
 	switch (srb->cmnd[1] & 0x0F) {
 	case 0:
 #ifdef SUPPORT_SD_LOCK
-		if (0x64 == srb->cmnd[9])
+		if (srb->cmnd[9] == 0x64)
 			sd_card->sd_lock_status |= SD_SDR_RST;
 #endif
 		retval = reset_sd_card(chip);
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index e67e7ec..2e1e6cb 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -420,7 +420,6 @@
 	return STATUS_SUCCESS;
 }
 
-
 int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
 {
 	int retval;
@@ -516,7 +515,6 @@
 	return STATUS_SUCCESS;
 }
 
-
 int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 	struct spi_info *spi = &(chip->spi);
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index c95b3ab..cc0afee 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -478,6 +478,8 @@
 	u32             max_isr_xmits;
 	u32             rcv_interrupt_yields;
 	u32             intagg_period;
+	u32             intagg_delay;
+	u32             dynamic_intagg;
 	struct inicpm_state    *inicpm_info;
 	void *pinicpm_info;
 	struct slic_ifevents  if_events;
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index b23a2d1..a91c60a 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -58,9 +58,9 @@
 #define DEBUG_MICROCODE                 1
 #define DBG                             1
 #define SLIC_INTERRUPT_PROCESS_LIMIT	1
-#define SLIC_OFFLOAD_IP_CHECKSUM		1
-#define STATS_TIMER_INTERVAL			2
-#define PING_TIMER_INTERVAL			    1
+#define SLIC_OFFLOAD_IP_CHECKSUM	1
+#define STATS_TIMER_INTERVAL		2
+#define PING_TIMER_INTERVAL		1
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
@@ -102,8 +102,7 @@
 static char *slic_proc_version = "2.0.351  2006/07/14 12:26:00";
 
 static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
-static int intagg_delay = 100;
-static u32 dynamic_intagg;
+#define DEFAULT_INTAGG_DELAY 100
 static unsigned int rcv_count;
 
 #define DRV_NAME          "slicoss"
@@ -119,17 +118,14 @@
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_LICENSE("Dual BSD/GPL");
 
-module_param(dynamic_intagg, int, 0);
-MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
-module_param(intagg_delay, int, 0);
-MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
-
 static const struct pci_device_id slic_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
 	{ 0 }
 };
 
+static struct ethtool_ops slic_ethtool_ops;
+
 MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
 
 static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
@@ -2860,7 +2856,7 @@
 	if (slic_global.dynamic_intagg)
 		slic_intagg_set(adapter, 0);
 	else
-		slic_intagg_set(adapter, intagg_delay);
+		slic_intagg_set(adapter, adapter->intagg_delay);
 
 	/*
 	 *  Initialize ping status to "ok"
@@ -2881,6 +2877,26 @@
 	return status;
 }
 
+static int slic_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coalesce)
+{
+	struct adapter *adapter = netdev_priv(dev);
+
+	coalesce->rx_coalesce_usecs = adapter->intagg_delay;
+	coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
+	return 0;
+}
+
+static int slic_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coalesce)
+{
+	struct adapter *adapter = netdev_priv(dev);
+
+	adapter->intagg_delay = coalesce->rx_coalesce_usecs;
+	adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce;
+	return 0;
+}
+
 static void slic_init_driver(void)
 {
 	if (slic_first_init) {
@@ -3069,8 +3085,6 @@
 	struct sliccard *card = NULL;
 	int pci_using_dac = 0;
 
-	slic_global.dynamic_intagg = dynamic_intagg;
-
 	err = pci_enable_device(pcidev);
 
 	if (err)
@@ -3112,12 +3126,14 @@
 		goto err_out_exit_slic_probe;
 	}
 
+	netdev->ethtool_ops = &slic_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &pcidev->dev);
 
 	pci_set_drvdata(pcidev, netdev);
 	adapter = netdev_priv(netdev);
 	adapter->netdev = netdev;
 	adapter->pcidev = pcidev;
+	slic_global.dynamic_intagg = adapter->dynamic_intagg;
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
@@ -3204,5 +3220,10 @@
 	pci_unregister_driver(&slic_driver);
 }
 
+static struct ethtool_ops slic_ethtool_ops = {
+	.get_coalesce = slic_get_coalesce,
+	.set_coalesce = slic_set_coalesce
+};
+
 module_init(slic_module_init);
 module_exit(slic_module_cleanup);
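
For the ethtool hooks, .get_coalesce is expected to report the adapter's current parameters into *coalesce and .set_coalesce to apply the user's values to the adapter; the ops table can normally be const, since netdev->ethtool_ops is a const pointer. A minimal sketch with a placeholder private struct:

#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct my_adapter {		/* placeholder private data */
	u32 rx_usecs;
	u32 adaptive;
};

static int my_get_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *ec)
{
	struct my_adapter *ad = netdev_priv(dev);

	ec->rx_coalesce_usecs = ad->rx_usecs;		/* report state */
	ec->use_adaptive_rx_coalesce = ad->adaptive;
	return 0;
}

static int my_set_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *ec)
{
	struct my_adapter *ad = netdev_priv(dev);

	ad->rx_usecs = ec->rx_coalesce_usecs;		/* apply request */
	ad->adaptive = ec->use_adaptive_rx_coalesce;
	return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_coalesce = my_get_coalesce,
	.set_coalesce = my_set_coalesce,
};
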
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 0331d34..95f7cae 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
 #include <linux/sizes.h>
 
 #include "ddk750_help.h"
@@ -5,6 +6,10 @@
 #include "ddk750_chip.h"
 #include "ddk750_power.h"
 
+/* n / d + 1 / 2 = (2n + d) / 2d */
+#define roundedDiv(num, denom)	((2 * (num) + (denom)) / (2 * (denom)))
+#define MHz(x) ((x) * 1000000)
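
The roundedDiv() comment is the whole derivation: adding half the denominator before truncating, n/d + 1/2 = (2n + d)/(2d), gives round-to-nearest for positive integers. A self-contained check (plain userspace C):

#include <assert.h>

#define roundedDiv(num, denom)	((2 * (num) + (denom)) / (2 * (denom)))

int main(void)
{
	assert(roundedDiv(7, 2) == 4);	/* 3.5  -> 4 */
	assert(roundedDiv(9, 4) == 2);	/* 2.25 -> 2 */
	assert(roundedDiv(10, 5) == 2);	/* exact stays exact */
	return 0;
}
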
+
 logical_chip_type_t getChipType(void)
 {
 	unsigned short physicalID;
@@ -36,10 +41,10 @@
 		return MHz(130);
 
 	pll_reg = PEEK32(MXCLK_PLL_CTRL);
-	M = FIELD_GET(pll_reg, PANEL_PLL_CTRL, M);
-	N = FIELD_GET(pll_reg, PANEL_PLL_CTRL, N);
-	OD = FIELD_GET(pll_reg, PANEL_PLL_CTRL, OD);
-	POD = FIELD_GET(pll_reg, PANEL_PLL_CTRL, POD);
+	M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
+	N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
+	OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
+	POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
 
 	return DEFAULT_INPUT_CLOCK * M / N / (1 << OD) / (1 << POD);
 }
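
get_mxclk_freq() recovers the PLL output as input_clock * M / N / 2^OD / 2^POD, pulling each factor out of the control register by mask and shift; the N term uses PLL_CTRL_N_SHIFT, the same macro formatPllReg() composes with further down. A small worked sketch with made-up field positions (not the chip's real layout):

#include <linux/types.h>

#define X_M_MASK	0x000000ff	/* bits 7:0  (made up) */
#define X_M_SHIFT	0
#define X_N_MASK	0x00000f00	/* bits 11:8 (made up) */
#define X_N_SHIFT	8
#define X_OD_MASK	0x00003000	/* bits 13:12 (made up) */
#define X_OD_SHIFT	12

static unsigned int pll_freq(unsigned int ref, u32 reg)
{
	unsigned int m = (reg & X_M_MASK) >> X_M_SHIFT;
	unsigned int n = (reg & X_N_MASK) >> X_N_SHIFT;
	unsigned int od = (reg & X_OD_MASK) >> X_OD_SHIFT;

	if (!n)
		return 0;	/* avoid dividing by a zero N field */

	return ref * m / n / (1 << od);
}
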
@@ -79,7 +84,7 @@
 
 static void setMemoryClock(unsigned int frequency)
 {
-	unsigned int ulReg, divisor;
+	unsigned int reg, divisor;
 
 	/* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */
 	if (getChipType() == SM750LE)
@@ -95,24 +100,24 @@
 		divisor = roundedDiv(get_mxclk_freq(), frequency);
 
 		/* Set the corresponding divisor in the register. */
-		ulReg = PEEK32(CURRENT_GATE);
+		reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_M2XCLK_MASK;
 		switch (divisor) {
 		default:
 		case 1:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_1);
+			reg |= CURRENT_GATE_M2XCLK_DIV_1;
 			break;
 		case 2:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_2);
+			reg |= CURRENT_GATE_M2XCLK_DIV_2;
 			break;
 		case 3:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_3);
+			reg |= CURRENT_GATE_M2XCLK_DIV_3;
 			break;
 		case 4:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_4);
+			reg |= CURRENT_GATE_M2XCLK_DIV_4;
 			break;
 		}
 
-		setCurrentGate(ulReg);
+		setCurrentGate(reg);
 	}
 }
 
@@ -126,7 +131,7 @@
  */
 static void setMasterClock(unsigned int frequency)
 {
-	unsigned int ulReg, divisor;
+	unsigned int reg, divisor;
 
 	/* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */
 	if (getChipType() == SM750LE)
@@ -142,24 +147,24 @@
 		divisor = roundedDiv(get_mxclk_freq(), frequency);
 
 		/* Set the corresponding divisor in the register. */
-		ulReg = PEEK32(CURRENT_GATE);
+		reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_MCLK_MASK;
 		switch (divisor) {
 		default:
 		case 3:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_3);
+			reg |= CURRENT_GATE_MCLK_DIV_3;
 			break;
 		case 4:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_4);
+			reg |= CURRENT_GATE_MCLK_DIV_4;
 			break;
 		case 6:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_6);
+			reg |= CURRENT_GATE_MCLK_DIV_6;
 			break;
 		case 8:
-			ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_8);
+			reg |= CURRENT_GATE_MCLK_DIV_8;
 			break;
 		}
 
-		setCurrentGate(ulReg);
+		setCurrentGate(reg);
 		}
 }
 
@@ -174,11 +179,11 @@
 
 	/* for 750,always use power mode0*/
 	reg = PEEK32(MODE0_GATE);
-	reg = FIELD_SET(reg, MODE0_GATE, GPIO, ON);
+	reg |= MODE0_GATE_GPIO;
 	POKE32(MODE0_GATE, reg);
 
 	/* get frame buffer size from GPIO */
-	reg = FIELD_GET(PEEK32(MISC_CTRL), MISC_CTRL, LOCALMEM_SIZE);
+	reg = PEEK32(MISC_CTRL) & MISC_CTRL_LOCALMEM_SIZE_MASK;
 	switch (reg) {
 	case MISC_CTRL_LOCALMEM_SIZE_8M:
 		data = SZ_8M;  break; /* 8  Mega byte */
@@ -197,24 +202,22 @@
 
 int ddk750_initHw(initchip_param_t *pInitParam)
 {
-	unsigned int ulReg;
+	unsigned int reg;
 
 	if (pInitParam->powerMode != 0)
 		pInitParam->powerMode = 0;
 	setPowerMode(pInitParam->powerMode);
 
 	/* Enable display power gate & LOCALMEM power gate*/
-	ulReg = PEEK32(CURRENT_GATE);
-	ulReg = FIELD_SET(ulReg, CURRENT_GATE, DISPLAY, ON);
-	ulReg = FIELD_SET(ulReg, CURRENT_GATE, LOCALMEM, ON);
-	setCurrentGate(ulReg);
+	reg = PEEK32(CURRENT_GATE);
+	reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
+	setCurrentGate(reg);
 
 	if (getChipType() != SM750LE) {
 		/*	set panel pll and graphic mode via mmio_88 */
-		ulReg = PEEK32(VGA_CONFIGURATION);
-		ulReg = FIELD_SET(ulReg, VGA_CONFIGURATION, PLL, PANEL);
-		ulReg = FIELD_SET(ulReg, VGA_CONFIGURATION, MODE, GRAPHIC);
-		POKE32(VGA_CONFIGURATION, ulReg);
+		reg = PEEK32(VGA_CONFIGURATION);
+		reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE);
+		POKE32(VGA_CONFIGURATION, reg);
 	} else {
 #if defined(__i386__) || defined(__x86_64__)
 		/* set graphic mode via IO method */
@@ -238,36 +241,36 @@
 	   The memory should be resetted after changing the MXCLK.
 	 */
 	if (pInitParam->resetMemory == 1) {
-		ulReg = PEEK32(MISC_CTRL);
-		ulReg = FIELD_SET(ulReg, MISC_CTRL, LOCALMEM_RESET, RESET);
-		POKE32(MISC_CTRL, ulReg);
+		reg = PEEK32(MISC_CTRL);
+		reg &= ~MISC_CTRL_LOCALMEM_RESET;
+		POKE32(MISC_CTRL, reg);
 
-		ulReg = FIELD_SET(ulReg, MISC_CTRL, LOCALMEM_RESET, NORMAL);
-		POKE32(MISC_CTRL, ulReg);
+		reg |= MISC_CTRL_LOCALMEM_RESET;
+		POKE32(MISC_CTRL, reg);
 	}
 
 	if (pInitParam->setAllEngOff == 1) {
 		enable2DEngine(0);
 
 		/* Disable Overlay, if a former application left it on */
-		ulReg = PEEK32(VIDEO_DISPLAY_CTRL);
-		ulReg = FIELD_SET(ulReg, VIDEO_DISPLAY_CTRL, PLANE, DISABLE);
-		POKE32(VIDEO_DISPLAY_CTRL, ulReg);
+		reg = PEEK32(VIDEO_DISPLAY_CTRL);
+		reg &= ~DISPLAY_CTRL_PLANE;
+		POKE32(VIDEO_DISPLAY_CTRL, reg);
 
 		/* Disable video alpha, if a former application left it on */
-		ulReg = PEEK32(VIDEO_ALPHA_DISPLAY_CTRL);
-		ulReg = FIELD_SET(ulReg, VIDEO_ALPHA_DISPLAY_CTRL, PLANE, DISABLE);
-		POKE32(VIDEO_ALPHA_DISPLAY_CTRL, ulReg);
+		reg = PEEK32(VIDEO_ALPHA_DISPLAY_CTRL);
+		reg &= ~DISPLAY_CTRL_PLANE;
+		POKE32(VIDEO_ALPHA_DISPLAY_CTRL, reg);
 
 		/* Disable alpha plane, if a former application left it on */
-		ulReg = PEEK32(ALPHA_DISPLAY_CTRL);
-		ulReg = FIELD_SET(ulReg, ALPHA_DISPLAY_CTRL, PLANE, DISABLE);
-		POKE32(ALPHA_DISPLAY_CTRL, ulReg);
+		reg = PEEK32(ALPHA_DISPLAY_CTRL);
+		reg &= ~DISPLAY_CTRL_PLANE;
+		POKE32(ALPHA_DISPLAY_CTRL, reg);
 
 		/* Disable DMA Channel, if a former application left it on */
-		ulReg = PEEK32(DMA_ABORT_INTERRUPT);
-		ulReg = FIELD_SET(ulReg, DMA_ABORT_INTERRUPT, ABORT_1, ABORT);
-		POKE32(DMA_ABORT_INTERRUPT, ulReg);
+		reg = PEEK32(DMA_ABORT_INTERRUPT);
+		reg |= DMA_ABORT_INTERRUPT_ABORT_1;
+		POKE32(DMA_ABORT_INTERRUPT, reg);
 
 		/* Disable DMA Power, if a former application left it on */
 		enableDMA(0);
@@ -337,7 +340,7 @@
 				unsigned int diff;
 
 				tmpClock = pll->inputFreq * M / N / X;
-				diff = absDiff(tmpClock, request_orig);
+				diff = abs(tmpClock - request_orig);
 				if (diff < mini_diff) {
 					pll->M = M;
 					pll->N = N;
@@ -356,24 +359,29 @@
 
 unsigned int formatPllReg(pll_value_t *pPLL)
 {
-	unsigned int ulPllReg = 0;
-
-    /* Note that all PLL's have the same format. Here, we just use Panel PLL parameter
-       to work out the bit fields in the register.
-       On returning a 32 bit number, the value can be applied to any PLL in the calling function.
-    */
-	ulPllReg =
-	FIELD_SET(0, PANEL_PLL_CTRL, BYPASS, OFF)
-	| FIELD_SET(0, PANEL_PLL_CTRL, POWER,  ON)
-	| FIELD_SET(0, PANEL_PLL_CTRL, INPUT,  OSC)
 #ifndef VALIDATION_CHIP
-	| FIELD_VALUE(0, PANEL_PLL_CTRL, POD,    pPLL->POD)
+	unsigned int POD = pPLL->POD;
 #endif
-	| FIELD_VALUE(0, PANEL_PLL_CTRL, OD,     pPLL->OD)
-	| FIELD_VALUE(0, PANEL_PLL_CTRL, N,      pPLL->N)
-	| FIELD_VALUE(0, PANEL_PLL_CTRL, M,      pPLL->M);
+	unsigned int OD = pPLL->OD;
+	unsigned int M = pPLL->M;
+	unsigned int N = pPLL->N;
+	unsigned int reg = 0;
 
-	return ulPllReg;
+	/*
+	 * Note that all PLLs have the same format. Here, we just use
+	 * Panel PLL parameter to work out the bit fields in the
+	 * register. On returning a 32 bit number, the value can be
+	 * applied to any PLL in the calling function.
+	 */
+	reg = PLL_CTRL_POWER |
+#ifndef VALIDATION_CHIP
+		((POD << PLL_CTRL_POD_SHIFT) & PLL_CTRL_POD_MASK) |
+#endif
+		((OD << PLL_CTRL_OD_SHIFT) & PLL_CTRL_OD_MASK) |
+		((N << PLL_CTRL_N_SHIFT) & PLL_CTRL_N_MASK) |
+		((M << PLL_CTRL_M_SHIFT) & PLL_CTRL_M_MASK);
+
+	return reg;
 }
 
 
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 84f6e8b..ca4973e 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -9,111 +9,55 @@
 static void setDisplayControl(int ctrl, int disp_state)
 {
 	/* state != 0 means turn on both timing & plane en_bit */
-	unsigned long ulDisplayCtrlReg, ulReservedBits;
-	int cnt;
+	unsigned long reg, val, reserved;
+	int cnt = 0;
 
-	cnt = 0;
-
-	/* Set the primary display control */
 	if (!ctrl) {
-		ulDisplayCtrlReg = PEEK32(PANEL_DISPLAY_CTRL);
-		/* Turn on/off the Panel display control */
-		if (disp_state) {
-			/* Timing should be enabled first before enabling the plane
-			 * because changing at the same time does not guarantee that
-			 * the plane will also enabled or disabled.
-			 */
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								PANEL_DISPLAY_CTRL, TIMING, ENABLE);
-			POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								PANEL_DISPLAY_CTRL, PLANE, ENABLE);
-
-			/* Added some masks to mask out the reserved bits.
-			 * Sometimes, the reserved bits are set/reset randomly when
-			 * writing to the PRIMARY_DISPLAY_CTRL, therefore, the register
-			 * reserved bits are needed to be masked out.
-			 */
-			ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
-				FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
-				FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE);
-
-			/* Somehow the register value on the plane is not set
-			 * until a few delay. Need to write
-			 * and read it a couple times
-			 */
-			do {
-				cnt++;
-				POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-			} while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) !=
-					(ulDisplayCtrlReg & ~ulReservedBits));
-			printk("Set Panel Plane enbit:after tried %d times\n", cnt);
-		} else {
-			/* When turning off, there is no rule on the programming
-			 * sequence since whenever the clock is off, then it does not
-			 * matter whether the plane is enabled or disabled.
-			 * Note: Modifying the plane bit will take effect on the
-			 * next vertical sync. Need to find out if it is necessary to
-			 * wait for 1 vsync before modifying the timing enable bit.
-			 * */
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								PANEL_DISPLAY_CTRL, PLANE, DISABLE);
-			POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								PANEL_DISPLAY_CTRL, TIMING, DISABLE);
-			POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-		}
-
+		reg = PANEL_DISPLAY_CTRL;
+		reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK;
 	} else {
-		/* Set the secondary display control */
-		ulDisplayCtrlReg = PEEK32(CRT_DISPLAY_CTRL);
+		reg = CRT_DISPLAY_CTRL;
+		reserved = CRT_DISPLAY_CTRL_RESERVED_MASK;
+	}
 
-		if (disp_state) {
-			/* Timing should be enabled first before enabling the plane because changing at the
-			   same time does not guarantee that the plane will also enabled or disabled.
-			   */
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								CRT_DISPLAY_CTRL, TIMING, ENABLE);
-			POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
+	val = PEEK32(reg);
+	if (disp_state) {
+		/*
+		 * Timing should be enabled first before enabling the
+		 * plane because changing at the same time does not
+		 * guarantee that the plane will also be enabled or
+		 * disabled.
+		 */
+		val |= DISPLAY_CTRL_TIMING;
+		POKE32(reg, val);
 
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								CRT_DISPLAY_CTRL, PLANE, ENABLE);
+		val |= DISPLAY_CTRL_PLANE;
 
-			/* Added some masks to mask out the reserved bits.
-			 * Sometimes, the reserved bits are set/reset randomly when
-			 * writing to the PRIMARY_DISPLAY_CTRL, therefore, the register
-			 * reserved bits are needed to be masked out.
-			 */
+		/*
+		 * Somehow the register value on the plane is not set
+		 * until after a short delay. Need to write and read it
+		 * a couple of times.
+		 */
+		do {
+			cnt++;
+			POKE32(reg, val);
+		} while ((PEEK32(reg) & ~reserved) != (val & ~reserved));
+		pr_debug("Set Plane enable bit: tried %d times\n", cnt);
+	} else {
+		/*
+		 * When turning off, there is no rule on the
+		 * programming sequence, since once the clock is
+		 * off it does not matter whether the plane is
+		 * enabled or disabled.  Note: Modifying the plane bit
+		 * will take effect on the next vertical sync. Need to
+		 * find out if it is necessary to wait for 1 vsync
+		 * before modifying the timing enable bit.
+		 */
+		val &= ~DISPLAY_CTRL_PLANE;
+		POKE32(reg, val);
 
-			ulReservedBits = FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
-				FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
-				FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE) |
-				FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_4_MASK, ENABLE);
-
-			do {
-				cnt++;
-				POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
-			} while ((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) !=
-					(ulDisplayCtrlReg & ~ulReservedBits));
-				printk("Set Crt Plane enbit:after tried %d times\n", cnt);
-		} else {
-			/* When turning off, there is no rule on the programming
-			 * sequence since whenever the clock is off, then it does not
-			 * matter whether the plane is enabled or disabled.
-			 * Note: Modifying the plane bit will take effect on the next
-			 * vertical sync. Need to find out if it is necessary to
-			 * wait for 1 vsync before modifying the timing enable bit.
-			 */
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								CRT_DISPLAY_CTRL, PLANE, DISABLE);
-			POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
-
-			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
-								CRT_DISPLAY_CTRL, TIMING, DISABLE);
-			POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
-		}
+		val &= ~DISPLAY_CTRL_TIMING;
+		POKE32(reg, val);
 	}
 }
 
@@ -126,54 +70,42 @@
 
 		/* Do not wait when the Primary PLL is off or display control is already off.
 		   This will prevent the software to wait forever. */
-		if ((FIELD_GET(PEEK32(PANEL_PLL_CTRL), PANEL_PLL_CTRL, POWER) ==
-			 PANEL_PLL_CTRL_POWER_OFF) ||
-			(FIELD_GET(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, TIMING) ==
-			 PANEL_DISPLAY_CTRL_TIMING_DISABLE)) {
+		if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
+		    !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
 			return;
 		}
 
 		while (delay-- > 0) {
 			/* Wait for end of vsync. */
 			do {
-				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
-						   SYSTEM_CTRL,
-						   PANEL_VSYNC);
-			} while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
+				status = PEEK32(SYSTEM_CTRL);
+			} while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
 
 			/* Wait for start of vsync. */
 			do {
-				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
-						   SYSTEM_CTRL,
-						   PANEL_VSYNC);
-			} while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE);
+				status = PEEK32(SYSTEM_CTRL);
+			} while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
 		}
 
 	} else {
 
 		/* Do not wait when the Primary PLL is off or display control is already off.
 			   This will prevent the software to wait forever. */
-		if ((FIELD_GET(PEEK32(CRT_PLL_CTRL), CRT_PLL_CTRL, POWER) ==
-			 CRT_PLL_CTRL_POWER_OFF) ||
-			(FIELD_GET(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, TIMING) ==
-			 CRT_DISPLAY_CTRL_TIMING_DISABLE)) {
+		if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
+		    !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
 			return;
 		}
 
 		while (delay-- > 0) {
 			/* Wait for end of vsync. */
 			do {
-				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
-								   SYSTEM_CTRL,
-								   CRT_VSYNC);
-			} while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
+				status = PEEK32(SYSTEM_CTRL);
+			} while (status & SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
 
 			/* Wait for start of vsync. */
 			do {
-				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
-								   SYSTEM_CTRL,
-								   CRT_VSYNC);
-			} while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE);
+				status = PEEK32(SYSTEM_CTRL);
+			} while (!(status & SYSTEM_CTRL_CRT_VSYNC_ACTIVE));
 		}
 	}
 }
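
Both branches wait for the end of the current vsync and then the start of the next one; the CRT branch polls the CRT vsync status bit, on the assumption that the converted register header defines SYSTEM_CTRL_CRT_VSYNC_ACTIVE as a mask alongside the panel one. A minimal sketch of the wait pattern, with a bounded spin and placeholder read_status()/vsync_bit:

#include <linux/types.h>

/* read_status() and vsync_bit are placeholders for PEEK32(SYSTEM_CTRL)
 * and the relevant *_VSYNC_ACTIVE mask. */
static void wait_one_vsync(u32 (*read_status)(void), u32 vsync_bit)
{
	unsigned int guard = 1000000;	/* don't spin forever on a dead clock */

	while ((read_status() & vsync_bit) && --guard)
		;	/* wait for the current vsync to end */

	guard = 1000000;
	while (!(read_status() & vsync_bit) && --guard)
		;	/* wait for the next vsync to begin */
}
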
@@ -184,22 +116,22 @@
 
 	/* disp should be 1 to open sequence */
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
-	reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, FPEN, disp);
+	reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
 	primaryWaitVerticalSync(delay);
 
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
-	reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, DATA, disp);
+	reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
 	primaryWaitVerticalSync(delay);
 
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
-	reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, VBIASEN, disp);
+	reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
 	primaryWaitVerticalSync(delay);
 
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
-	reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, FPEN, disp);
+	reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
 	primaryWaitVerticalSync(delay);
 
@@ -212,16 +144,20 @@
 	if (output & PNL_2_USAGE) {
 		/* set panel path controller select */
 		reg = PEEK32(PANEL_DISPLAY_CTRL);
-		reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, SELECT, (output & PNL_2_MASK)>>PNL_2_OFFSET);
+		reg &= ~PANEL_DISPLAY_CTRL_SELECT_MASK;
+		reg |= (((output & PNL_2_MASK) >> PNL_2_OFFSET) <<
+			PANEL_DISPLAY_CTRL_SELECT_SHIFT);
 		POKE32(PANEL_DISPLAY_CTRL, reg);
 	}
 
 	if (output & CRT_2_USAGE) {
 		/* set crt path controller select */
 		reg = PEEK32(CRT_DISPLAY_CTRL);
-		reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, SELECT, (output & CRT_2_MASK)>>CRT_2_OFFSET);
+		reg &= ~CRT_DISPLAY_CTRL_SELECT_MASK;
+		reg |= (((output & CRT_2_MASK) >> CRT_2_OFFSET) <<
+			CRT_DISPLAY_CTRL_SELECT_SHIFT);
 		/*se blank off */
-		reg = FIELD_SET(reg, CRT_DISPLAY_CTRL, BLANK, OFF);
+		reg &= ~CRT_DISPLAY_CTRL_BLANK;
 		POKE32(CRT_DISPLAY_CTRL, reg);
 
 	}
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
index 5be814e..009db92 100644
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ b/drivers/staging/sm750fb/ddk750_help.h
@@ -6,7 +6,6 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
-#include "sm750_help.h"
 
 /* software control endianness */
 #define PEEK32(addr) readl(addr + mmio750)
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index 7be2111..231c021 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -17,8 +17,7 @@
 	/* Enable GPIO 30 & 31 as IIC clock & data */
 	value = PEEK32(GPIO_MUX);
 
-	value = FIELD_SET(value, GPIO_MUX, 30, I2C) |
-			  FIELD_SET(0, GPIO_MUX, 31, I2C);
+	value |= (GPIO_MUX_30 | GPIO_MUX_31);
 	POKE32(GPIO_MUX, value);
 
 	/* Enable Hardware I2C power.
@@ -27,12 +26,10 @@
 	enableI2C(1);
 
 	/* Enable the I2C Controller and set the bus speed mode */
-	value = PEEK32(I2C_CTRL);
-	if (bus_speed_mode == 0)
-		value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD);
-	else
-		value = FIELD_SET(value, I2C_CTRL, MODE, FAST);
-	value = FIELD_SET(value, I2C_CTRL, EN, ENABLE);
+	value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
+	if (bus_speed_mode)
+		value |= I2C_CTRL_MODE;
+	value |= I2C_CTRL_EN;
 	POKE32(I2C_CTRL, value);
 
 	return 0;
@@ -43,8 +40,7 @@
 	unsigned int value;
 
 	/* Disable I2C controller */
-	value = PEEK32(I2C_CTRL);
-	value = FIELD_SET(value, I2C_CTRL, EN, DISABLE);
+	value = PEEK32(I2C_CTRL) & ~I2C_CTRL_EN;
 	POKE32(I2C_CTRL, value);
 
 	/* Disable I2C Power */
@@ -52,8 +48,8 @@
 
 	/* Set GPIO 30 & 31 back as GPIO pins */
 	value = PEEK32(GPIO_MUX);
-	value = FIELD_SET(value, GPIO_MUX, 30, GPIO);
-	value = FIELD_SET(value, GPIO_MUX, 31, GPIO);
+	value &= ~GPIO_MUX_30;
+	value &= ~GPIO_MUX_31;
 	POKE32(GPIO_MUX, value);
 }
 
@@ -63,9 +59,7 @@
 
 	/* Wait until the transfer is completed. */
 	timeout = HWI2C_WAIT_TIMEOUT;
-	while ((FIELD_GET(PEEK32(I2C_STATUS),
-			  I2C_STATUS, TX) != I2C_STATUS_TX_COMPLETED) &&
-	       (timeout != 0))
+	while (!(PEEK32(I2C_STATUS) & I2C_STATUS_TX) && (timeout != 0))
 		timeout--;
 
 	if (timeout == 0)
@@ -121,14 +115,13 @@
 			POKE32(I2C_DATA0 + i, *buf++);
 
 		/* Start the I2C */
-		POKE32(I2C_CTRL,
-		       FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+		POKE32(I2C_CTRL, PEEK32(I2C_CTRL) | I2C_CTRL_CTRL);
 
 		/* Wait until the transfer is completed. */
 		if (hw_i2c_wait_tx_done() != 0)
 			break;
 
-		/* Substract length */
+		/* Subtract length */
 		length -= (count + 1);
 
 		/* Total byte written */
@@ -184,8 +177,7 @@
 		POKE32(I2C_BYTE_COUNT, count);
 
 		/* Start the I2C */
-		POKE32(I2C_CTRL,
-		       FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+		POKE32(I2C_CTRL, PEEK32(I2C_CTRL) | I2C_CTRL_CTRL);
 
 		/* Wait until transaction done. */
 		if (hw_i2c_wait_tx_done() != 0)
@@ -195,7 +187,7 @@
 		for (i = 0; i <= count; i++)
 			*buf++ = PEEK32(I2C_DATA0 + i);
 
-		/* Substract length by 16 */
+		/* Decrease length by the number of bytes read */
 		length -= (count + 1);
 
 		/* Number of bytes read. */
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index fa35926..ccb4e06 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -25,13 +25,12 @@
 	   Note that normal SM750/SM718 only use those two register for
 	   auto-centering mode.
 	 */
-	POKE32(CRT_AUTO_CENTERING_TL,
-	FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0)
-	| FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0));
+	POKE32(CRT_AUTO_CENTERING_TL, 0);
 
 	POKE32(CRT_AUTO_CENTERING_BR,
-	FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1)
-	| FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1));
+		(((y - 1) << CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT) &
+			CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
+		((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
 
 	/* Assume common fields in dispControl have been properly set before
 	   calling this function.
@@ -39,33 +38,32 @@
 	 */
 
 	/* Clear bit 29:27 of display control register */
-	dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK);
+	dispControl &= ~CRT_DISPLAY_CTRL_CLK_MASK;
 
 	/* Set bit 29:27 of display control register for the right clock */
-	/* Note that SM750LE only need to supported 7 resoluitons. */
+	/* Note that SM750LE only needs to support 7 resolutions. */
 	if (x == 800 && y == 600)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL41;
 	else if (x == 1024 && y == 768)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL65;
 	else if (x == 1152 && y == 864)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80;
 	else if (x == 1280 && y == 768)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80;
 	else if (x == 1280 && y == 720)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL74;
 	else if (x == 1280 && y == 960)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108;
 	else if (x == 1280 && y == 1024)
-		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108;
 	else /* default to VGA clock */
-	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25);
+		dispControl |= CRT_DISPLAY_CTRL_CLK_PLL25;
 
 	/* Set bit 25:24 of display controller */
-	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT);
-	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT);
+	dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
 
 	/* Set bit 14 of display controller */
-	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW);
+	dispControl |= DISPLAY_CTRL_CLOCK_PHASE;
 
 	POKE32(CRT_DISPLAY_CTRL, dispControl);
 
@@ -79,85 +77,105 @@
 {
 	int ret = 0;
 	int cnt = 0;
-	unsigned int ulTmpValue, ulReg;
+	unsigned int tmp, reg;
 
 	if (pll->clockType == SECONDARY_PLL) {
 		/* programe secondary pixel clock */
 		POKE32(CRT_PLL_CTRL, formatPllReg(pll));
 		POKE32(CRT_HORIZONTAL_TOTAL,
-		FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
-		| FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+			(((pModeParam->horizontal_total - 1) <<
+				CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
+				CRT_HORIZONTAL_TOTAL_TOTAL_MASK) |
+			((pModeParam->horizontal_display_end - 1) &
+				CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK));
 
 		POKE32(CRT_HORIZONTAL_SYNC,
-		FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
-		| FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+			((pModeParam->horizontal_sync_width <<
+				CRT_HORIZONTAL_SYNC_WIDTH_SHIFT) &
+				CRT_HORIZONTAL_SYNC_WIDTH_MASK) |
+			((pModeParam->horizontal_sync_start - 1) &
+				CRT_HORIZONTAL_SYNC_START_MASK));
 
 		POKE32(CRT_VERTICAL_TOTAL,
-		FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
-		| FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+			(((pModeParam->vertical_total - 1) <<
+				CRT_VERTICAL_TOTAL_TOTAL_SHIFT) &
+				CRT_VERTICAL_TOTAL_TOTAL_MASK) |
+			((pModeParam->vertical_display_end - 1) &
+				CRT_VERTICAL_TOTAL_DISPLAY_END_MASK));
 
 		POKE32(CRT_VERTICAL_SYNC,
-		FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
-		| FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+			((pModeParam->vertical_sync_height <<
+				CRT_VERTICAL_SYNC_HEIGHT_SHIFT) &
+				CRT_VERTICAL_SYNC_HEIGHT_MASK) |
+			((pModeParam->vertical_sync_start - 1) &
+				CRT_VERTICAL_SYNC_START_MASK));
 
 
-		ulTmpValue = FIELD_VALUE(0, CRT_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
-					  FIELD_VALUE(0, CRT_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
-					  FIELD_SET(0, CRT_DISPLAY_CTRL, TIMING, ENABLE)|
-					  FIELD_SET(0, CRT_DISPLAY_CTRL, PLANE, ENABLE);
-
+		tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
+		if (pModeParam->vertical_sync_polarity)
+			tmp |= DISPLAY_CTRL_VSYNC_PHASE;
+		if (pModeParam->horizontal_sync_polarity)
+			tmp |= DISPLAY_CTRL_HSYNC_PHASE;
 
 		if (getChipType() == SM750LE) {
-			displayControlAdjust_SM750LE(pModeParam, ulTmpValue);
+			displayControlAdjust_SM750LE(pModeParam, tmp);
 		} else {
-			ulReg = PEEK32(CRT_DISPLAY_CTRL)
-					& FIELD_CLEAR(CRT_DISPLAY_CTRL, VSYNC_PHASE)
-					& FIELD_CLEAR(CRT_DISPLAY_CTRL, HSYNC_PHASE)
-					& FIELD_CLEAR(CRT_DISPLAY_CTRL, TIMING)
-					& FIELD_CLEAR(CRT_DISPLAY_CTRL, PLANE);
+			reg = PEEK32(CRT_DISPLAY_CTRL) &
+				~(DISPLAY_CTRL_VSYNC_PHASE |
+				  DISPLAY_CTRL_HSYNC_PHASE |
+				  DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE);
 
-			 POKE32(CRT_DISPLAY_CTRL, ulTmpValue|ulReg);
+			POKE32(CRT_DISPLAY_CTRL, tmp | reg);
 		}
 
 	} else if (pll->clockType == PRIMARY_PLL) {
-		unsigned int ulReservedBits;
+		unsigned int reserved;
 
 		POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
 
-		POKE32(PANEL_HORIZONTAL_TOTAL,
-		FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
-		| FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+		reg = ((pModeParam->horizontal_total - 1) <<
+			PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
+			PANEL_HORIZONTAL_TOTAL_TOTAL_MASK;
+		reg |= ((pModeParam->horizontal_display_end - 1) &
+			PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK);
+		POKE32(PANEL_HORIZONTAL_TOTAL, reg);
 
 		POKE32(PANEL_HORIZONTAL_SYNC,
-		FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
-		| FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+			((pModeParam->horizontal_sync_width <<
+				PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT) &
+				PANEL_HORIZONTAL_SYNC_WIDTH_MASK) |
+			((pModeParam->horizontal_sync_start - 1) &
+				PANEL_HORIZONTAL_SYNC_START_MASK));
 
 		POKE32(PANEL_VERTICAL_TOTAL,
-		FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
-			| FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+			(((pModeParam->vertical_total - 1) <<
+				PANEL_VERTICAL_TOTAL_TOTAL_SHIFT) &
+				PANEL_VERTICAL_TOTAL_TOTAL_MASK) |
+			((pModeParam->vertical_display_end - 1) &
+				PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK));
 
 		POKE32(PANEL_VERTICAL_SYNC,
-		FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
-		| FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+			((pModeParam->vertical_sync_height <<
+				PANEL_VERTICAL_SYNC_HEIGHT_SHIFT) &
+				PANEL_VERTICAL_SYNC_HEIGHT_MASK) |
+			((pModeParam->vertical_sync_start - 1) &
+				PANEL_VERTICAL_SYNC_START_MASK));
 
-		ulTmpValue = FIELD_VALUE(0, PANEL_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
-			     FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
-			     FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)|
-			     FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)|
-			     FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE);
+		tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
+		if (pModeParam->vertical_sync_polarity)
+			tmp |= DISPLAY_CTRL_VSYNC_PHASE;
+		if (pModeParam->horizontal_sync_polarity)
+			tmp |= DISPLAY_CTRL_HSYNC_PHASE;
+		if (pModeParam->clock_phase_polarity)
+			tmp |= DISPLAY_CTRL_CLOCK_PHASE;
 
-		ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
-				 FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
-				 FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)|
-				 FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW);
+		reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK |
+			PANEL_DISPLAY_CTRL_VSYNC;
 
-		ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits)
-			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE)
-			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE)
-			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE)
-			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING)
-			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE);
-
+		reg = (PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) &
+			~(DISPLAY_CTRL_CLOCK_PHASE | DISPLAY_CTRL_VSYNC_PHASE |
+			  DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING |
+			  DISPLAY_CTRL_PLANE);
 
 		/* Maybe a hardware bug or just my test chip (not confirmed).
 		* The PANEL_DISPLAY_CTRL register seems to require a few writes
@@ -167,13 +185,14 @@
 		*       next vertical sync to turn on/off the plane.
 		*/
 
-		POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
+		POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
 
-		while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg)) {
+		while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) !=
+			(tmp | reg)) {
 			cnt++;
 			if (cnt > 1000)
 				break;
-			POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
+			POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
 		}
 	} else {
 		ret = -1;
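
The timing-register writes above all reduce to the same recipe once FIELD_VALUE() is gone: shift each field value into position, mask it to the field's width, OR the fields together, and write the result. The stand-alone sketch below only illustrates that recipe; the register offset and the _SHIFT/_MASK values are placeholders picked for the demo rather than the real ddk750_reg.h definitions, and poke32() just prints what the driver's POKE32() would write.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder layout for the demo; the real values live in ddk750_reg.h. */
    #define CRT_HORIZONTAL_TOTAL                  0x24    /* dummy offset */
    #define CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT      16
    #define CRT_HORIZONTAL_TOTAL_TOTAL_MASK       (0xfffu << 16)
    #define CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK 0xfffu

    /* Stand-in for the driver's POKE32() MMIO write. */
    static void poke32(uint32_t reg, uint32_t val)
    {
            printf("POKE32(%#x, %#010x)\n", (unsigned)reg, (unsigned)val);
    }

    static void set_horizontal_total(unsigned int total, unsigned int display_end)
    {
            /* shift each field into place and mask it to its width */
            uint32_t val = (((total - 1) << CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
                            CRT_HORIZONTAL_TOTAL_TOTAL_MASK) |
                           ((display_end - 1) &
                            CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK);

            poke32(CRT_HORIZONTAL_TOTAL, val);
    }

    int main(void)
    {
            set_horizontal_total(1056, 800);        /* hypothetical 800x600 timing */
            return 0;
    }
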
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 667e4f8..33e092e 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -7,12 +7,12 @@
 	unsigned int value;
 
 	if (getChipType() == SM750LE) {
-		value = PEEK32(CRT_DISPLAY_CTRL);
-		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL,
-						     DPMS, state));
+		value = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK;
+		value |= (state << CRT_DISPLAY_CTRL_DPMS_SHIFT);
+		POKE32(CRT_DISPLAY_CTRL, value);
 	} else {
 		value = PEEK32(SYSTEM_CTRL);
-		value = FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state);
+		value = (value & ~SYSTEM_CTRL_DPMS_MASK) | (state << 30);
 		POKE32(SYSTEM_CTRL, value);
 	}
 }
@@ -21,7 +21,7 @@
 {
 	if (getChipType() == SM750LE)
 		return 0;
-	return FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE);
+	return PEEK32(POWER_MODE_CTRL) & POWER_MODE_CTRL_MODE_MASK;
 }
 
 
@@ -33,25 +33,22 @@
 {
 	unsigned int control_value = 0;
 
-	control_value = PEEK32(POWER_MODE_CTRL);
+	control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
 
 	if (getChipType() == SM750LE)
 		return;
 
 	switch (powerMode) {
 	case POWER_MODE_CTRL_MODE_MODE0:
-		control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
-					  MODE0);
+		control_value |= POWER_MODE_CTRL_MODE_MODE0;
 		break;
 
 	case POWER_MODE_CTRL_MODE_MODE1:
-		control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
-					  MODE1);
+		control_value |= POWER_MODE_CTRL_MODE_MODE1;
 		break;
 
 	case POWER_MODE_CTRL_MODE_SLEEP:
-		control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
-					  SLEEP);
+		control_value |= POWER_MODE_CTRL_MODE_SLEEP;
 		break;
 
 	default:
@@ -60,17 +57,15 @@
 
 	/* Set up other fields in Power Control Register */
 	if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
-		control_value =
+		control_value &= ~POWER_MODE_CTRL_OSC_INPUT;
 #ifdef VALIDATION_CHIP
-		FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, OFF) |
+		control_value &= ~POWER_MODE_CTRL_336CLK;
 #endif
-		FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT,  OFF);
 	} else {
-		control_value =
+		control_value |= POWER_MODE_CTRL_OSC_INPUT;
 #ifdef VALIDATION_CHIP
-		FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, ON) |
+		control_value |= POWER_MODE_CTRL_336CLK;
 #endif
-		FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT,  ON);
 	}
 
 	/* Program new power mode. */
@@ -111,13 +106,10 @@
 	u32 gate;
 
 	gate = PEEK32(CURRENT_GATE);
-	if (enable) {
-		gate = FIELD_SET(gate, CURRENT_GATE, DE,  ON);
-		gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON);
-	} else {
-		gate = FIELD_SET(gate, CURRENT_GATE, DE,  OFF);
-		gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF);
-	}
+	if (enable)
+		gate |= (CURRENT_GATE_DE | CURRENT_GATE_CSC);
+	else
+		gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC);
 
 	setCurrentGate(gate);
 }
@@ -129,9 +121,9 @@
 	/* Enable DMA Gate */
 	gate = PEEK32(CURRENT_GATE);
 	if (enable)
-		gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON);
+		gate |= CURRENT_GATE_DMA;
 	else
-		gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF);
+		gate &= ~CURRENT_GATE_DMA;
 
 	setCurrentGate(gate);
 }
@@ -146,9 +138,9 @@
 	/* Enable GPIO Gate */
 	gate = PEEK32(CURRENT_GATE);
 	if (enable)
-		gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
+		gate |= CURRENT_GATE_GPIO;
 	else
-		gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF);
+		gate &= ~CURRENT_GATE_GPIO;
 
 	setCurrentGate(gate);
 }
@@ -163,9 +155,9 @@
 	/* Enable I2C Gate */
 	gate = PEEK32(CURRENT_GATE);
 	if (enable)
-		gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
+		gate |= CURRENT_GATE_I2C;
 	else
-	gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF);
+		gate &= ~CURRENT_GATE_I2C;
 
 	setCurrentGate(gate);
 }
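
Each of the power-gate helpers above now follows the same read-modify-write shape: read CURRENT_GATE, set or clear the unit's BIT() mask depending on the enable flag, and pass the result to setCurrentGate(). A minimal stand-alone sketch of that shape follows; the GATE_* bit positions and the plain variable standing in for the register are assumptions made for the demo, not the driver's real CURRENT_GATE_* values.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Demo-only gate bits; the driver uses CURRENT_GATE_* from ddk750_reg.h. */
    #define GATE_DE  (1u << 0)
    #define GATE_CSC (1u << 1)

    static uint32_t current_gate;   /* stands in for the CURRENT_GATE register */

    static void enable_2d_engine(bool enable)
    {
            uint32_t gate = current_gate;           /* PEEK32(CURRENT_GATE) */

            if (enable)
                    gate |= (GATE_DE | GATE_CSC);
            else
                    gate &= ~(GATE_DE | GATE_CSC);

            current_gate = gate;                    /* setCurrentGate(gate) */
    }

    int main(void)
    {
            enable_2d_engine(true);
            printf("gate after enable:  %#x\n", (unsigned)current_gate);
            enable_2d_engine(false);
            printf("gate after disable: %#x\n", (unsigned)current_gate);
            return 0;
    }
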
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 6e804d9..5963691 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -9,13 +9,10 @@
 }
 DPMS_t;
 
-#define setDAC(off) \
-		{	\
-		POKE32(MISC_CTRL, FIELD_VALUE(PEEK32(MISC_CTRL), \
-									MISC_CTRL,	\
-									DAC_POWER,	\
-									off));	\
-		}
+#define setDAC(off) {							\
+	POKE32(MISC_CTRL,						\
+	       (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
+}
 
 void ddk750_setDPMS(DPMS_t);
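
The reworked setDAC() macro clears MISC_CTRL_DAC_POWER_OFF and then ORs in its argument, which suggests callers pass either 0 (DAC powered) or MISC_CTRL_DAC_POWER_OFF (DAC powered down). The sketch below exercises a copy of the macro in user space under that assumption; PEEK32()/POKE32() are replaced by a single fake register variable, so this mirrors only the macro's shape, not real MMIO.

    #include <stdint.h>
    #include <stdio.h>

    #define MISC_CTRL               0x000004
    #define MISC_CTRL_DAC_POWER_OFF (1u << 20)      /* BIT(20) in this patch */

    static uint32_t fake_misc_ctrl;                 /* demo stand-in for the register */

    #define PEEK32(addr)      ((void)(addr), fake_misc_ctrl)
    #define POKE32(addr, val) ((void)(addr), (void)(fake_misc_ctrl = (val)))

    /* Same shape as the macro in ddk750_power.h. */
    #define setDAC(off) {                                               \
            POKE32(MISC_CTRL,                                           \
                   (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
    }

    int main(void)
    {
            setDAC(MISC_CTRL_DAC_POWER_OFF);        /* power the DAC down */
            printf("MISC_CTRL = %#010x\n", (unsigned)fake_misc_ctrl);
            setDAC(0);                              /* power it back up */
            printf("MISC_CTRL = %#010x\n", (unsigned)fake_misc_ctrl);
            return 0;
    }
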
 
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 16a01c2..9552479 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -3,1865 +3,1149 @@
 
 /* New register for SM750LE */
 #define DE_STATE1                                        0x100054
-#define DE_STATE1_DE_ABORT                               0:0
-#define DE_STATE1_DE_ABORT_OFF                           0
-#define DE_STATE1_DE_ABORT_ON                            1
+#define DE_STATE1_DE_ABORT                               BIT(0)
 
 #define DE_STATE2                                        0x100058
-#define DE_STATE2_DE_FIFO                                3:3
-#define DE_STATE2_DE_FIFO_NOTEMPTY                       0
-#define DE_STATE2_DE_FIFO_EMPTY                          1
-#define DE_STATE2_DE_STATUS                              2:2
-#define DE_STATE2_DE_STATUS_IDLE                         0
-#define DE_STATE2_DE_STATUS_BUSY                         1
-#define DE_STATE2_DE_MEM_FIFO                            1:1
-#define DE_STATE2_DE_MEM_FIFO_NOTEMPTY                   0
-#define DE_STATE2_DE_MEM_FIFO_EMPTY                      1
-#define DE_STATE2_DE_RESERVED                            0:0
-
-
+#define DE_STATE2_DE_FIFO_EMPTY                          BIT(3)
+#define DE_STATE2_DE_STATUS_BUSY                         BIT(2)
+#define DE_STATE2_DE_MEM_FIFO_EMPTY                      BIT(1)
 
 #define SYSTEM_CTRL                                   0x000000
-#define SYSTEM_CTRL_DPMS                              31:30
-#define SYSTEM_CTRL_DPMS_VPHP                         0
-#define SYSTEM_CTRL_DPMS_VPHN                         1
-#define SYSTEM_CTRL_DPMS_VNHP                         2
-#define SYSTEM_CTRL_DPMS_VNHN                         3
-#define SYSTEM_CTRL_PCI_BURST                         29:29
-#define SYSTEM_CTRL_PCI_BURST_OFF                     0
-#define SYSTEM_CTRL_PCI_BURST_ON                      1
-#define SYSTEM_CTRL_PCI_MASTER                        25:25
-#define SYSTEM_CTRL_PCI_MASTER_OFF                    0
-#define SYSTEM_CTRL_PCI_MASTER_ON                     1
-#define SYSTEM_CTRL_LATENCY_TIMER                     24:24
-#define SYSTEM_CTRL_LATENCY_TIMER_ON                  0
-#define SYSTEM_CTRL_LATENCY_TIMER_OFF                 1
-#define SYSTEM_CTRL_DE_FIFO                           23:23
-#define SYSTEM_CTRL_DE_FIFO_NOTEMPTY                  0
-#define SYSTEM_CTRL_DE_FIFO_EMPTY                     1
-#define SYSTEM_CTRL_DE_STATUS                         22:22
-#define SYSTEM_CTRL_DE_STATUS_IDLE                    0
-#define SYSTEM_CTRL_DE_STATUS_BUSY                    1
-#define SYSTEM_CTRL_DE_MEM_FIFO                       21:21
-#define SYSTEM_CTRL_DE_MEM_FIFO_NOTEMPTY              0
-#define SYSTEM_CTRL_DE_MEM_FIFO_EMPTY                 1
-#define SYSTEM_CTRL_CSC_STATUS                        20:20
-#define SYSTEM_CTRL_CSC_STATUS_IDLE                   0
-#define SYSTEM_CTRL_CSC_STATUS_BUSY                   1
-#define SYSTEM_CTRL_CRT_VSYNC                         19:19
-#define SYSTEM_CTRL_CRT_VSYNC_INACTIVE                0
-#define SYSTEM_CTRL_CRT_VSYNC_ACTIVE                  1
-#define SYSTEM_CTRL_PANEL_VSYNC                       18:18
-#define SYSTEM_CTRL_PANEL_VSYNC_INACTIVE              0
-#define SYSTEM_CTRL_PANEL_VSYNC_ACTIVE                1
-#define SYSTEM_CTRL_CURRENT_BUFFER                    17:17
-#define SYSTEM_CTRL_CURRENT_BUFFER_NORMAL             0
-#define SYSTEM_CTRL_CURRENT_BUFFER_FLIP_PENDING       1
-#define SYSTEM_CTRL_DMA_STATUS                        16:16
-#define SYSTEM_CTRL_DMA_STATUS_IDLE                   0
-#define SYSTEM_CTRL_DMA_STATUS_BUSY                   1
-#define SYSTEM_CTRL_PCI_BURST_READ                    15:15
-#define SYSTEM_CTRL_PCI_BURST_READ_OFF                0
-#define SYSTEM_CTRL_PCI_BURST_READ_ON                 1
-#define SYSTEM_CTRL_DE_ABORT                          13:13
-#define SYSTEM_CTRL_DE_ABORT_OFF                      0
-#define SYSTEM_CTRL_DE_ABORT_ON                       1
-#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK                11:11
-#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK_OFF            0
-#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK_ON             1
-#define SYSTEM_CTRL_PCI_RETRY                         7:7
-#define SYSTEM_CTRL_PCI_RETRY_ON                      0
-#define SYSTEM_CTRL_PCI_RETRY_OFF                     1
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE         5:4
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_1       0
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_2       1
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_4       2
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_8       3
-#define SYSTEM_CTRL_CRT_TRISTATE                      3:3
-#define SYSTEM_CTRL_CRT_TRISTATE_OFF                  0
-#define SYSTEM_CTRL_CRT_TRISTATE_ON                   1
-#define SYSTEM_CTRL_PCIMEM_TRISTATE                   2:2
-#define SYSTEM_CTRL_PCIMEM_TRISTATE_OFF               0
-#define SYSTEM_CTRL_PCIMEM_TRISTATE_ON                1
-#define SYSTEM_CTRL_LOCALMEM_TRISTATE                 1:1
-#define SYSTEM_CTRL_LOCALMEM_TRISTATE_OFF             0
-#define SYSTEM_CTRL_LOCALMEM_TRISTATE_ON              1
-#define SYSTEM_CTRL_PANEL_TRISTATE                    0:0
-#define SYSTEM_CTRL_PANEL_TRISTATE_OFF                0
-#define SYSTEM_CTRL_PANEL_TRISTATE_ON                 1
+#define SYSTEM_CTRL_DPMS_MASK                         (0x3 << 30)
+#define SYSTEM_CTRL_DPMS_VPHP                         (0x0 << 30)
+#define SYSTEM_CTRL_DPMS_VPHN                         (0x1 << 30)
+#define SYSTEM_CTRL_DPMS_VNHP                         (0x2 << 30)
+#define SYSTEM_CTRL_DPMS_VNHN                         (0x3 << 30)
+#define SYSTEM_CTRL_PCI_BURST                         BIT(29)
+#define SYSTEM_CTRL_PCI_MASTER                        BIT(25)
+#define SYSTEM_CTRL_LATENCY_TIMER_OFF                 BIT(24)
+#define SYSTEM_CTRL_DE_FIFO_EMPTY                     BIT(23)
+#define SYSTEM_CTRL_DE_STATUS_BUSY                    BIT(22)
+#define SYSTEM_CTRL_DE_MEM_FIFO_EMPTY                 BIT(21)
+#define SYSTEM_CTRL_CSC_STATUS_BUSY                   BIT(20)
+#define SYSTEM_CTRL_CRT_VSYNC_ACTIVE                  BIT(19)
+#define SYSTEM_CTRL_PANEL_VSYNC_ACTIVE                BIT(18)
+#define SYSTEM_CTRL_CURRENT_BUFFER_FLIP_PENDING       BIT(17)
+#define SYSTEM_CTRL_DMA_STATUS_BUSY                   BIT(16)
+#define SYSTEM_CTRL_PCI_BURST_READ                    BIT(15)
+#define SYSTEM_CTRL_DE_ABORT                          BIT(13)
+#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK                BIT(11)
+#define SYSTEM_CTRL_PCI_RETRY_OFF                     BIT(7)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_MASK    (0x3 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_1       (0x0 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_2       (0x1 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_4       (0x2 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_8       (0x3 << 4)
+#define SYSTEM_CTRL_CRT_TRISTATE                      BIT(3)
+#define SYSTEM_CTRL_PCIMEM_TRISTATE                   BIT(2)
+#define SYSTEM_CTRL_LOCALMEM_TRISTATE                 BIT(1)
+#define SYSTEM_CTRL_PANEL_TRISTATE                    BIT(0)
 
 #define MISC_CTRL                                     0x000004
-#define MISC_CTRL_DRAM_RERESH_COUNT                   27:27
-#define MISC_CTRL_DRAM_RERESH_COUNT_1ROW              0
-#define MISC_CTRL_DRAM_RERESH_COUNT_3ROW              1
-#define MISC_CTRL_DRAM_REFRESH_TIME                   26:25
-#define MISC_CTRL_DRAM_REFRESH_TIME_8                 0
-#define MISC_CTRL_DRAM_REFRESH_TIME_16                1
-#define MISC_CTRL_DRAM_REFRESH_TIME_32                2
-#define MISC_CTRL_DRAM_REFRESH_TIME_64                3
-#define MISC_CTRL_INT_OUTPUT                          24:24
-#define MISC_CTRL_INT_OUTPUT_NORMAL                   0
-#define MISC_CTRL_INT_OUTPUT_INVERT                   1
-#define MISC_CTRL_PLL_CLK_COUNT                       23:23
-#define MISC_CTRL_PLL_CLK_COUNT_OFF                   0
-#define MISC_CTRL_PLL_CLK_COUNT_ON                    1
-#define MISC_CTRL_DAC_POWER                           20:20
-#define MISC_CTRL_DAC_POWER_ON                        0
-#define MISC_CTRL_DAC_POWER_OFF                       1
-#define MISC_CTRL_CLK_SELECT                          16:16
-#define MISC_CTRL_CLK_SELECT_OSC                      0
-#define MISC_CTRL_CLK_SELECT_TESTCLK                  1
-#define MISC_CTRL_DRAM_COLUMN_SIZE                    15:14
-#define MISC_CTRL_DRAM_COLUMN_SIZE_256                0
-#define MISC_CTRL_DRAM_COLUMN_SIZE_512                1
-#define MISC_CTRL_DRAM_COLUMN_SIZE_1024               2
-#define MISC_CTRL_LOCALMEM_SIZE                       13:12
-#define MISC_CTRL_LOCALMEM_SIZE_8M                    3
-#define MISC_CTRL_LOCALMEM_SIZE_16M                   0
-#define MISC_CTRL_LOCALMEM_SIZE_32M                   1
-#define MISC_CTRL_LOCALMEM_SIZE_64M                   2
-#define MISC_CTRL_DRAM_TWTR                           11:11
-#define MISC_CTRL_DRAM_TWTR_2CLK                      0
-#define MISC_CTRL_DRAM_TWTR_1CLK                      1
-#define MISC_CTRL_DRAM_TWR                            10:10
-#define MISC_CTRL_DRAM_TWR_3CLK                       0
-#define MISC_CTRL_DRAM_TWR_2CLK                       1
-#define MISC_CTRL_DRAM_TRP                            9:9
-#define MISC_CTRL_DRAM_TRP_3CLK                       0
-#define MISC_CTRL_DRAM_TRP_4CLK                       1
-#define MISC_CTRL_DRAM_TRFC                           8:8
-#define MISC_CTRL_DRAM_TRFC_12CLK                     0
-#define MISC_CTRL_DRAM_TRFC_14CLK                     1
-#define MISC_CTRL_DRAM_TRAS                           7:7
-#define MISC_CTRL_DRAM_TRAS_7CLK                      0
-#define MISC_CTRL_DRAM_TRAS_8CLK                      1
-#define MISC_CTRL_LOCALMEM_RESET                      6:6
-#define MISC_CTRL_LOCALMEM_RESET_RESET                0
-#define MISC_CTRL_LOCALMEM_RESET_NORMAL               1
-#define MISC_CTRL_LOCALMEM_STATE                      5:5
-#define MISC_CTRL_LOCALMEM_STATE_ACTIVE               0
-#define MISC_CTRL_LOCALMEM_STATE_INACTIVE             1
-#define MISC_CTRL_CPU_CAS_LATENCY                     4:4
-#define MISC_CTRL_CPU_CAS_LATENCY_2CLK                0
-#define MISC_CTRL_CPU_CAS_LATENCY_3CLK                1
-#define MISC_CTRL_DLL                                 3:3
-#define MISC_CTRL_DLL_ON                              0
-#define MISC_CTRL_DLL_OFF                             1
-#define MISC_CTRL_DRAM_OUTPUT                         2:2
-#define MISC_CTRL_DRAM_OUTPUT_LOW                     0
-#define MISC_CTRL_DRAM_OUTPUT_HIGH                    1
-#define MISC_CTRL_LOCALMEM_BUS_SIZE                   1:1
-#define MISC_CTRL_LOCALMEM_BUS_SIZE_32                0
-#define MISC_CTRL_LOCALMEM_BUS_SIZE_64                1
-#define MISC_CTRL_EMBEDDED_LOCALMEM                   0:0
-#define MISC_CTRL_EMBEDDED_LOCALMEM_ON                0
-#define MISC_CTRL_EMBEDDED_LOCALMEM_OFF               1
+#define MISC_CTRL_DRAM_RERESH_COUNT                   BIT(27)
+#define MISC_CTRL_DRAM_REFRESH_TIME_MASK              (0x3 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_8                 (0x0 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_16                (0x1 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_32                (0x2 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_64                (0x3 << 25)
+#define MISC_CTRL_INT_OUTPUT_INVERT                   BIT(24)
+#define MISC_CTRL_PLL_CLK_COUNT                       BIT(23)
+#define MISC_CTRL_DAC_POWER_OFF                       BIT(20)
+#define MISC_CTRL_CLK_SELECT_TESTCLK                  BIT(16)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_MASK               (0x3 << 14)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_256                (0x0 << 14)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_512                (0x1 << 14)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_1024               (0x2 << 14)
+#define MISC_CTRL_LOCALMEM_SIZE_MASK                  (0x3 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_8M                    (0x3 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_16M                   (0x0 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_32M                   (0x1 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_64M                   (0x2 << 12)
+#define MISC_CTRL_DRAM_TWTR                           BIT(11)
+#define MISC_CTRL_DRAM_TWR                            BIT(10)
+#define MISC_CTRL_DRAM_TRP                            BIT(9)
+#define MISC_CTRL_DRAM_TRFC                           BIT(8)
+#define MISC_CTRL_DRAM_TRAS                           BIT(7)
+#define MISC_CTRL_LOCALMEM_RESET                      BIT(6)
+#define MISC_CTRL_LOCALMEM_STATE_INACTIVE             BIT(5)
+#define MISC_CTRL_CPU_CAS_LATENCY                     BIT(4)
+#define MISC_CTRL_DLL_OFF                             BIT(3)
+#define MISC_CTRL_DRAM_OUTPUT_HIGH                    BIT(2)
+#define MISC_CTRL_LOCALMEM_BUS_SIZE                   BIT(1)
+#define MISC_CTRL_EMBEDDED_LOCALMEM_OFF               BIT(0)
 
 #define GPIO_MUX                                      0x000008
-#define GPIO_MUX_31                                   31:31
-#define GPIO_MUX_31_GPIO                              0
-#define GPIO_MUX_31_I2C                               1
-#define GPIO_MUX_30                                   30:30
-#define GPIO_MUX_30_GPIO                              0
-#define GPIO_MUX_30_I2C                               1
-#define GPIO_MUX_29                                   29:29
-#define GPIO_MUX_29_GPIO                              0
-#define GPIO_MUX_29_SSP1                              1
-#define GPIO_MUX_28                                   28:28
-#define GPIO_MUX_28_GPIO                              0
-#define GPIO_MUX_28_SSP1                              1
-#define GPIO_MUX_27                                   27:27
-#define GPIO_MUX_27_GPIO                              0
-#define GPIO_MUX_27_SSP1                              1
-#define GPIO_MUX_26                                   26:26
-#define GPIO_MUX_26_GPIO                              0
-#define GPIO_MUX_26_SSP1                              1
-#define GPIO_MUX_25                                   25:25
-#define GPIO_MUX_25_GPIO                              0
-#define GPIO_MUX_25_SSP1                              1
-#define GPIO_MUX_24                                   24:24
-#define GPIO_MUX_24_GPIO                              0
-#define GPIO_MUX_24_SSP0                              1
-#define GPIO_MUX_23                                   23:23
-#define GPIO_MUX_23_GPIO                              0
-#define GPIO_MUX_23_SSP0                              1
-#define GPIO_MUX_22                                   22:22
-#define GPIO_MUX_22_GPIO                              0
-#define GPIO_MUX_22_SSP0                              1
-#define GPIO_MUX_21                                   21:21
-#define GPIO_MUX_21_GPIO                              0
-#define GPIO_MUX_21_SSP0                              1
-#define GPIO_MUX_20                                   20:20
-#define GPIO_MUX_20_GPIO                              0
-#define GPIO_MUX_20_SSP0                              1
-#define GPIO_MUX_19                                   19:19
-#define GPIO_MUX_19_GPIO                              0
-#define GPIO_MUX_19_PWM                               1
-#define GPIO_MUX_18                                   18:18
-#define GPIO_MUX_18_GPIO                              0
-#define GPIO_MUX_18_PWM                               1
-#define GPIO_MUX_17                                   17:17
-#define GPIO_MUX_17_GPIO                              0
-#define GPIO_MUX_17_PWM                               1
-#define GPIO_MUX_16                                   16:16
-#define GPIO_MUX_16_GPIO_ZVPORT                       0
-#define GPIO_MUX_16_TEST_DATA                         1
-#define GPIO_MUX_15                                   15:15
-#define GPIO_MUX_15_GPIO_ZVPORT                       0
-#define GPIO_MUX_15_TEST_DATA                         1
-#define GPIO_MUX_14                                   14:14
-#define GPIO_MUX_14_GPIO_ZVPORT                       0
-#define GPIO_MUX_14_TEST_DATA                         1
-#define GPIO_MUX_13                                   13:13
-#define GPIO_MUX_13_GPIO_ZVPORT                       0
-#define GPIO_MUX_13_TEST_DATA                         1
-#define GPIO_MUX_12                                   12:12
-#define GPIO_MUX_12_GPIO_ZVPORT                       0
-#define GPIO_MUX_12_TEST_DATA                         1
-#define GPIO_MUX_11                                   11:11
-#define GPIO_MUX_11_GPIO_ZVPORT                       0
-#define GPIO_MUX_11_TEST_DATA                         1
-#define GPIO_MUX_10                                   10:10
-#define GPIO_MUX_10_GPIO_ZVPORT                       0
-#define GPIO_MUX_10_TEST_DATA                         1
-#define GPIO_MUX_9                                    9:9
-#define GPIO_MUX_9_GPIO_ZVPORT                        0
-#define GPIO_MUX_9_TEST_DATA                          1
-#define GPIO_MUX_8                                    8:8
-#define GPIO_MUX_8_GPIO_ZVPORT                        0
-#define GPIO_MUX_8_TEST_DATA                          1
-#define GPIO_MUX_7                                    7:7
-#define GPIO_MUX_7_GPIO_ZVPORT                        0
-#define GPIO_MUX_7_TEST_DATA                          1
-#define GPIO_MUX_6                                    6:6
-#define GPIO_MUX_6_GPIO_ZVPORT                        0
-#define GPIO_MUX_6_TEST_DATA                          1
-#define GPIO_MUX_5                                    5:5
-#define GPIO_MUX_5_GPIO_ZVPORT                        0
-#define GPIO_MUX_5_TEST_DATA                          1
-#define GPIO_MUX_4                                    4:4
-#define GPIO_MUX_4_GPIO_ZVPORT                        0
-#define GPIO_MUX_4_TEST_DATA                          1
-#define GPIO_MUX_3                                    3:3
-#define GPIO_MUX_3_GPIO_ZVPORT                        0
-#define GPIO_MUX_3_TEST_DATA                          1
-#define GPIO_MUX_2                                    2:2
-#define GPIO_MUX_2_GPIO_ZVPORT                        0
-#define GPIO_MUX_2_TEST_DATA                          1
-#define GPIO_MUX_1                                    1:1
-#define GPIO_MUX_1_GPIO_ZVPORT                        0
-#define GPIO_MUX_1_TEST_DATA                          1
-#define GPIO_MUX_0                                    0:0
-#define GPIO_MUX_0_GPIO_ZVPORT                        0
-#define GPIO_MUX_0_TEST_DATA                          1
+#define GPIO_MUX_31                                   BIT(31)
+#define GPIO_MUX_30                                   BIT(30)
+#define GPIO_MUX_29                                   BIT(29)
+#define GPIO_MUX_28                                   BIT(28)
+#define GPIO_MUX_27                                   BIT(27)
+#define GPIO_MUX_26                                   BIT(26)
+#define GPIO_MUX_25                                   BIT(25)
+#define GPIO_MUX_24                                   BIT(24)
+#define GPIO_MUX_23                                   BIT(23)
+#define GPIO_MUX_22                                   BIT(22)
+#define GPIO_MUX_21                                   BIT(21)
+#define GPIO_MUX_20                                   BIT(20)
+#define GPIO_MUX_19                                   BIT(19)
+#define GPIO_MUX_18                                   BIT(18)
+#define GPIO_MUX_17                                   BIT(17)
+#define GPIO_MUX_16                                   BIT(16)
+#define GPIO_MUX_15                                   BIT(15)
+#define GPIO_MUX_14                                   BIT(14)
+#define GPIO_MUX_13                                   BIT(13)
+#define GPIO_MUX_12                                   BIT(12)
+#define GPIO_MUX_11                                   BIT(11)
+#define GPIO_MUX_10                                   BIT(10)
+#define GPIO_MUX_9                                    BIT(9)
+#define GPIO_MUX_8                                    BIT(8)
+#define GPIO_MUX_7                                    BIT(7)
+#define GPIO_MUX_6                                    BIT(6)
+#define GPIO_MUX_5                                    BIT(5)
+#define GPIO_MUX_4                                    BIT(4)
+#define GPIO_MUX_3                                    BIT(3)
+#define GPIO_MUX_2                                    BIT(2)
+#define GPIO_MUX_1                                    BIT(1)
+#define GPIO_MUX_0                                    BIT(0)
 
 #define LOCALMEM_ARBITRATION                          0x00000C
-#define LOCALMEM_ARBITRATION_ROTATE                   28:28
-#define LOCALMEM_ARBITRATION_ROTATE_OFF               0
-#define LOCALMEM_ARBITRATION_ROTATE_ON                1
-#define LOCALMEM_ARBITRATION_VGA                      26:24
-#define LOCALMEM_ARBITRATION_VGA_OFF                  0
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_1           1
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_2           2
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_3           3
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_4           4
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_5           5
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_6           6
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_7           7
-#define LOCALMEM_ARBITRATION_DMA                      22:20
-#define LOCALMEM_ARBITRATION_DMA_OFF                  0
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_1           1
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_2           2
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_3           3
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_4           4
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_5           5
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_6           6
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_7           7
-#define LOCALMEM_ARBITRATION_ZVPORT1                  18:16
-#define LOCALMEM_ARBITRATION_ZVPORT1_OFF              0
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_1       1
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_2       2
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_3       3
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_4       4
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_5       5
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_6       6
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_7       7
-#define LOCALMEM_ARBITRATION_ZVPORT0                  14:12
-#define LOCALMEM_ARBITRATION_ZVPORT0_OFF              0
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_1       1
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_2       2
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_3       3
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_4       4
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_5       5
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_6       6
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_7       7
-#define LOCALMEM_ARBITRATION_VIDEO                    10:8
-#define LOCALMEM_ARBITRATION_VIDEO_OFF                0
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_1         1
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_2         2
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_3         3
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_4         4
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_5         5
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_6         6
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_7         7
-#define LOCALMEM_ARBITRATION_PANEL                    6:4
-#define LOCALMEM_ARBITRATION_PANEL_OFF                0
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_1         1
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_2         2
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_3         3
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_4         4
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5         5
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_6         6
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_7         7
-#define LOCALMEM_ARBITRATION_CRT                      2:0
-#define LOCALMEM_ARBITRATION_CRT_OFF                  0
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_1           1
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_2           2
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_3           3
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_4           4
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_5           5
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_6           6
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_7           7
+#define LOCALMEM_ARBITRATION_ROTATE                   BIT(28)
+#define LOCALMEM_ARBITRATION_VGA_MASK                 (0x7 << 24)
+#define LOCALMEM_ARBITRATION_VGA_OFF                  (0x0 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_1           (0x1 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_2           (0x2 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_3           (0x3 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_4           (0x4 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_5           (0x5 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_6           (0x6 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_7           (0x7 << 24)
+#define LOCALMEM_ARBITRATION_DMA_MASK                 (0x7 << 20)
+#define LOCALMEM_ARBITRATION_DMA_OFF                  (0x0 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_1           (0x1 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_2           (0x2 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_3           (0x3 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_4           (0x4 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_5           (0x5 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_6           (0x6 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_7           (0x7 << 20)
+#define LOCALMEM_ARBITRATION_ZVPORT1_MASK             (0x7 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_OFF              (0x0 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_1       (0x1 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_2       (0x2 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_3       (0x3 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_4       (0x4 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_5       (0x5 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_6       (0x6 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_7       (0x7 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT0_MASK             (0x7 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_OFF              (0x0 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_1       (0x1 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_2       (0x2 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_3       (0x3 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_4       (0x4 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_5       (0x5 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_6       (0x6 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_7       (0x7 << 12)
+#define LOCALMEM_ARBITRATION_VIDEO_MASK               (0x7 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_OFF                (0x0 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_1         (0x1 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_2         (0x2 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_3         (0x3 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_4         (0x4 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_5         (0x5 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_6         (0x6 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_7         (0x7 << 8)
+#define LOCALMEM_ARBITRATION_PANEL_MASK               (0x7 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_OFF                (0x0 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_1         (0x1 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_2         (0x2 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_3         (0x3 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_4         (0x4 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5         (0x5 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_6         (0x6 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_7         (0x7 << 4)
+#define LOCALMEM_ARBITRATION_CRT_MASK                 0x7
+#define LOCALMEM_ARBITRATION_CRT_OFF                  0x0
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_1           0x1
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_2           0x2
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_3           0x3
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_4           0x4
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_5           0x5
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_6           0x6
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_7           0x7
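
Multi-bit fields such as the arbitration priorities keep a companion _MASK define, so updating one field is a clear-then-OR on the register value. The sketch below shows how a caller might raise the panel priority with the LOCALMEM_ARBITRATION defines introduced above; the peek32()/poke32() helpers and the starting register value are stand-ins for the driver's PEEK32()/POKE32() MMIO accessors.

    #include <stdint.h>
    #include <stdio.h>

    #define LOCALMEM_ARBITRATION                  0x00000C
    #define LOCALMEM_ARBITRATION_PANEL_MASK       (0x7 << 4)
    #define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5 (0x5 << 4)

    static uint32_t fake_reg = 0x00214321;  /* arbitrary starting value for the demo */

    static uint32_t peek32(uint32_t addr)             { (void)addr; return fake_reg; }
    static void     poke32(uint32_t addr, uint32_t v) { (void)addr; fake_reg = v; }

    int main(void)
    {
            uint32_t arb = peek32(LOCALMEM_ARBITRATION);

            /* clear the 3-bit PANEL field, then OR in the new priority */
            arb &= ~LOCALMEM_ARBITRATION_PANEL_MASK;
            arb |= LOCALMEM_ARBITRATION_PANEL_PRIORITY_5;

            poke32(LOCALMEM_ARBITRATION, arb);
            printf("LOCALMEM_ARBITRATION = %#010x\n", (unsigned)fake_reg);
            return 0;
    }
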
 
 #define PCIMEM_ARBITRATION                            0x000010
-#define PCIMEM_ARBITRATION_ROTATE                     28:28
-#define PCIMEM_ARBITRATION_ROTATE_OFF                 0
-#define PCIMEM_ARBITRATION_ROTATE_ON                  1
-#define PCIMEM_ARBITRATION_VGA                        26:24
-#define PCIMEM_ARBITRATION_VGA_OFF                    0
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_1             1
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_2             2
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_3             3
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_4             4
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_5             5
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_6             6
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_7             7
-#define PCIMEM_ARBITRATION_DMA                        22:20
-#define PCIMEM_ARBITRATION_DMA_OFF                    0
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_1             1
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_2             2
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_3             3
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_4             4
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_5             5
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_6             6
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_7             7
-#define PCIMEM_ARBITRATION_ZVPORT1                    18:16
-#define PCIMEM_ARBITRATION_ZVPORT1_OFF                0
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_1         1
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_2         2
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_3         3
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_4         4
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_5         5
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_6         6
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_7         7
-#define PCIMEM_ARBITRATION_ZVPORT0                    14:12
-#define PCIMEM_ARBITRATION_ZVPORT0_OFF                0
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_1         1
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_2         2
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_3         3
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_4         4
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_5         5
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_6         6
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_7         7
-#define PCIMEM_ARBITRATION_VIDEO                      10:8
-#define PCIMEM_ARBITRATION_VIDEO_OFF                  0
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_1           1
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_2           2
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_3           3
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_4           4
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_5           5
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_6           6
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_7           7
-#define PCIMEM_ARBITRATION_PANEL                      6:4
-#define PCIMEM_ARBITRATION_PANEL_OFF                  0
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_1           1
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_2           2
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_3           3
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_4           4
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_5           5
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_6           6
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_7           7
-#define PCIMEM_ARBITRATION_CRT                        2:0
-#define PCIMEM_ARBITRATION_CRT_OFF                    0
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_1             1
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_2             2
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_3             3
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_4             4
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_5             5
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_6             6
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_7             7
+#define PCIMEM_ARBITRATION_ROTATE                     BIT(28)
+#define PCIMEM_ARBITRATION_VGA_MASK                   (0x7 << 24)
+#define PCIMEM_ARBITRATION_VGA_OFF                    (0x0 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_1             (0x1 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_2             (0x2 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_3             (0x3 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_4             (0x4 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_5             (0x5 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_6             (0x6 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_7             (0x7 << 24)
+#define PCIMEM_ARBITRATION_DMA_MASK                   (0x7 << 20)
+#define PCIMEM_ARBITRATION_DMA_OFF                    (0x0 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_1             (0x1 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_2             (0x2 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_3             (0x3 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_4             (0x4 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_5             (0x5 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_6             (0x6 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_7             (0x7 << 20)
+#define PCIMEM_ARBITRATION_ZVPORT1_MASK               (0x7 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_OFF                (0x0 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_1         (0x1 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_2         (0x2 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_3         (0x3 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_4         (0x4 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_5         (0x5 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_6         (0x6 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_7         (0x7 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT0_MASK               (0x7 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_OFF                (0x0 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_1         (0x1 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_2         (0x2 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_3         (0x3 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_4         (0x4 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_5         (0x5 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_6         (0x6 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_7         (0x7 << 12)
+#define PCIMEM_ARBITRATION_VIDEO_MASK                 (0x7 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_OFF                  (0x0 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_1           (0x1 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_2           (0x2 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_3           (0x3 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_4           (0x4 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_5           (0x5 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_6           (0x6 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_7           (0x7 << 8)
+#define PCIMEM_ARBITRATION_PANEL_MASK                 (0x7 << 4)
+#define PCIMEM_ARBITRATION_PANEL_OFF                  (0x0 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_1           (0x1 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_2           (0x2 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_3           (0x3 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_4           (0x4 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_5           (0x5 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_6           (0x6 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_7           (0x7 << 4)
+#define PCIMEM_ARBITRATION_CRT_MASK                   0x7
+#define PCIMEM_ARBITRATION_CRT_OFF                    0x0
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_1             0x1
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_2             0x2
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_3             0x3
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_4             0x4
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_5             0x5
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_6             0x6
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_7             0x7
 
 #define RAW_INT                                       0x000020
-#define RAW_INT_ZVPORT1_VSYNC                         4:4
-#define RAW_INT_ZVPORT1_VSYNC_INACTIVE                0
-#define RAW_INT_ZVPORT1_VSYNC_ACTIVE                  1
-#define RAW_INT_ZVPORT1_VSYNC_CLEAR                   1
-#define RAW_INT_ZVPORT0_VSYNC                         3:3
-#define RAW_INT_ZVPORT0_VSYNC_INACTIVE                0
-#define RAW_INT_ZVPORT0_VSYNC_ACTIVE                  1
-#define RAW_INT_ZVPORT0_VSYNC_CLEAR                   1
-#define RAW_INT_CRT_VSYNC                             2:2
-#define RAW_INT_CRT_VSYNC_INACTIVE                    0
-#define RAW_INT_CRT_VSYNC_ACTIVE                      1
-#define RAW_INT_CRT_VSYNC_CLEAR                       1
-#define RAW_INT_PANEL_VSYNC                           1:1
-#define RAW_INT_PANEL_VSYNC_INACTIVE                  0
-#define RAW_INT_PANEL_VSYNC_ACTIVE                    1
-#define RAW_INT_PANEL_VSYNC_CLEAR                     1
-#define RAW_INT_VGA_VSYNC                             0:0
-#define RAW_INT_VGA_VSYNC_INACTIVE                    0
-#define RAW_INT_VGA_VSYNC_ACTIVE                      1
-#define RAW_INT_VGA_VSYNC_CLEAR                       1
+#define RAW_INT_ZVPORT1_VSYNC                         BIT(4)
+#define RAW_INT_ZVPORT0_VSYNC                         BIT(3)
+#define RAW_INT_CRT_VSYNC                             BIT(2)
+#define RAW_INT_PANEL_VSYNC                           BIT(1)
+#define RAW_INT_VGA_VSYNC                             BIT(0)
 
 #define INT_STATUS                                    0x000024
-#define INT_STATUS_GPIO31                             31:31
-#define INT_STATUS_GPIO31_INACTIVE                    0
-#define INT_STATUS_GPIO31_ACTIVE                      1
-#define INT_STATUS_GPIO30                             30:30
-#define INT_STATUS_GPIO30_INACTIVE                    0
-#define INT_STATUS_GPIO30_ACTIVE                      1
-#define INT_STATUS_GPIO29                             29:29
-#define INT_STATUS_GPIO29_INACTIVE                    0
-#define INT_STATUS_GPIO29_ACTIVE                      1
-#define INT_STATUS_GPIO28                             28:28
-#define INT_STATUS_GPIO28_INACTIVE                    0
-#define INT_STATUS_GPIO28_ACTIVE                      1
-#define INT_STATUS_GPIO27                             27:27
-#define INT_STATUS_GPIO27_INACTIVE                    0
-#define INT_STATUS_GPIO27_ACTIVE                      1
-#define INT_STATUS_GPIO26                             26:26
-#define INT_STATUS_GPIO26_INACTIVE                    0
-#define INT_STATUS_GPIO26_ACTIVE                      1
-#define INT_STATUS_GPIO25                             25:25
-#define INT_STATUS_GPIO25_INACTIVE                    0
-#define INT_STATUS_GPIO25_ACTIVE                      1
-#define INT_STATUS_I2C                                12:12
-#define INT_STATUS_I2C_INACTIVE                       0
-#define INT_STATUS_I2C_ACTIVE                         1
-#define INT_STATUS_PWM                                11:11
-#define INT_STATUS_PWM_INACTIVE                       0
-#define INT_STATUS_PWM_ACTIVE                         1
-#define INT_STATUS_DMA1                               10:10
-#define INT_STATUS_DMA1_INACTIVE                      0
-#define INT_STATUS_DMA1_ACTIVE                        1
-#define INT_STATUS_DMA0                               9:9
-#define INT_STATUS_DMA0_INACTIVE                      0
-#define INT_STATUS_DMA0_ACTIVE                        1
-#define INT_STATUS_PCI                                8:8
-#define INT_STATUS_PCI_INACTIVE                       0
-#define INT_STATUS_PCI_ACTIVE                         1
-#define INT_STATUS_SSP1                               7:7
-#define INT_STATUS_SSP1_INACTIVE                      0
-#define INT_STATUS_SSP1_ACTIVE                        1
-#define INT_STATUS_SSP0                               6:6
-#define INT_STATUS_SSP0_INACTIVE                      0
-#define INT_STATUS_SSP0_ACTIVE                        1
-#define INT_STATUS_DE                                 5:5
-#define INT_STATUS_DE_INACTIVE                        0
-#define INT_STATUS_DE_ACTIVE                          1
-#define INT_STATUS_ZVPORT1_VSYNC                      4:4
-#define INT_STATUS_ZVPORT1_VSYNC_INACTIVE             0
-#define INT_STATUS_ZVPORT1_VSYNC_ACTIVE               1
-#define INT_STATUS_ZVPORT0_VSYNC                      3:3
-#define INT_STATUS_ZVPORT0_VSYNC_INACTIVE             0
-#define INT_STATUS_ZVPORT0_VSYNC_ACTIVE               1
-#define INT_STATUS_CRT_VSYNC                          2:2
-#define INT_STATUS_CRT_VSYNC_INACTIVE                 0
-#define INT_STATUS_CRT_VSYNC_ACTIVE                   1
-#define INT_STATUS_PANEL_VSYNC                        1:1
-#define INT_STATUS_PANEL_VSYNC_INACTIVE               0
-#define INT_STATUS_PANEL_VSYNC_ACTIVE                 1
-#define INT_STATUS_VGA_VSYNC                          0:0
-#define INT_STATUS_VGA_VSYNC_INACTIVE                 0
-#define INT_STATUS_VGA_VSYNC_ACTIVE                   1
+#define INT_STATUS_GPIO31                             BIT(31)
+#define INT_STATUS_GPIO30                             BIT(30)
+#define INT_STATUS_GPIO29                             BIT(29)
+#define INT_STATUS_GPIO28                             BIT(28)
+#define INT_STATUS_GPIO27                             BIT(27)
+#define INT_STATUS_GPIO26                             BIT(26)
+#define INT_STATUS_GPIO25                             BIT(25)
+#define INT_STATUS_I2C                                BIT(12)
+#define INT_STATUS_PWM                                BIT(11)
+#define INT_STATUS_DMA1                               BIT(10)
+#define INT_STATUS_DMA0                               BIT(9)
+#define INT_STATUS_PCI                                BIT(8)
+#define INT_STATUS_SSP1                               BIT(7)
+#define INT_STATUS_SSP0                               BIT(6)
+#define INT_STATUS_DE                                 BIT(5)
+#define INT_STATUS_ZVPORT1_VSYNC                      BIT(4)
+#define INT_STATUS_ZVPORT0_VSYNC                      BIT(3)
+#define INT_STATUS_CRT_VSYNC                          BIT(2)
+#define INT_STATUS_PANEL_VSYNC                        BIT(1)
+#define INT_STATUS_VGA_VSYNC                          BIT(0)
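
With the interrupt status bits collapsed to single BIT() defines, a handler can test raw status with plain bitwise AND instead of the old FIELD_GET() helper. A tiny sketch under that reading, using a fake status word in place of a PEEK32(INT_STATUS):

    #include <stdint.h>
    #include <stdio.h>

    #define INT_STATUS_CRT_VSYNC   (1u << 2)        /* BIT(2) */
    #define INT_STATUS_PANEL_VSYNC (1u << 1)        /* BIT(1) */

    int main(void)
    {
            /* pretend PEEK32(INT_STATUS) returned this */
            uint32_t status = INT_STATUS_PANEL_VSYNC;

            if (status & INT_STATUS_PANEL_VSYNC)
                    printf("panel vsync pending\n");
            if (status & INT_STATUS_CRT_VSYNC)
                    printf("crt vsync pending\n");
            return 0;
    }
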
 
 #define INT_MASK                                      0x000028
-#define INT_MASK_GPIO31                               31:31
-#define INT_MASK_GPIO31_DISABLE                       0
-#define INT_MASK_GPIO31_ENABLE                        1
-#define INT_MASK_GPIO30                               30:30
-#define INT_MASK_GPIO30_DISABLE                       0
-#define INT_MASK_GPIO30_ENABLE                        1
-#define INT_MASK_GPIO29                               29:29
-#define INT_MASK_GPIO29_DISABLE                       0
-#define INT_MASK_GPIO29_ENABLE                        1
-#define INT_MASK_GPIO28                               28:28
-#define INT_MASK_GPIO28_DISABLE                       0
-#define INT_MASK_GPIO28_ENABLE                        1
-#define INT_MASK_GPIO27                               27:27
-#define INT_MASK_GPIO27_DISABLE                       0
-#define INT_MASK_GPIO27_ENABLE                        1
-#define INT_MASK_GPIO26                               26:26
-#define INT_MASK_GPIO26_DISABLE                       0
-#define INT_MASK_GPIO26_ENABLE                        1
-#define INT_MASK_GPIO25                               25:25
-#define INT_MASK_GPIO25_DISABLE                       0
-#define INT_MASK_GPIO25_ENABLE                        1
-#define INT_MASK_I2C                                  12:12
-#define INT_MASK_I2C_DISABLE                          0
-#define INT_MASK_I2C_ENABLE                           1
-#define INT_MASK_PWM                                  11:11
-#define INT_MASK_PWM_DISABLE                          0
-#define INT_MASK_PWM_ENABLE                           1
-#define INT_MASK_DMA1                                 10:10
-#define INT_MASK_DMA1_DISABLE                         0
-#define INT_MASK_DMA1_ENABLE                          1
-#define INT_MASK_DMA                                  9:9
-#define INT_MASK_DMA_DISABLE                          0
-#define INT_MASK_DMA_ENABLE                           1
-#define INT_MASK_PCI                                  8:8
-#define INT_MASK_PCI_DISABLE                          0
-#define INT_MASK_PCI_ENABLE                           1
-#define INT_MASK_SSP1                                 7:7
-#define INT_MASK_SSP1_DISABLE                         0
-#define INT_MASK_SSP1_ENABLE                          1
-#define INT_MASK_SSP0                                 6:6
-#define INT_MASK_SSP0_DISABLE                         0
-#define INT_MASK_SSP0_ENABLE                          1
-#define INT_MASK_DE                                   5:5
-#define INT_MASK_DE_DISABLE                           0
-#define INT_MASK_DE_ENABLE                            1
-#define INT_MASK_ZVPORT1_VSYNC                        4:4
-#define INT_MASK_ZVPORT1_VSYNC_DISABLE                0
-#define INT_MASK_ZVPORT1_VSYNC_ENABLE                 1
-#define INT_MASK_ZVPORT0_VSYNC                        3:3
-#define INT_MASK_ZVPORT0_VSYNC_DISABLE                0
-#define INT_MASK_ZVPORT0_VSYNC_ENABLE                 1
-#define INT_MASK_CRT_VSYNC                            2:2
-#define INT_MASK_CRT_VSYNC_DISABLE                    0
-#define INT_MASK_CRT_VSYNC_ENABLE                     1
-#define INT_MASK_PANEL_VSYNC                          1:1
-#define INT_MASK_PANEL_VSYNC_DISABLE                  0
-#define INT_MASK_PANEL_VSYNC_ENABLE                   1
-#define INT_MASK_VGA_VSYNC                            0:0
-#define INT_MASK_VGA_VSYNC_DISABLE                    0
-#define INT_MASK_VGA_VSYNC_ENABLE                     1
+#define INT_MASK_GPIO31                               BIT(31)
+#define INT_MASK_GPIO30                               BIT(30)
+#define INT_MASK_GPIO29                               BIT(29)
+#define INT_MASK_GPIO28                               BIT(28)
+#define INT_MASK_GPIO27                               BIT(27)
+#define INT_MASK_GPIO26                               BIT(26)
+#define INT_MASK_GPIO25                               BIT(25)
+#define INT_MASK_I2C                                  BIT(12)
+#define INT_MASK_PWM                                  BIT(11)
+#define INT_MASK_DMA1                                 BIT(10)
+#define INT_MASK_DMA                                  BIT(9)
+#define INT_MASK_PCI                                  BIT(8)
+#define INT_MASK_SSP1                                 BIT(7)
+#define INT_MASK_SSP0                                 BIT(6)
+#define INT_MASK_DE                                   BIT(5)
+#define INT_MASK_ZVPORT1_VSYNC                        BIT(4)
+#define INT_MASK_ZVPORT0_VSYNC                        BIT(3)
+#define INT_MASK_CRT_VSYNC                            BIT(2)
+#define INT_MASK_PANEL_VSYNC                          BIT(1)
+#define INT_MASK_VGA_VSYNC                            BIT(0)
 
 #define CURRENT_GATE                                  0x000040
-#define CURRENT_GATE_MCLK                             15:14
+#define CURRENT_GATE_MCLK_MASK                        (0x3 << 14)
 #ifdef VALIDATION_CHIP
-    #define CURRENT_GATE_MCLK_112MHZ                      0
-    #define CURRENT_GATE_MCLK_84MHZ                       1
-    #define CURRENT_GATE_MCLK_56MHZ                       2
-    #define CURRENT_GATE_MCLK_42MHZ                       3
+    #define CURRENT_GATE_MCLK_112MHZ                  (0x0 << 14)
+    #define CURRENT_GATE_MCLK_84MHZ                   (0x1 << 14)
+    #define CURRENT_GATE_MCLK_56MHZ                   (0x2 << 14)
+    #define CURRENT_GATE_MCLK_42MHZ                   (0x3 << 14)
 #else
-    #define CURRENT_GATE_MCLK_DIV_3                       0
-    #define CURRENT_GATE_MCLK_DIV_4                       1
-    #define CURRENT_GATE_MCLK_DIV_6                       2
-    #define CURRENT_GATE_MCLK_DIV_8                       3
+    #define CURRENT_GATE_MCLK_DIV_3                   (0x0 << 14)
+    #define CURRENT_GATE_MCLK_DIV_4                   (0x1 << 14)
+    #define CURRENT_GATE_MCLK_DIV_6                   (0x2 << 14)
+    #define CURRENT_GATE_MCLK_DIV_8                   (0x3 << 14)
 #endif
-#define CURRENT_GATE_M2XCLK                           13:12
+#define CURRENT_GATE_M2XCLK_MASK                      (0x3 << 12)
 #ifdef VALIDATION_CHIP
-    #define CURRENT_GATE_M2XCLK_336MHZ                    0
-    #define CURRENT_GATE_M2XCLK_168MHZ                    1
-    #define CURRENT_GATE_M2XCLK_112MHZ                    2
-    #define CURRENT_GATE_M2XCLK_84MHZ                     3
+    #define CURRENT_GATE_M2XCLK_336MHZ                (0x0 << 12)
+    #define CURRENT_GATE_M2XCLK_168MHZ                (0x1 << 12)
+    #define CURRENT_GATE_M2XCLK_112MHZ                (0x2 << 12)
+    #define CURRENT_GATE_M2XCLK_84MHZ                 (0x3 << 12)
 #else
-    #define CURRENT_GATE_M2XCLK_DIV_1                     0
-    #define CURRENT_GATE_M2XCLK_DIV_2                     1
-    #define CURRENT_GATE_M2XCLK_DIV_3                     2
-    #define CURRENT_GATE_M2XCLK_DIV_4                     3
+    #define CURRENT_GATE_M2XCLK_DIV_1                 (0x0 << 12)
+    #define CURRENT_GATE_M2XCLK_DIV_2                 (0x1 << 12)
+    #define CURRENT_GATE_M2XCLK_DIV_3                 (0x2 << 12)
+    #define CURRENT_GATE_M2XCLK_DIV_4                 (0x3 << 12)
 #endif
-#define CURRENT_GATE_VGA                              10:10
-#define CURRENT_GATE_VGA_OFF                          0
-#define CURRENT_GATE_VGA_ON                           1
-#define CURRENT_GATE_PWM                              9:9
-#define CURRENT_GATE_PWM_OFF                          0
-#define CURRENT_GATE_PWM_ON                           1
-#define CURRENT_GATE_I2C                              8:8
-#define CURRENT_GATE_I2C_OFF                          0
-#define CURRENT_GATE_I2C_ON                           1
-#define CURRENT_GATE_SSP                              7:7
-#define CURRENT_GATE_SSP_OFF                          0
-#define CURRENT_GATE_SSP_ON                           1
-#define CURRENT_GATE_GPIO                             6:6
-#define CURRENT_GATE_GPIO_OFF                         0
-#define CURRENT_GATE_GPIO_ON                          1
-#define CURRENT_GATE_ZVPORT                           5:5
-#define CURRENT_GATE_ZVPORT_OFF                       0
-#define CURRENT_GATE_ZVPORT_ON                        1
-#define CURRENT_GATE_CSC                              4:4
-#define CURRENT_GATE_CSC_OFF                          0
-#define CURRENT_GATE_CSC_ON                           1
-#define CURRENT_GATE_DE                               3:3
-#define CURRENT_GATE_DE_OFF                           0
-#define CURRENT_GATE_DE_ON                            1
-#define CURRENT_GATE_DISPLAY                          2:2
-#define CURRENT_GATE_DISPLAY_OFF                      0
-#define CURRENT_GATE_DISPLAY_ON                       1
-#define CURRENT_GATE_LOCALMEM                         1:1
-#define CURRENT_GATE_LOCALMEM_OFF                     0
-#define CURRENT_GATE_LOCALMEM_ON                      1
-#define CURRENT_GATE_DMA                              0:0
-#define CURRENT_GATE_DMA_OFF                          0
-#define CURRENT_GATE_DMA_ON                           1
+#define CURRENT_GATE_VGA                              BIT(10)
+#define CURRENT_GATE_PWM                              BIT(9)
+#define CURRENT_GATE_I2C                              BIT(8)
+#define CURRENT_GATE_SSP                              BIT(7)
+#define CURRENT_GATE_GPIO                             BIT(6)
+#define CURRENT_GATE_ZVPORT                           BIT(5)
+#define CURRENT_GATE_CSC                              BIT(4)
+#define CURRENT_GATE_DE                               BIT(3)
+#define CURRENT_GATE_DISPLAY                          BIT(2)
+#define CURRENT_GATE_LOCALMEM                         BIT(1)
+#define CURRENT_GATE_DMA                              BIT(0)
 
 #define MODE0_GATE                                    0x000044
-#define MODE0_GATE_MCLK                               15:14
-#define MODE0_GATE_MCLK_112MHZ                        0
-#define MODE0_GATE_MCLK_84MHZ                         1
-#define MODE0_GATE_MCLK_56MHZ                         2
-#define MODE0_GATE_MCLK_42MHZ                         3
-#define MODE0_GATE_M2XCLK                             13:12
-#define MODE0_GATE_M2XCLK_336MHZ                      0
-#define MODE0_GATE_M2XCLK_168MHZ                      1
-#define MODE0_GATE_M2XCLK_112MHZ                      2
-#define MODE0_GATE_M2XCLK_84MHZ                       3
-#define MODE0_GATE_VGA                                10:10
-#define MODE0_GATE_VGA_OFF                            0
-#define MODE0_GATE_VGA_ON                             1
-#define MODE0_GATE_PWM                                9:9
-#define MODE0_GATE_PWM_OFF                            0
-#define MODE0_GATE_PWM_ON                             1
-#define MODE0_GATE_I2C                                8:8
-#define MODE0_GATE_I2C_OFF                            0
-#define MODE0_GATE_I2C_ON                             1
-#define MODE0_GATE_SSP                                7:7
-#define MODE0_GATE_SSP_OFF                            0
-#define MODE0_GATE_SSP_ON                             1
-#define MODE0_GATE_GPIO                               6:6
-#define MODE0_GATE_GPIO_OFF                           0
-#define MODE0_GATE_GPIO_ON                            1
-#define MODE0_GATE_ZVPORT                             5:5
-#define MODE0_GATE_ZVPORT_OFF                         0
-#define MODE0_GATE_ZVPORT_ON                          1
-#define MODE0_GATE_CSC                                4:4
-#define MODE0_GATE_CSC_OFF                            0
-#define MODE0_GATE_CSC_ON                             1
-#define MODE0_GATE_DE                                 3:3
-#define MODE0_GATE_DE_OFF                             0
-#define MODE0_GATE_DE_ON                              1
-#define MODE0_GATE_DISPLAY                            2:2
-#define MODE0_GATE_DISPLAY_OFF                        0
-#define MODE0_GATE_DISPLAY_ON                         1
-#define MODE0_GATE_LOCALMEM                           1:1
-#define MODE0_GATE_LOCALMEM_OFF                       0
-#define MODE0_GATE_LOCALMEM_ON                        1
-#define MODE0_GATE_DMA                                0:0
-#define MODE0_GATE_DMA_OFF                            0
-#define MODE0_GATE_DMA_ON                             1
+#define MODE0_GATE_MCLK_MASK                          (0x3 << 14)
+#define MODE0_GATE_MCLK_112MHZ                        (0x0 << 14)
+#define MODE0_GATE_MCLK_84MHZ                         (0x1 << 14)
+#define MODE0_GATE_MCLK_56MHZ                         (0x2 << 14)
+#define MODE0_GATE_MCLK_42MHZ                         (0x3 << 14)
+#define MODE0_GATE_M2XCLK_MASK                        (0x3 << 12)
+#define MODE0_GATE_M2XCLK_336MHZ                      (0x0 << 12)
+#define MODE0_GATE_M2XCLK_168MHZ                      (0x1 << 12)
+#define MODE0_GATE_M2XCLK_112MHZ                      (0x2 << 12)
+#define MODE0_GATE_M2XCLK_84MHZ                       (0x3 << 12)
+#define MODE0_GATE_VGA                                BIT(10)
+#define MODE0_GATE_PWM                                BIT(9)
+#define MODE0_GATE_I2C                                BIT(8)
+#define MODE0_GATE_SSP                                BIT(7)
+#define MODE0_GATE_GPIO                               BIT(6)
+#define MODE0_GATE_ZVPORT                             BIT(5)
+#define MODE0_GATE_CSC                                BIT(4)
+#define MODE0_GATE_DE                                 BIT(3)
+#define MODE0_GATE_DISPLAY                            BIT(2)
+#define MODE0_GATE_LOCALMEM                           BIT(1)
+#define MODE0_GATE_DMA                                BIT(0)
 
 #define MODE1_GATE                                    0x000048
-#define MODE1_GATE_MCLK                               15:14
-#define MODE1_GATE_MCLK_112MHZ                        0
-#define MODE1_GATE_MCLK_84MHZ                         1
-#define MODE1_GATE_MCLK_56MHZ                         2
-#define MODE1_GATE_MCLK_42MHZ                         3
-#define MODE1_GATE_M2XCLK                             13:12
-#define MODE1_GATE_M2XCLK_336MHZ                      0
-#define MODE1_GATE_M2XCLK_168MHZ                      1
-#define MODE1_GATE_M2XCLK_112MHZ                      2
-#define MODE1_GATE_M2XCLK_84MHZ                       3
-#define MODE1_GATE_VGA                                10:10
-#define MODE1_GATE_VGA_OFF                            0
-#define MODE1_GATE_VGA_ON                             1
-#define MODE1_GATE_PWM                                9:9
-#define MODE1_GATE_PWM_OFF                            0
-#define MODE1_GATE_PWM_ON                             1
-#define MODE1_GATE_I2C                                8:8
-#define MODE1_GATE_I2C_OFF                            0
-#define MODE1_GATE_I2C_ON                             1
-#define MODE1_GATE_SSP                                7:7
-#define MODE1_GATE_SSP_OFF                            0
-#define MODE1_GATE_SSP_ON                             1
-#define MODE1_GATE_GPIO                               6:6
-#define MODE1_GATE_GPIO_OFF                           0
-#define MODE1_GATE_GPIO_ON                            1
-#define MODE1_GATE_ZVPORT                             5:5
-#define MODE1_GATE_ZVPORT_OFF                         0
-#define MODE1_GATE_ZVPORT_ON                          1
-#define MODE1_GATE_CSC                                4:4
-#define MODE1_GATE_CSC_OFF                            0
-#define MODE1_GATE_CSC_ON                             1
-#define MODE1_GATE_DE                                 3:3
-#define MODE1_GATE_DE_OFF                             0
-#define MODE1_GATE_DE_ON                              1
-#define MODE1_GATE_DISPLAY                            2:2
-#define MODE1_GATE_DISPLAY_OFF                        0
-#define MODE1_GATE_DISPLAY_ON                         1
-#define MODE1_GATE_LOCALMEM                           1:1
-#define MODE1_GATE_LOCALMEM_OFF                       0
-#define MODE1_GATE_LOCALMEM_ON                        1
-#define MODE1_GATE_DMA                                0:0
-#define MODE1_GATE_DMA_OFF                            0
-#define MODE1_GATE_DMA_ON                             1
+#define MODE1_GATE_MCLK_MASK                          (0x3 << 14)
+#define MODE1_GATE_MCLK_112MHZ                        (0x0 << 14)
+#define MODE1_GATE_MCLK_84MHZ                         (0x1 << 14)
+#define MODE1_GATE_MCLK_56MHZ                         (0x2 << 14)
+#define MODE1_GATE_MCLK_42MHZ                         (0x3 << 14)
+#define MODE1_GATE_M2XCLK_MASK                        (0x3 << 12)
+#define MODE1_GATE_M2XCLK_336MHZ                      (0x0 << 12)
+#define MODE1_GATE_M2XCLK_168MHZ                      (0x1 << 12)
+#define MODE1_GATE_M2XCLK_112MHZ                      (0x2 << 12)
+#define MODE1_GATE_M2XCLK_84MHZ                       (0x3 << 12)
+#define MODE1_GATE_VGA                                BIT(10)
+#define MODE1_GATE_PWM                                BIT(9)
+#define MODE1_GATE_I2C                                BIT(8)
+#define MODE1_GATE_SSP                                BIT(7)
+#define MODE1_GATE_GPIO                               BIT(6)
+#define MODE1_GATE_ZVPORT                             BIT(5)
+#define MODE1_GATE_CSC                                BIT(4)
+#define MODE1_GATE_DE                                 BIT(3)
+#define MODE1_GATE_DISPLAY                            BIT(2)
+#define MODE1_GATE_LOCALMEM                           BIT(1)
+#define MODE1_GATE_DMA                                BIT(0)
 
 #define POWER_MODE_CTRL                               0x00004C
 #ifdef VALIDATION_CHIP
-    #define POWER_MODE_CTRL_336CLK                    4:4
-    #define POWER_MODE_CTRL_336CLK_OFF                0
-    #define POWER_MODE_CTRL_336CLK_ON                 1
+    #define POWER_MODE_CTRL_336CLK                    BIT(4)
 #endif
-#define POWER_MODE_CTRL_OSC_INPUT                     3:3
-#define POWER_MODE_CTRL_OSC_INPUT_OFF                 0
-#define POWER_MODE_CTRL_OSC_INPUT_ON                  1
-#define POWER_MODE_CTRL_ACPI                          2:2
-#define POWER_MODE_CTRL_ACPI_OFF                      0
-#define POWER_MODE_CTRL_ACPI_ON                       1
-#define POWER_MODE_CTRL_MODE                          1:0
-#define POWER_MODE_CTRL_MODE_MODE0                    0
-#define POWER_MODE_CTRL_MODE_MODE1                    1
-#define POWER_MODE_CTRL_MODE_SLEEP                    2
+#define POWER_MODE_CTRL_OSC_INPUT                     BIT(3)
+#define POWER_MODE_CTRL_ACPI                          BIT(2)
+#define POWER_MODE_CTRL_MODE_MASK                     (0x3 << 0)
+#define POWER_MODE_CTRL_MODE_MODE0                    (0x0 << 0)
+#define POWER_MODE_CTRL_MODE_MODE1                    (0x1 << 0)
+#define POWER_MODE_CTRL_MODE_SLEEP                    (0x2 << 0)
 
 #define PCI_MASTER_BASE                               0x000050
-#define PCI_MASTER_BASE_ADDRESS                       7:0
+#define PCI_MASTER_BASE_ADDRESS_MASK                  0xff
 
 #define DEVICE_ID                                     0x000054
-#define DEVICE_ID_DEVICE_ID                           31:16
-#define DEVICE_ID_REVISION_ID                         7:0
+#define DEVICE_ID_DEVICE_ID_MASK                      (0xffff << 16)
+#define DEVICE_ID_REVISION_ID_MASK                    0xff
 
 #define PLL_CLK_COUNT                                 0x000058
-#define PLL_CLK_COUNT_COUNTER                         15:0
+#define PLL_CLK_COUNT_COUNTER_MASK                    0xffff
 
 #define PANEL_PLL_CTRL                                0x00005C
-#define PANEL_PLL_CTRL_BYPASS                         18:18
-#define PANEL_PLL_CTRL_BYPASS_OFF                     0
-#define PANEL_PLL_CTRL_BYPASS_ON                      1
-#define PANEL_PLL_CTRL_POWER                          17:17
-#define PANEL_PLL_CTRL_POWER_OFF                      0
-#define PANEL_PLL_CTRL_POWER_ON                       1
-#define PANEL_PLL_CTRL_INPUT                          16:16
-#define PANEL_PLL_CTRL_INPUT_OSC                      0
-#define PANEL_PLL_CTRL_INPUT_TESTCLK                  1
+#define PLL_CTRL_BYPASS                               BIT(18)
+#define PLL_CTRL_POWER                                BIT(17)
+#define PLL_CTRL_INPUT                                BIT(16)
 #ifdef VALIDATION_CHIP
-    #define PANEL_PLL_CTRL_OD                         15:14
+    #define PLL_CTRL_OD_SHIFT                         14
+    #define PLL_CTRL_OD_MASK                          (0x3 << 14)
 #else
-    #define PANEL_PLL_CTRL_POD                        15:14
-    #define PANEL_PLL_CTRL_OD                         13:12
+    #define PLL_CTRL_POD_SHIFT                        14
+    #define PLL_CTRL_POD_MASK                         (0x3 << 14)
+    #define PLL_CTRL_OD_SHIFT                         12
+    #define PLL_CTRL_OD_MASK                          (0x3 << 12)
 #endif
-#define PANEL_PLL_CTRL_N                              11:8
-#define PANEL_PLL_CTRL_M                              7:0
+#define PLL_CTRL_N_SHIFT                              8
+#define PLL_CTRL_N_MASK                               (0xf << 8)
+#define PLL_CTRL_M_SHIFT                              0
+#define PLL_CTRL_M_MASK                               0xff
 
 #define CRT_PLL_CTRL                                  0x000060
-#define CRT_PLL_CTRL_BYPASS                           18:18
-#define CRT_PLL_CTRL_BYPASS_OFF                       0
-#define CRT_PLL_CTRL_BYPASS_ON                        1
-#define CRT_PLL_CTRL_POWER                            17:17
-#define CRT_PLL_CTRL_POWER_OFF                        0
-#define CRT_PLL_CTRL_POWER_ON                         1
-#define CRT_PLL_CTRL_INPUT                            16:16
-#define CRT_PLL_CTRL_INPUT_OSC                        0
-#define CRT_PLL_CTRL_INPUT_TESTCLK                    1
-#ifdef VALIDATION_CHIP
-    #define CRT_PLL_CTRL_OD                           15:14
-#else
-    #define CRT_PLL_CTRL_POD                          15:14
-    #define CRT_PLL_CTRL_OD                           13:12
-#endif
-#define CRT_PLL_CTRL_N                                11:8
-#define CRT_PLL_CTRL_M                                7:0
 
 #define VGA_PLL0_CTRL                                 0x000064
-#define VGA_PLL0_CTRL_BYPASS                          18:18
-#define VGA_PLL0_CTRL_BYPASS_OFF                      0
-#define VGA_PLL0_CTRL_BYPASS_ON                       1
-#define VGA_PLL0_CTRL_POWER                           17:17
-#define VGA_PLL0_CTRL_POWER_OFF                       0
-#define VGA_PLL0_CTRL_POWER_ON                        1
-#define VGA_PLL0_CTRL_INPUT                           16:16
-#define VGA_PLL0_CTRL_INPUT_OSC                       0
-#define VGA_PLL0_CTRL_INPUT_TESTCLK                   1
-#ifdef VALIDATION_CHIP
-    #define VGA_PLL0_CTRL_OD                          15:14
-#else
-    #define VGA_PLL0_CTRL_POD                         15:14
-    #define VGA_PLL0_CTRL_OD                          13:12
-#endif
-#define VGA_PLL0_CTRL_N                               11:8
-#define VGA_PLL0_CTRL_M                               7:0
 
 #define VGA_PLL1_CTRL                                 0x000068
-#define VGA_PLL1_CTRL_BYPASS                          18:18
-#define VGA_PLL1_CTRL_BYPASS_OFF                      0
-#define VGA_PLL1_CTRL_BYPASS_ON                       1
-#define VGA_PLL1_CTRL_POWER                           17:17
-#define VGA_PLL1_CTRL_POWER_OFF                       0
-#define VGA_PLL1_CTRL_POWER_ON                        1
-#define VGA_PLL1_CTRL_INPUT                           16:16
-#define VGA_PLL1_CTRL_INPUT_OSC                       0
-#define VGA_PLL1_CTRL_INPUT_TESTCLK                   1
-#ifdef VALIDATION_CHIP
-    #define VGA_PLL1_CTRL_OD                          15:14
-#else
-    #define VGA_PLL1_CTRL_POD                         15:14
-    #define VGA_PLL1_CTRL_OD                          13:12
-#endif
-#define VGA_PLL1_CTRL_N                               11:8
-#define VGA_PLL1_CTRL_M                               7:0
 
 #define SCRATCH_DATA                                  0x00006c
 
 #ifndef VALIDATION_CHIP
 
 #define MXCLK_PLL_CTRL                                0x000070
-#define MXCLK_PLL_CTRL_BYPASS                         18:18
-#define MXCLK_PLL_CTRL_BYPASS_OFF                     0
-#define MXCLK_PLL_CTRL_BYPASS_ON                      1
-#define MXCLK_PLL_CTRL_POWER                          17:17
-#define MXCLK_PLL_CTRL_POWER_OFF                      0
-#define MXCLK_PLL_CTRL_POWER_ON                       1
-#define MXCLK_PLL_CTRL_INPUT                          16:16
-#define MXCLK_PLL_CTRL_INPUT_OSC                      0
-#define MXCLK_PLL_CTRL_INPUT_TESTCLK                  1
-#define MXCLK_PLL_CTRL_POD                            15:14
-#define MXCLK_PLL_CTRL_OD                             13:12
-#define MXCLK_PLL_CTRL_N                              11:8
-#define MXCLK_PLL_CTRL_M                              7:0
 
 #define VGA_CONFIGURATION                             0x000088
-#define VGA_CONFIGURATION_USER_DEFINE                 5:4
-#define VGA_CONFIGURATION_PLL                         2:2
-#define VGA_CONFIGURATION_PLL_VGA                     0
-#define VGA_CONFIGURATION_PLL_PANEL                   1
-#define VGA_CONFIGURATION_MODE                        1:1
-#define VGA_CONFIGURATION_MODE_TEXT                   0
-#define VGA_CONFIGURATION_MODE_GRAPHIC                1
+#define VGA_CONFIGURATION_USER_DEFINE_MASK            (0x3 << 4)
+#define VGA_CONFIGURATION_PLL                         BIT(2)
+#define VGA_CONFIGURATION_MODE                        BIT(1)
 
 #endif
 
 #define GPIO_DATA                                       0x010000
-#define GPIO_DATA_31                                    31:31
-#define GPIO_DATA_30                                    30:30
-#define GPIO_DATA_29                                    29:29
-#define GPIO_DATA_28                                    28:28
-#define GPIO_DATA_27                                    27:27
-#define GPIO_DATA_26                                    26:26
-#define GPIO_DATA_25                                    25:25
-#define GPIO_DATA_24                                    24:24
-#define GPIO_DATA_23                                    23:23
-#define GPIO_DATA_22                                    22:22
-#define GPIO_DATA_21                                    21:21
-#define GPIO_DATA_20                                    20:20
-#define GPIO_DATA_19                                    19:19
-#define GPIO_DATA_18                                    18:18
-#define GPIO_DATA_17                                    17:17
-#define GPIO_DATA_16                                    16:16
-#define GPIO_DATA_15                                    15:15
-#define GPIO_DATA_14                                    14:14
-#define GPIO_DATA_13                                    13:13
-#define GPIO_DATA_12                                    12:12
-#define GPIO_DATA_11                                    11:11
-#define GPIO_DATA_10                                    10:10
-#define GPIO_DATA_9                                     9:9
-#define GPIO_DATA_8                                     8:8
-#define GPIO_DATA_7                                     7:7
-#define GPIO_DATA_6                                     6:6
-#define GPIO_DATA_5                                     5:5
-#define GPIO_DATA_4                                     4:4
-#define GPIO_DATA_3                                     3:3
-#define GPIO_DATA_2                                     2:2
-#define GPIO_DATA_1                                     1:1
-#define GPIO_DATA_0                                     0:0
+#define GPIO_DATA_31                                    BIT(31)
+#define GPIO_DATA_30                                    BIT(30)
+#define GPIO_DATA_29                                    BIT(29)
+#define GPIO_DATA_28                                    BIT(28)
+#define GPIO_DATA_27                                    BIT(27)
+#define GPIO_DATA_26                                    BIT(26)
+#define GPIO_DATA_25                                    BIT(25)
+#define GPIO_DATA_24                                    BIT(24)
+#define GPIO_DATA_23                                    BIT(23)
+#define GPIO_DATA_22                                    BIT(22)
+#define GPIO_DATA_21                                    BIT(21)
+#define GPIO_DATA_20                                    BIT(20)
+#define GPIO_DATA_19                                    BIT(19)
+#define GPIO_DATA_18                                    BIT(18)
+#define GPIO_DATA_17                                    BIT(17)
+#define GPIO_DATA_16                                    BIT(16)
+#define GPIO_DATA_15                                    BIT(15)
+#define GPIO_DATA_14                                    BIT(14)
+#define GPIO_DATA_13                                    BIT(13)
+#define GPIO_DATA_12                                    BIT(12)
+#define GPIO_DATA_11                                    BIT(11)
+#define GPIO_DATA_10                                    BIT(10)
+#define GPIO_DATA_9                                     BIT(9)
+#define GPIO_DATA_8                                     BIT(8)
+#define GPIO_DATA_7                                     BIT(7)
+#define GPIO_DATA_6                                     BIT(6)
+#define GPIO_DATA_5                                     BIT(5)
+#define GPIO_DATA_4                                     BIT(4)
+#define GPIO_DATA_3                                     BIT(3)
+#define GPIO_DATA_2                                     BIT(2)
+#define GPIO_DATA_1                                     BIT(1)
+#define GPIO_DATA_0                                     BIT(0)
 
 #define GPIO_DATA_DIRECTION                             0x010004
-#define GPIO_DATA_DIRECTION_31                          31:31
-#define GPIO_DATA_DIRECTION_31_INPUT                    0
-#define GPIO_DATA_DIRECTION_31_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_30                          30:30
-#define GPIO_DATA_DIRECTION_30_INPUT                    0
-#define GPIO_DATA_DIRECTION_30_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_29                          29:29
-#define GPIO_DATA_DIRECTION_29_INPUT                    0
-#define GPIO_DATA_DIRECTION_29_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_28                          28:28
-#define GPIO_DATA_DIRECTION_28_INPUT                    0
-#define GPIO_DATA_DIRECTION_28_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_27                          27:27
-#define GPIO_DATA_DIRECTION_27_INPUT                    0
-#define GPIO_DATA_DIRECTION_27_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_26                          26:26
-#define GPIO_DATA_DIRECTION_26_INPUT                    0
-#define GPIO_DATA_DIRECTION_26_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_25                          25:25
-#define GPIO_DATA_DIRECTION_25_INPUT                    0
-#define GPIO_DATA_DIRECTION_25_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_24                          24:24
-#define GPIO_DATA_DIRECTION_24_INPUT                    0
-#define GPIO_DATA_DIRECTION_24_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_23                          23:23
-#define GPIO_DATA_DIRECTION_23_INPUT                    0
-#define GPIO_DATA_DIRECTION_23_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_22                          22:22
-#define GPIO_DATA_DIRECTION_22_INPUT                    0
-#define GPIO_DATA_DIRECTION_22_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_21                          21:21
-#define GPIO_DATA_DIRECTION_21_INPUT                    0
-#define GPIO_DATA_DIRECTION_21_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_20                          20:20
-#define GPIO_DATA_DIRECTION_20_INPUT                    0
-#define GPIO_DATA_DIRECTION_20_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_19                          19:19
-#define GPIO_DATA_DIRECTION_19_INPUT                    0
-#define GPIO_DATA_DIRECTION_19_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_18                          18:18
-#define GPIO_DATA_DIRECTION_18_INPUT                    0
-#define GPIO_DATA_DIRECTION_18_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_17                          17:17
-#define GPIO_DATA_DIRECTION_17_INPUT                    0
-#define GPIO_DATA_DIRECTION_17_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_16                          16:16
-#define GPIO_DATA_DIRECTION_16_INPUT                    0
-#define GPIO_DATA_DIRECTION_16_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_15                          15:15
-#define GPIO_DATA_DIRECTION_15_INPUT                    0
-#define GPIO_DATA_DIRECTION_15_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_14                          14:14
-#define GPIO_DATA_DIRECTION_14_INPUT                    0
-#define GPIO_DATA_DIRECTION_14_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_13                          13:13
-#define GPIO_DATA_DIRECTION_13_INPUT                    0
-#define GPIO_DATA_DIRECTION_13_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_12                          12:12
-#define GPIO_DATA_DIRECTION_12_INPUT                    0
-#define GPIO_DATA_DIRECTION_12_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_11                          11:11
-#define GPIO_DATA_DIRECTION_11_INPUT                    0
-#define GPIO_DATA_DIRECTION_11_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_10                          10:10
-#define GPIO_DATA_DIRECTION_10_INPUT                    0
-#define GPIO_DATA_DIRECTION_10_OUTPUT                   1
-#define GPIO_DATA_DIRECTION_9                           9:9
-#define GPIO_DATA_DIRECTION_9_INPUT                     0
-#define GPIO_DATA_DIRECTION_9_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_8                           8:8
-#define GPIO_DATA_DIRECTION_8_INPUT                     0
-#define GPIO_DATA_DIRECTION_8_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_7                           7:7
-#define GPIO_DATA_DIRECTION_7_INPUT                     0
-#define GPIO_DATA_DIRECTION_7_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_6                           6:6
-#define GPIO_DATA_DIRECTION_6_INPUT                     0
-#define GPIO_DATA_DIRECTION_6_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_5                           5:5
-#define GPIO_DATA_DIRECTION_5_INPUT                     0
-#define GPIO_DATA_DIRECTION_5_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_4                           4:4
-#define GPIO_DATA_DIRECTION_4_INPUT                     0
-#define GPIO_DATA_DIRECTION_4_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_3                           3:3
-#define GPIO_DATA_DIRECTION_3_INPUT                     0
-#define GPIO_DATA_DIRECTION_3_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_2                           2:2
-#define GPIO_DATA_DIRECTION_2_INPUT                     0
-#define GPIO_DATA_DIRECTION_2_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_1                           131
-#define GPIO_DATA_DIRECTION_1_INPUT                     0
-#define GPIO_DATA_DIRECTION_1_OUTPUT                    1
-#define GPIO_DATA_DIRECTION_0                           0:0
-#define GPIO_DATA_DIRECTION_0_INPUT                     0
-#define GPIO_DATA_DIRECTION_0_OUTPUT                    1
+#define GPIO_DATA_DIRECTION_31                          BIT(31)
+#define GPIO_DATA_DIRECTION_30                          BIT(30)
+#define GPIO_DATA_DIRECTION_29                          BIT(29)
+#define GPIO_DATA_DIRECTION_28                          BIT(28)
+#define GPIO_DATA_DIRECTION_27                          BIT(27)
+#define GPIO_DATA_DIRECTION_26                          BIT(26)
+#define GPIO_DATA_DIRECTION_25                          BIT(25)
+#define GPIO_DATA_DIRECTION_24                          BIT(24)
+#define GPIO_DATA_DIRECTION_23                          BIT(23)
+#define GPIO_DATA_DIRECTION_22                          BIT(22)
+#define GPIO_DATA_DIRECTION_21                          BIT(21)
+#define GPIO_DATA_DIRECTION_20                          BIT(20)
+#define GPIO_DATA_DIRECTION_19                          BIT(19)
+#define GPIO_DATA_DIRECTION_18                          BIT(18)
+#define GPIO_DATA_DIRECTION_17                          BIT(17)
+#define GPIO_DATA_DIRECTION_16                          BIT(16)
+#define GPIO_DATA_DIRECTION_15                          BIT(15)
+#define GPIO_DATA_DIRECTION_14                          BIT(14)
+#define GPIO_DATA_DIRECTION_13                          BIT(13)
+#define GPIO_DATA_DIRECTION_12                          BIT(12)
+#define GPIO_DATA_DIRECTION_11                          BIT(11)
+#define GPIO_DATA_DIRECTION_10                          BIT(10)
+#define GPIO_DATA_DIRECTION_9                           BIT(9)
+#define GPIO_DATA_DIRECTION_8                           BIT(8)
+#define GPIO_DATA_DIRECTION_7                           BIT(7)
+#define GPIO_DATA_DIRECTION_6                           BIT(6)
+#define GPIO_DATA_DIRECTION_5                           BIT(5)
+#define GPIO_DATA_DIRECTION_4                           BIT(4)
+#define GPIO_DATA_DIRECTION_3                           BIT(3)
+#define GPIO_DATA_DIRECTION_2                           BIT(2)
+#define GPIO_DATA_DIRECTION_1                           BIT(1)
+#define GPIO_DATA_DIRECTION_0                           BIT(0)
 
 #define GPIO_INTERRUPT_SETUP                            0x010008
-#define GPIO_INTERRUPT_SETUP_TRIGGER_31                 22:22
-#define GPIO_INTERRUPT_SETUP_TRIGGER_31_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_31_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_30                 21:21
-#define GPIO_INTERRUPT_SETUP_TRIGGER_30_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_30_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_29                 20:20
-#define GPIO_INTERRUPT_SETUP_TRIGGER_29_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_29_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_28                 19:19
-#define GPIO_INTERRUPT_SETUP_TRIGGER_28_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_28_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_27                 18:18
-#define GPIO_INTERRUPT_SETUP_TRIGGER_27_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_27_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_26                 17:17
-#define GPIO_INTERRUPT_SETUP_TRIGGER_26_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_26_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_25                 16:16
-#define GPIO_INTERRUPT_SETUP_TRIGGER_25_EDGE            0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_25_LEVEL           1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_31                  14:14
-#define GPIO_INTERRUPT_SETUP_ACTIVE_31_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_31_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_30                  13:13
-#define GPIO_INTERRUPT_SETUP_ACTIVE_30_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_30_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_29                  12:12
-#define GPIO_INTERRUPT_SETUP_ACTIVE_29_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_29_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_28                  11:11
-#define GPIO_INTERRUPT_SETUP_ACTIVE_28_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_28_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_27                  10:10
-#define GPIO_INTERRUPT_SETUP_ACTIVE_27_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_27_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_26                  9:9
-#define GPIO_INTERRUPT_SETUP_ACTIVE_26_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_26_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_25                  8:8
-#define GPIO_INTERRUPT_SETUP_ACTIVE_25_LOW              0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_25_HIGH             1
-#define GPIO_INTERRUPT_SETUP_ENABLE_31                  6:6
-#define GPIO_INTERRUPT_SETUP_ENABLE_31_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_31_INTERRUPT        1
-#define GPIO_INTERRUPT_SETUP_ENABLE_30                  5:5
-#define GPIO_INTERRUPT_SETUP_ENABLE_30_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_30_INTERRUPT        1
-#define GPIO_INTERRUPT_SETUP_ENABLE_29                  4:4
-#define GPIO_INTERRUPT_SETUP_ENABLE_29_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_29_INTERRUPT        1
-#define GPIO_INTERRUPT_SETUP_ENABLE_28                  3:3
-#define GPIO_INTERRUPT_SETUP_ENABLE_28_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_28_INTERRUPT        1
-#define GPIO_INTERRUPT_SETUP_ENABLE_27                  2:2
-#define GPIO_INTERRUPT_SETUP_ENABLE_27_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_27_INTERRUPT        1
-#define GPIO_INTERRUPT_SETUP_ENABLE_26                  1:1
-#define GPIO_INTERRUPT_SETUP_ENABLE_26_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_26_INTERRUPT        1
-#define GPIO_INTERRUPT_SETUP_ENABLE_25                  0:0
-#define GPIO_INTERRUPT_SETUP_ENABLE_25_GPIO             0
-#define GPIO_INTERRUPT_SETUP_ENABLE_25_INTERRUPT        1
+#define GPIO_INTERRUPT_SETUP_TRIGGER_31                 BIT(22)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_30                 BIT(21)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_29                 BIT(20)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_28                 BIT(19)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_27                 BIT(18)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_26                 BIT(17)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_25                 BIT(16)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_31                  BIT(14)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_30                  BIT(13)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_29                  BIT(12)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_28                  BIT(11)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_27                  BIT(10)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_26                  BIT(9)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_25                  BIT(8)
+#define GPIO_INTERRUPT_SETUP_ENABLE_31                  BIT(6)
+#define GPIO_INTERRUPT_SETUP_ENABLE_30                  BIT(5)
+#define GPIO_INTERRUPT_SETUP_ENABLE_29                  BIT(4)
+#define GPIO_INTERRUPT_SETUP_ENABLE_28                  BIT(3)
+#define GPIO_INTERRUPT_SETUP_ENABLE_27                  BIT(2)
+#define GPIO_INTERRUPT_SETUP_ENABLE_26                  BIT(1)
+#define GPIO_INTERRUPT_SETUP_ENABLE_25                  BIT(0)
 
 #define GPIO_INTERRUPT_STATUS                           0x01000C
-#define GPIO_INTERRUPT_STATUS_31                        22:22
-#define GPIO_INTERRUPT_STATUS_31_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_31_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_31_RESET                  1
-#define GPIO_INTERRUPT_STATUS_30                        21:21
-#define GPIO_INTERRUPT_STATUS_30_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_30_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_30_RESET                  1
-#define GPIO_INTERRUPT_STATUS_29                        20:20
-#define GPIO_INTERRUPT_STATUS_29_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_29_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_29_RESET                  1
-#define GPIO_INTERRUPT_STATUS_28                        19:19
-#define GPIO_INTERRUPT_STATUS_28_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_28_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_28_RESET                  1
-#define GPIO_INTERRUPT_STATUS_27                        18:18
-#define GPIO_INTERRUPT_STATUS_27_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_27_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_27_RESET                  1
-#define GPIO_INTERRUPT_STATUS_26                        17:17
-#define GPIO_INTERRUPT_STATUS_26_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_26_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_26_RESET                  1
-#define GPIO_INTERRUPT_STATUS_25                        16:16
-#define GPIO_INTERRUPT_STATUS_25_INACTIVE               0
-#define GPIO_INTERRUPT_STATUS_25_ACTIVE                 1
-#define GPIO_INTERRUPT_STATUS_25_RESET                  1
+#define GPIO_INTERRUPT_STATUS_31                        BIT(22)
+#define GPIO_INTERRUPT_STATUS_30                        BIT(21)
+#define GPIO_INTERRUPT_STATUS_29                        BIT(20)
+#define GPIO_INTERRUPT_STATUS_28                        BIT(19)
+#define GPIO_INTERRUPT_STATUS_27                        BIT(18)
+#define GPIO_INTERRUPT_STATUS_26                        BIT(17)
+#define GPIO_INTERRUPT_STATUS_25                        BIT(16)
 
 
 #define PANEL_DISPLAY_CTRL                            0x080000
-#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK            31:30
-#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE    0
-#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE     3
-#define PANEL_DISPLAY_CTRL_SELECT                     29:28
-#define PANEL_DISPLAY_CTRL_SELECT_PANEL               0
-#define PANEL_DISPLAY_CTRL_SELECT_VGA                 1
-#define PANEL_DISPLAY_CTRL_SELECT_CRT                 2
-#define PANEL_DISPLAY_CTRL_FPEN                       27:27
-#define PANEL_DISPLAY_CTRL_FPEN_LOW                   0
-#define PANEL_DISPLAY_CTRL_FPEN_HIGH                  1
-#define PANEL_DISPLAY_CTRL_VBIASEN                    26:26
-#define PANEL_DISPLAY_CTRL_VBIASEN_LOW                0
-#define PANEL_DISPLAY_CTRL_VBIASEN_HIGH               1
-#define PANEL_DISPLAY_CTRL_DATA                       25:25
-#define PANEL_DISPLAY_CTRL_DATA_DISABLE               0
-#define PANEL_DISPLAY_CTRL_DATA_ENABLE                1
-#define PANEL_DISPLAY_CTRL_FPVDDEN                    24:24
-#define PANEL_DISPLAY_CTRL_FPVDDEN_LOW                0
-#define PANEL_DISPLAY_CTRL_FPVDDEN_HIGH               1
-#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK            23:20
-#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE    0
-#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE     15
-
-#define PANEL_DISPLAY_CTRL_TFT_DISP 19:18
-#define PANEL_DISPLAY_CTRL_TFT_DISP_24 0
-#define PANEL_DISPLAY_CTRL_TFT_DISP_36 1
-#define PANEL_DISPLAY_CTRL_TFT_DISP_18 2
-
-
-#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY               19:19
-#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY_DISABLE       0
-#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY_ENABLE        1
-#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL               18:18
-#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL_DISABLE       0
-#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL_ENABLE        1
-#define PANEL_DISPLAY_CTRL_FIFO                       17:16
-#define PANEL_DISPLAY_CTRL_FIFO_1                     0
-#define PANEL_DISPLAY_CTRL_FIFO_3                     1
-#define PANEL_DISPLAY_CTRL_FIFO_7                     2
-#define PANEL_DISPLAY_CTRL_FIFO_11                    3
-#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK            15:15
-#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE    0
-#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE     1
-#define PANEL_DISPLAY_CTRL_CLOCK_PHASE                14:14
-#define PANEL_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_HIGH    0
-#define PANEL_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_LOW     1
-#define PANEL_DISPLAY_CTRL_VSYNC_PHASE                13:13
-#define PANEL_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_HIGH    0
-#define PANEL_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_LOW     1
-#define PANEL_DISPLAY_CTRL_HSYNC_PHASE                12:12
-#define PANEL_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_HIGH    0
-#define PANEL_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_LOW     1
-#define PANEL_DISPLAY_CTRL_VSYNC                      11:11
-#define PANEL_DISPLAY_CTRL_VSYNC_ACTIVE_HIGH          0
-#define PANEL_DISPLAY_CTRL_VSYNC_ACTIVE_LOW           1
-#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING             10:10
-#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING_DISABLE     0
-#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING_ENABLE      1
-#define PANEL_DISPLAY_CTRL_COLOR_KEY                  9:9
-#define PANEL_DISPLAY_CTRL_COLOR_KEY_DISABLE          0
-#define PANEL_DISPLAY_CTRL_COLOR_KEY_ENABLE           1
-#define PANEL_DISPLAY_CTRL_TIMING                     8:8
-#define PANEL_DISPLAY_CTRL_TIMING_DISABLE             0
-#define PANEL_DISPLAY_CTRL_TIMING_ENABLE              1
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR           7:7
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR_DOWN      0
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR_UP        1
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN               6:6
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DISABLE       0
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_ENABLE        1
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR         5:5
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR_RIGHT   0
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR_LEFT    1
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN             4:4
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DISABLE     0
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_ENABLE      1
-#define PANEL_DISPLAY_CTRL_GAMMA                      3:3
-#define PANEL_DISPLAY_CTRL_GAMMA_DISABLE              0
-#define PANEL_DISPLAY_CTRL_GAMMA_ENABLE               1
-#define PANEL_DISPLAY_CTRL_PLANE                      2:2
-#define PANEL_DISPLAY_CTRL_PLANE_DISABLE              0
-#define PANEL_DISPLAY_CTRL_PLANE_ENABLE               1
-#define PANEL_DISPLAY_CTRL_FORMAT                     1:0
-#define PANEL_DISPLAY_CTRL_FORMAT_8                   0
-#define PANEL_DISPLAY_CTRL_FORMAT_16                  1
-#define PANEL_DISPLAY_CTRL_FORMAT_32                  2
+#define PANEL_DISPLAY_CTRL_RESERVED_MASK              0xc0f08000
+#define PANEL_DISPLAY_CTRL_SELECT_SHIFT               28
+#define PANEL_DISPLAY_CTRL_SELECT_MASK                (0x3 << 28)
+#define PANEL_DISPLAY_CTRL_SELECT_PANEL               (0x0 << 28)
+#define PANEL_DISPLAY_CTRL_SELECT_VGA                 (0x1 << 28)
+#define PANEL_DISPLAY_CTRL_SELECT_CRT                 (0x2 << 28)
+#define PANEL_DISPLAY_CTRL_FPEN                       BIT(27)
+#define PANEL_DISPLAY_CTRL_VBIASEN                    BIT(26)
+#define PANEL_DISPLAY_CTRL_DATA                       BIT(25)
+#define PANEL_DISPLAY_CTRL_FPVDDEN                    BIT(24)
+#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY               BIT(19)
+#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL               BIT(18)
+#define PANEL_DISPLAY_CTRL_FIFO                       (0x3 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_1                     (0x0 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_3                     (0x1 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_7                     (0x2 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_11                    (0x3 << 16)
+#define DISPLAY_CTRL_CLOCK_PHASE                      BIT(14)
+#define DISPLAY_CTRL_VSYNC_PHASE                      BIT(13)
+#define DISPLAY_CTRL_HSYNC_PHASE                      BIT(12)
+#define PANEL_DISPLAY_CTRL_VSYNC                      BIT(11)
+#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING             BIT(10)
+#define PANEL_DISPLAY_CTRL_COLOR_KEY                  BIT(9)
+#define DISPLAY_CTRL_TIMING                           BIT(8)
+#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR           BIT(7)
+#define PANEL_DISPLAY_CTRL_VERTICAL_PAN               BIT(6)
+#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR         BIT(5)
+#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN             BIT(4)
+#define DISPLAY_CTRL_GAMMA                            BIT(3)
+#define DISPLAY_CTRL_PLANE                            BIT(2)
+#define PANEL_DISPLAY_CTRL_FORMAT                     (0x3 << 0)
+#define PANEL_DISPLAY_CTRL_FORMAT_8                   (0x0 << 0)
+#define PANEL_DISPLAY_CTRL_FORMAT_16                  (0x1 << 0)
+#define PANEL_DISPLAY_CTRL_FORMAT_32                  (0x2 << 0)
 
 #define PANEL_PAN_CTRL                                0x080004
-#define PANEL_PAN_CTRL_VERTICAL_PAN                   31:24
-#define PANEL_PAN_CTRL_VERTICAL_VSYNC                 21:16
-#define PANEL_PAN_CTRL_HORIZONTAL_PAN                 15:8
-#define PANEL_PAN_CTRL_HORIZONTAL_VSYNC               5:0
+#define PANEL_PAN_CTRL_VERTICAL_PAN_MASK              (0xff << 24)
+#define PANEL_PAN_CTRL_VERTICAL_VSYNC_MASK            (0x3f << 16)
+#define PANEL_PAN_CTRL_HORIZONTAL_PAN_MASK            (0xff << 8)
+#define PANEL_PAN_CTRL_HORIZONTAL_VSYNC_MASK          0x3f
 
 #define PANEL_COLOR_KEY                               0x080008
-#define PANEL_COLOR_KEY_MASK                          31:16
-#define PANEL_COLOR_KEY_VALUE                         15:0
+#define PANEL_COLOR_KEY_MASK_MASK                     (0xffff << 16)
+#define PANEL_COLOR_KEY_VALUE_MASK                    0xffff
 
 #define PANEL_FB_ADDRESS                              0x08000C
-#define PANEL_FB_ADDRESS_STATUS                       31:31
-#define PANEL_FB_ADDRESS_STATUS_CURRENT               0
-#define PANEL_FB_ADDRESS_STATUS_PENDING               1
-#define PANEL_FB_ADDRESS_EXT                          27:27
-#define PANEL_FB_ADDRESS_EXT_LOCAL                    0
-#define PANEL_FB_ADDRESS_EXT_EXTERNAL                 1
-#define PANEL_FB_ADDRESS_ADDRESS                      25:0
+#define PANEL_FB_ADDRESS_STATUS                       BIT(31)
+#define PANEL_FB_ADDRESS_EXT                          BIT(27)
+#define PANEL_FB_ADDRESS_ADDRESS_MASK                 0x1ffffff
 
 #define PANEL_FB_WIDTH                                0x080010
-#define PANEL_FB_WIDTH_WIDTH                          29:16
-#define PANEL_FB_WIDTH_OFFSET                         13:0
+#define PANEL_FB_WIDTH_WIDTH_SHIFT                    16
+#define PANEL_FB_WIDTH_WIDTH_MASK                     (0x3fff << 16)
+#define PANEL_FB_WIDTH_OFFSET_MASK                    0x3fff
 
 #define PANEL_WINDOW_WIDTH                            0x080014
-#define PANEL_WINDOW_WIDTH_WIDTH                      27:16
-#define PANEL_WINDOW_WIDTH_X                          11:0
+#define PANEL_WINDOW_WIDTH_WIDTH_SHIFT                16
+#define PANEL_WINDOW_WIDTH_WIDTH_MASK                 (0xfff << 16)
+#define PANEL_WINDOW_WIDTH_X_MASK                     0xfff
 
 #define PANEL_WINDOW_HEIGHT                           0x080018
-#define PANEL_WINDOW_HEIGHT_HEIGHT                    27:16
-#define PANEL_WINDOW_HEIGHT_Y                         11:0
+#define PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT              16
+#define PANEL_WINDOW_HEIGHT_HEIGHT_MASK               (0xfff << 16)
+#define PANEL_WINDOW_HEIGHT_Y_MASK                    0xfff
 
 #define PANEL_PLANE_TL                                0x08001C
-#define PANEL_PLANE_TL_TOP                            26:16
-#define PANEL_PLANE_TL_LEFT                           10:0
+#define PANEL_PLANE_TL_TOP_SHIFT                      16
+#define PANEL_PLANE_TL_TOP_MASK                       (0xeff << 16)
+#define PANEL_PLANE_TL_LEFT_MASK                      0xeff
 
 #define PANEL_PLANE_BR                                0x080020
-#define PANEL_PLANE_BR_BOTTOM                         26:16
-#define PANEL_PLANE_BR_RIGHT                          10:0
+#define PANEL_PLANE_BR_BOTTOM_SHIFT                   16
+#define PANEL_PLANE_BR_BOTTOM_MASK                    (0xeff << 16)
+#define PANEL_PLANE_BR_RIGHT_MASK                     0xeff
 
 #define PANEL_HORIZONTAL_TOTAL                        0x080024
-#define PANEL_HORIZONTAL_TOTAL_TOTAL                  27:16
-#define PANEL_HORIZONTAL_TOTAL_DISPLAY_END            11:0
+#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT            16
+#define PANEL_HORIZONTAL_TOTAL_TOTAL_MASK             (0xfff << 16)
+#define PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK       0xfff
 
 #define PANEL_HORIZONTAL_SYNC                         0x080028
-#define PANEL_HORIZONTAL_SYNC_WIDTH                   23:16
-#define PANEL_HORIZONTAL_SYNC_START                   11:0
+#define PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT             16
+#define PANEL_HORIZONTAL_SYNC_WIDTH_MASK              (0xff << 16)
+#define PANEL_HORIZONTAL_SYNC_START_MASK              0xfff
 
 #define PANEL_VERTICAL_TOTAL                          0x08002C
-#define PANEL_VERTICAL_TOTAL_TOTAL                    26:16
-#define PANEL_VERTICAL_TOTAL_DISPLAY_END              10:0
+#define PANEL_VERTICAL_TOTAL_TOTAL_SHIFT              16
+#define PANEL_VERTICAL_TOTAL_TOTAL_MASK               (0x7ff << 16)
+#define PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK         0x7ff
 
 #define PANEL_VERTICAL_SYNC                           0x080030
-#define PANEL_VERTICAL_SYNC_HEIGHT                    21:16
-#define PANEL_VERTICAL_SYNC_START                     10:0
+#define PANEL_VERTICAL_SYNC_HEIGHT_SHIFT              16
+#define PANEL_VERTICAL_SYNC_HEIGHT_MASK               (0x3f << 16)
+#define PANEL_VERTICAL_SYNC_START_MASK                0x7ff
 
 #define PANEL_CURRENT_LINE                            0x080034
-#define PANEL_CURRENT_LINE_LINE                       10:0
+#define PANEL_CURRENT_LINE_LINE_MASK                  0x7ff
 
 /* Video Control */
 
 #define VIDEO_DISPLAY_CTRL                              0x080040
-#define VIDEO_DISPLAY_CTRL_LINE_BUFFER                  18:18
-#define VIDEO_DISPLAY_CTRL_LINE_BUFFER_DISABLE          0
-#define VIDEO_DISPLAY_CTRL_LINE_BUFFER_ENABLE           1
-#define VIDEO_DISPLAY_CTRL_FIFO                         17:16
-#define VIDEO_DISPLAY_CTRL_FIFO_1                       0
-#define VIDEO_DISPLAY_CTRL_FIFO_3                       1
-#define VIDEO_DISPLAY_CTRL_FIFO_7                       2
-#define VIDEO_DISPLAY_CTRL_FIFO_11                      3
-#define VIDEO_DISPLAY_CTRL_BUFFER                       15:15
-#define VIDEO_DISPLAY_CTRL_BUFFER_0                     0
-#define VIDEO_DISPLAY_CTRL_BUFFER_1                     1
-#define VIDEO_DISPLAY_CTRL_CAPTURE                      14:14
-#define VIDEO_DISPLAY_CTRL_CAPTURE_DISABLE              0
-#define VIDEO_DISPLAY_CTRL_CAPTURE_ENABLE               1
-#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER                13:13
-#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER_DISABLE        0
-#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER_ENABLE         1
-#define VIDEO_DISPLAY_CTRL_BYTE_SWAP                    12:12
-#define VIDEO_DISPLAY_CTRL_BYTE_SWAP_DISABLE            0
-#define VIDEO_DISPLAY_CTRL_BYTE_SWAP_ENABLE             1
-#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE               11:11
-#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE_NORMAL        0
-#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE_HALF          1
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE             10:10
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE_NORMAL      0
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE_HALF        1
-#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE                9:9
-#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE_REPLICATE      0
-#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE_INTERPOLATE    1
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE              8:8
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE_REPLICATE    0
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE_INTERPOLATE  1
-#define VIDEO_DISPLAY_CTRL_PIXEL                        7:4
-#define VIDEO_DISPLAY_CTRL_GAMMA                        3:3
-#define VIDEO_DISPLAY_CTRL_GAMMA_DISABLE                0
-#define VIDEO_DISPLAY_CTRL_GAMMA_ENABLE                 1
-#define VIDEO_DISPLAY_CTRL_PLANE                        2:2
-#define VIDEO_DISPLAY_CTRL_PLANE_DISABLE                0
-#define VIDEO_DISPLAY_CTRL_PLANE_ENABLE                 1
-#define VIDEO_DISPLAY_CTRL_FORMAT                       1:0
-#define VIDEO_DISPLAY_CTRL_FORMAT_8                     0
-#define VIDEO_DISPLAY_CTRL_FORMAT_16                    1
-#define VIDEO_DISPLAY_CTRL_FORMAT_32                    2
-#define VIDEO_DISPLAY_CTRL_FORMAT_YUV                   3
+#define VIDEO_DISPLAY_CTRL_LINE_BUFFER                  BIT(18)
+#define VIDEO_DISPLAY_CTRL_FIFO_MASK                    (0x3 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_1                       (0x0 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_3                       (0x1 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_7                       (0x2 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_11                      (0x3 << 16)
+#define VIDEO_DISPLAY_CTRL_BUFFER                       BIT(15)
+#define VIDEO_DISPLAY_CTRL_CAPTURE                      BIT(14)
+#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER                BIT(13)
+#define VIDEO_DISPLAY_CTRL_BYTE_SWAP                    BIT(12)
+#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE               BIT(11)
+#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE             BIT(10)
+#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE                BIT(9)
+#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE              BIT(8)
+#define VIDEO_DISPLAY_CTRL_PIXEL_MASK                   (0xf << 4)
+#define VIDEO_DISPLAY_CTRL_GAMMA                        BIT(3)
+#define VIDEO_DISPLAY_CTRL_FORMAT_MASK                  0x3
+#define VIDEO_DISPLAY_CTRL_FORMAT_8                     0x0
+#define VIDEO_DISPLAY_CTRL_FORMAT_16                    0x1
+#define VIDEO_DISPLAY_CTRL_FORMAT_32                    0x2
+#define VIDEO_DISPLAY_CTRL_FORMAT_YUV                   0x3
 
 #define VIDEO_FB_0_ADDRESS                            0x080044
-#define VIDEO_FB_0_ADDRESS_STATUS                     31:31
-#define VIDEO_FB_0_ADDRESS_STATUS_CURRENT             0
-#define VIDEO_FB_0_ADDRESS_STATUS_PENDING             1
-#define VIDEO_FB_0_ADDRESS_EXT                        27:27
-#define VIDEO_FB_0_ADDRESS_EXT_LOCAL                  0
-#define VIDEO_FB_0_ADDRESS_EXT_EXTERNAL               1
-#define VIDEO_FB_0_ADDRESS_ADDRESS                    25:0
+#define VIDEO_FB_0_ADDRESS_STATUS                     BIT(31)
+#define VIDEO_FB_0_ADDRESS_EXT                        BIT(27)
+#define VIDEO_FB_0_ADDRESS_ADDRESS_MASK               0x3ffffff
 
 #define VIDEO_FB_WIDTH                                0x080048
-#define VIDEO_FB_WIDTH_WIDTH                          29:16
-#define VIDEO_FB_WIDTH_OFFSET                         13:0
+#define VIDEO_FB_WIDTH_WIDTH_MASK                     (0x3fff << 16)
+#define VIDEO_FB_WIDTH_OFFSET_MASK                    0x3fff
 
 #define VIDEO_FB_0_LAST_ADDRESS                       0x08004C
-#define VIDEO_FB_0_LAST_ADDRESS_EXT                   27:27
-#define VIDEO_FB_0_LAST_ADDRESS_EXT_LOCAL             0
-#define VIDEO_FB_0_LAST_ADDRESS_EXT_EXTERNAL          1
-#define VIDEO_FB_0_LAST_ADDRESS_ADDRESS               25:0
+#define VIDEO_FB_0_LAST_ADDRESS_EXT                   BIT(27)
+#define VIDEO_FB_0_LAST_ADDRESS_ADDRESS_MASK          0x3ffffff
 
 #define VIDEO_PLANE_TL                                0x080050
-#define VIDEO_PLANE_TL_TOP                            26:16
-#define VIDEO_PLANE_TL_LEFT                           10:0
+#define VIDEO_PLANE_TL_TOP_MASK                       (0x7ff << 16)
+#define VIDEO_PLANE_TL_LEFT_MASK                      0x7ff
 
 #define VIDEO_PLANE_BR                                0x080054
-#define VIDEO_PLANE_BR_BOTTOM                         26:16
-#define VIDEO_PLANE_BR_RIGHT                          10:0
+#define VIDEO_PLANE_BR_BOTTOM_MASK                    (0x7ff << 16)
+#define VIDEO_PLANE_BR_RIGHT_MASK                     0x7ff
 
 #define VIDEO_SCALE                                   0x080058
-#define VIDEO_SCALE_VERTICAL_MODE                     31:31
-#define VIDEO_SCALE_VERTICAL_MODE_EXPAND              0
-#define VIDEO_SCALE_VERTICAL_MODE_SHRINK              1
-#define VIDEO_SCALE_VERTICAL_SCALE                    27:16
-#define VIDEO_SCALE_HORIZONTAL_MODE                   15:15
-#define VIDEO_SCALE_HORIZONTAL_MODE_EXPAND            0
-#define VIDEO_SCALE_HORIZONTAL_MODE_SHRINK            1
-#define VIDEO_SCALE_HORIZONTAL_SCALE                  11:0
+#define VIDEO_SCALE_VERTICAL_MODE                     BIT(31)
+#define VIDEO_SCALE_VERTICAL_SCALE_MASK               (0xfff << 16)
+#define VIDEO_SCALE_HORIZONTAL_MODE                   BIT(15)
+#define VIDEO_SCALE_HORIZONTAL_SCALE_MASK             0xfff
 
 #define VIDEO_INITIAL_SCALE                           0x08005C
-#define VIDEO_INITIAL_SCALE_FB_1                      27:16
-#define VIDEO_INITIAL_SCALE_FB_0                      11:0
+#define VIDEO_INITIAL_SCALE_FB_1_MASK                 (0xfff << 16)
+#define VIDEO_INITIAL_SCALE_FB_0_MASK                 0xfff
 
 #define VIDEO_YUV_CONSTANTS                           0x080060
-#define VIDEO_YUV_CONSTANTS_Y                         31:24
-#define VIDEO_YUV_CONSTANTS_R                         23:16
-#define VIDEO_YUV_CONSTANTS_G                         15:8
-#define VIDEO_YUV_CONSTANTS_B                         7:0
+#define VIDEO_YUV_CONSTANTS_Y_MASK                    (0xff << 24)
+#define VIDEO_YUV_CONSTANTS_R_MASK                    (0xff << 16)
+#define VIDEO_YUV_CONSTANTS_G_MASK                    (0xff << 8)
+#define VIDEO_YUV_CONSTANTS_B_MASK                    0xff
 
 #define VIDEO_FB_1_ADDRESS                            0x080064
-#define VIDEO_FB_1_ADDRESS_STATUS                     31:31
-#define VIDEO_FB_1_ADDRESS_STATUS_CURRENT             0
-#define VIDEO_FB_1_ADDRESS_STATUS_PENDING             1
-#define VIDEO_FB_1_ADDRESS_EXT                        27:27
-#define VIDEO_FB_1_ADDRESS_EXT_LOCAL                  0
-#define VIDEO_FB_1_ADDRESS_EXT_EXTERNAL               1
-#define VIDEO_FB_1_ADDRESS_ADDRESS                    25:0
+#define VIDEO_FB_1_ADDRESS_STATUS                     BIT(31)
+#define VIDEO_FB_1_ADDRESS_EXT                        BIT(27)
+#define VIDEO_FB_1_ADDRESS_ADDRESS_MASK               0x3ffffff
 
 #define VIDEO_FB_1_LAST_ADDRESS                       0x080068
-#define VIDEO_FB_1_LAST_ADDRESS_EXT                   27:27
-#define VIDEO_FB_1_LAST_ADDRESS_EXT_LOCAL             0
-#define VIDEO_FB_1_LAST_ADDRESS_EXT_EXTERNAL          1
-#define VIDEO_FB_1_LAST_ADDRESS_ADDRESS               25:0
+#define VIDEO_FB_1_LAST_ADDRESS_EXT                   BIT(27)
+#define VIDEO_FB_1_LAST_ADDRESS_ADDRESS_MASK          0x3ffffff
 
 /* Video Alpha Control */
 
 #define VIDEO_ALPHA_DISPLAY_CTRL                        0x080080
-#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT                 28:28
-#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT_PER_PIXEL       0
-#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT_ALPHA           1
-#define VIDEO_ALPHA_DISPLAY_CTRL_ALPHA                  27:24
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO                   17:16
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_1                 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_3                 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_7                 2
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_11                3
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE             11:11
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE_NORMAL      0
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE_HALF        1
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE             10:10
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE_NORMAL      0
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE_HALF        1
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE              9:9
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE_REPLICATE    0
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE_INTERPOLATE  1
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE              8:8
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE_REPLICATE    0
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE_INTERPOLATE  1
-#define VIDEO_ALPHA_DISPLAY_CTRL_PIXEL                  7:4
-#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY             3:3
-#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY_DISABLE     0
-#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY_ENABLE      1
-#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE                  2:2
-#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE_DISABLE          0
-#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE_ENABLE           1
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT                 1:0
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_8               0
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_16              1
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4       2
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4   3
+#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT                 BIT(28)
+#define VIDEO_ALPHA_DISPLAY_CTRL_ALPHA_MASK             (0xf << 24)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_MASK              (0x3 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_1                 (0x0 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_3                 (0x1 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_7                 (0x2 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_11                (0x3 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE             BIT(11)
+#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE             BIT(10)
+#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE              BIT(9)
+#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE              BIT(8)
+#define VIDEO_ALPHA_DISPLAY_CTRL_PIXEL_MASK             (0xf << 4)
+#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY             BIT(3)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_MASK            0x3
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_8               0x0
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_16              0x1
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4       0x2
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4   0x3
 
 #define VIDEO_ALPHA_FB_ADDRESS                        0x080084
-#define VIDEO_ALPHA_FB_ADDRESS_STATUS                 31:31
-#define VIDEO_ALPHA_FB_ADDRESS_STATUS_CURRENT         0
-#define VIDEO_ALPHA_FB_ADDRESS_STATUS_PENDING         1
-#define VIDEO_ALPHA_FB_ADDRESS_EXT                    27:27
-#define VIDEO_ALPHA_FB_ADDRESS_EXT_LOCAL              0
-#define VIDEO_ALPHA_FB_ADDRESS_EXT_EXTERNAL           1
-#define VIDEO_ALPHA_FB_ADDRESS_ADDRESS                25:0
+#define VIDEO_ALPHA_FB_ADDRESS_STATUS                 BIT(31)
+#define VIDEO_ALPHA_FB_ADDRESS_EXT                    BIT(27)
+#define VIDEO_ALPHA_FB_ADDRESS_ADDRESS_MASK           0x3ffffff
 
 #define VIDEO_ALPHA_FB_WIDTH                          0x080088
-#define VIDEO_ALPHA_FB_WIDTH_WIDTH                    29:16
-#define VIDEO_ALPHA_FB_WIDTH_OFFSET                   13:0
+#define VIDEO_ALPHA_FB_WIDTH_WIDTH_MASK               (0x3fff << 16)
+#define VIDEO_ALPHA_FB_WIDTH_OFFSET_MASK              0x3fff
 
 #define VIDEO_ALPHA_FB_LAST_ADDRESS                   0x08008C
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT               27:27
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT_LOCAL         0
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT_EXTERNAL      1
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_ADDRESS           25:0
+#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT               BIT(27)
+#define VIDEO_ALPHA_FB_LAST_ADDRESS_ADDRESS_MASK      0x3ffffff
 
 #define VIDEO_ALPHA_PLANE_TL                          0x080090
-#define VIDEO_ALPHA_PLANE_TL_TOP                      26:16
-#define VIDEO_ALPHA_PLANE_TL_LEFT                     10:0
+#define VIDEO_ALPHA_PLANE_TL_TOP_MASK                 (0x7ff << 16)
+#define VIDEO_ALPHA_PLANE_TL_LEFT_MASK                0x7ff
 
 #define VIDEO_ALPHA_PLANE_BR                          0x080094
-#define VIDEO_ALPHA_PLANE_BR_BOTTOM                   26:16
-#define VIDEO_ALPHA_PLANE_BR_RIGHT                    10:0
+#define VIDEO_ALPHA_PLANE_BR_BOTTOM_MASK              (0x7ff << 16)
+#define VIDEO_ALPHA_PLANE_BR_RIGHT_MASK               0x7ff
 
 #define VIDEO_ALPHA_SCALE                             0x080098
-#define VIDEO_ALPHA_SCALE_VERTICAL_MODE               31:31
-#define VIDEO_ALPHA_SCALE_VERTICAL_MODE_EXPAND        0
-#define VIDEO_ALPHA_SCALE_VERTICAL_MODE_SHRINK        1
-#define VIDEO_ALPHA_SCALE_VERTICAL_SCALE              27:16
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE             15:15
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE_EXPAND      0
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE_SHRINK      1
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_SCALE            11:0
+#define VIDEO_ALPHA_SCALE_VERTICAL_MODE               BIT(31)
+#define VIDEO_ALPHA_SCALE_VERTICAL_SCALE_MASK         (0xfff << 16)
+#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE             BIT(15)
+#define VIDEO_ALPHA_SCALE_HORIZONTAL_SCALE_MASK       0xfff
 
 #define VIDEO_ALPHA_INITIAL_SCALE                     0x08009C
-#define VIDEO_ALPHA_INITIAL_SCALE_VERTICAL            27:16
-#define VIDEO_ALPHA_INITIAL_SCALE_HORIZONTAL          11:0
+#define VIDEO_ALPHA_INITIAL_SCALE_VERTICAL_MASK       (0xfff << 16)
+#define VIDEO_ALPHA_INITIAL_SCALE_HORIZONTAL_MASK     0xfff
 
 #define VIDEO_ALPHA_CHROMA_KEY                        0x0800A0
-#define VIDEO_ALPHA_CHROMA_KEY_MASK                   31:16
-#define VIDEO_ALPHA_CHROMA_KEY_VALUE                  15:0
+#define VIDEO_ALPHA_CHROMA_KEY_MASK_MASK              (0xffff << 16)
+#define VIDEO_ALPHA_CHROMA_KEY_VALUE_MASK             0xffff
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_01                   0x0800A4
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_23                   0x0800A8
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_45                   0x0800AC
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_67                   0x0800B0
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_89                   0x0800B4
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_AB                   0x0800B8
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_CD                   0x0800BC
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_BLUE_MASK       0x1f
 
 #define VIDEO_ALPHA_COLOR_LOOKUP_EF                   0x0800C0
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F                 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_RED             31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_GREEN           26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_BLUE            20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E                 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_RED             15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_GREEN           10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_BLUE            4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_MASK            (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_RED_MASK        (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_GREEN_MASK      (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_BLUE_MASK       (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_MASK            0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_RED_MASK        (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_GREEN_MASK      (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_BLUE_MASK       0x1f
 
 /* Panel Cursor Control */
 
 #define PANEL_HWC_ADDRESS                             0x0800F0
-#define PANEL_HWC_ADDRESS_ENABLE                      31:31
-#define PANEL_HWC_ADDRESS_ENABLE_DISABLE              0
-#define PANEL_HWC_ADDRESS_ENABLE_ENABLE               1
-#define PANEL_HWC_ADDRESS_EXT                         27:27
-#define PANEL_HWC_ADDRESS_EXT_LOCAL                   0
-#define PANEL_HWC_ADDRESS_EXT_EXTERNAL                1
-#define PANEL_HWC_ADDRESS_ADDRESS                     25:0
+#define PANEL_HWC_ADDRESS_ENABLE                      BIT(31)
+#define PANEL_HWC_ADDRESS_EXT                         BIT(27)
+#define PANEL_HWC_ADDRESS_ADDRESS_MASK                0x3ffffff
 
 #define PANEL_HWC_LOCATION                            0x0800F4
-#define PANEL_HWC_LOCATION_TOP                        27:27
-#define PANEL_HWC_LOCATION_TOP_INSIDE                 0
-#define PANEL_HWC_LOCATION_TOP_OUTSIDE                1
-#define PANEL_HWC_LOCATION_Y                          26:16
-#define PANEL_HWC_LOCATION_LEFT                       11:11
-#define PANEL_HWC_LOCATION_LEFT_INSIDE                0
-#define PANEL_HWC_LOCATION_LEFT_OUTSIDE               1
-#define PANEL_HWC_LOCATION_X                          10:0
+#define PANEL_HWC_LOCATION_TOP                        BIT(27)
+#define PANEL_HWC_LOCATION_Y_MASK                     (0x7ff << 16)
+#define PANEL_HWC_LOCATION_LEFT                       BIT(11)
+#define PANEL_HWC_LOCATION_X_MASK                     0x7ff
 
 #define PANEL_HWC_COLOR_12                            0x0800F8
-#define PANEL_HWC_COLOR_12_2_RGB565                   31:16
-#define PANEL_HWC_COLOR_12_1_RGB565                   15:0
+#define PANEL_HWC_COLOR_12_2_RGB565_MASK              (0xffff << 16)
+#define PANEL_HWC_COLOR_12_1_RGB565_MASK              0xffff
 
 #define PANEL_HWC_COLOR_3                             0x0800FC
-#define PANEL_HWC_COLOR_3_RGB565                      15:0
+#define PANEL_HWC_COLOR_3_RGB565_MASK                 0xffff
 
 /* Old Definitions +++ */
 #define PANEL_HWC_COLOR_01                            0x0800F8
-#define PANEL_HWC_COLOR_01_1_RED                      31:27
-#define PANEL_HWC_COLOR_01_1_GREEN                    26:21
-#define PANEL_HWC_COLOR_01_1_BLUE                     20:16
-#define PANEL_HWC_COLOR_01_0_RED                      15:11
-#define PANEL_HWC_COLOR_01_0_GREEN                    10:5
-#define PANEL_HWC_COLOR_01_0_BLUE                     4:0
+#define PANEL_HWC_COLOR_01_1_RED_MASK                 (0x1f << 27)
+#define PANEL_HWC_COLOR_01_1_GREEN_MASK               (0x3f << 21)
+#define PANEL_HWC_COLOR_01_1_BLUE_MASK                (0x1f << 16)
+#define PANEL_HWC_COLOR_01_0_RED_MASK                 (0x1f << 11)
+#define PANEL_HWC_COLOR_01_0_GREEN_MASK               (0x3f << 5)
+#define PANEL_HWC_COLOR_01_0_BLUE_MASK                0x1f
 
 #define PANEL_HWC_COLOR_2                             0x0800FC
-#define PANEL_HWC_COLOR_2_RED                         15:11
-#define PANEL_HWC_COLOR_2_GREEN                       10:5
-#define PANEL_HWC_COLOR_2_BLUE                        4:0
+#define PANEL_HWC_COLOR_2_RED_MASK                    (0x1f << 11)
+#define PANEL_HWC_COLOR_2_GREEN_MASK                  (0x3f << 5)
+#define PANEL_HWC_COLOR_2_BLUE_MASK                   0x1f
 /* Old Definitions --- */
 
 /* Alpha Control */
 
 #define ALPHA_DISPLAY_CTRL                            0x080100
-#define ALPHA_DISPLAY_CTRL_SELECT                     28:28
-#define ALPHA_DISPLAY_CTRL_SELECT_PER_PIXEL           0
-#define ALPHA_DISPLAY_CTRL_SELECT_ALPHA               1
-#define ALPHA_DISPLAY_CTRL_ALPHA                      27:24
-#define ALPHA_DISPLAY_CTRL_FIFO                       17:16
-#define ALPHA_DISPLAY_CTRL_FIFO_1                     0
-#define ALPHA_DISPLAY_CTRL_FIFO_3                     1
-#define ALPHA_DISPLAY_CTRL_FIFO_7                     2
-#define ALPHA_DISPLAY_CTRL_FIFO_11                    3
-#define ALPHA_DISPLAY_CTRL_PIXEL                      7:4
-#define ALPHA_DISPLAY_CTRL_CHROMA_KEY                 3:3
-#define ALPHA_DISPLAY_CTRL_CHROMA_KEY_DISABLE         0
-#define ALPHA_DISPLAY_CTRL_CHROMA_KEY_ENABLE          1
-#define ALPHA_DISPLAY_CTRL_PLANE                      2:2
-#define ALPHA_DISPLAY_CTRL_PLANE_DISABLE              0
-#define ALPHA_DISPLAY_CTRL_PLANE_ENABLE               1
-#define ALPHA_DISPLAY_CTRL_FORMAT                     1:0
-#define ALPHA_DISPLAY_CTRL_FORMAT_16                  1
-#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4           2
-#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4       3
+#define ALPHA_DISPLAY_CTRL_SELECT                     BIT(28)
+#define ALPHA_DISPLAY_CTRL_ALPHA_MASK                 (0xf << 24)
+#define ALPHA_DISPLAY_CTRL_FIFO_MASK                  (0x3 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_1                     (0x0 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_3                     (0x1 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_7                     (0x2 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_11                    (0x3 << 16)
+#define ALPHA_DISPLAY_CTRL_PIXEL_MASK                 (0xf << 4)
+#define ALPHA_DISPLAY_CTRL_CHROMA_KEY                 BIT(3)
+#define ALPHA_DISPLAY_CTRL_FORMAT_MASK                0x3
+#define ALPHA_DISPLAY_CTRL_FORMAT_16                  0x1
+#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4           0x2
+#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4       0x3
 
 #define ALPHA_FB_ADDRESS                              0x080104
-#define ALPHA_FB_ADDRESS_STATUS                       31:31
-#define ALPHA_FB_ADDRESS_STATUS_CURRENT               0
-#define ALPHA_FB_ADDRESS_STATUS_PENDING               1
-#define ALPHA_FB_ADDRESS_EXT                          27:27
-#define ALPHA_FB_ADDRESS_EXT_LOCAL                    0
-#define ALPHA_FB_ADDRESS_EXT_EXTERNAL                 1
-#define ALPHA_FB_ADDRESS_ADDRESS                      25:0
+#define ALPHA_FB_ADDRESS_STATUS                       BIT(31)
+#define ALPHA_FB_ADDRESS_EXT                          BIT(27)
+#define ALPHA_FB_ADDRESS_ADDRESS_MASK                 0x3ffffff
 
 #define ALPHA_FB_WIDTH                                0x080108
-#define ALPHA_FB_WIDTH_WIDTH                          29:16
-#define ALPHA_FB_WIDTH_OFFSET                         13:0
+#define ALPHA_FB_WIDTH_WIDTH_MASK                     (0x3fff << 16)
+#define ALPHA_FB_WIDTH_OFFSET_MASK                    0x3fff
 
 #define ALPHA_PLANE_TL                                0x08010C
-#define ALPHA_PLANE_TL_TOP                            26:16
-#define ALPHA_PLANE_TL_LEFT                           10:0
+#define ALPHA_PLANE_TL_TOP_MASK                       (0x7ff << 16)
+#define ALPHA_PLANE_TL_LEFT_MASK                      0x7ff
 
 #define ALPHA_PLANE_BR                                0x080110
-#define ALPHA_PLANE_BR_BOTTOM                         26:16
-#define ALPHA_PLANE_BR_RIGHT                          10:0
+#define ALPHA_PLANE_BR_BOTTOM_MASK                    (0x7ff << 16)
+#define ALPHA_PLANE_BR_RIGHT_MASK                     0x7ff
 
 #define ALPHA_CHROMA_KEY                              0x080114
-#define ALPHA_CHROMA_KEY_MASK                         31:16
-#define ALPHA_CHROMA_KEY_VALUE                        15:0
+#define ALPHA_CHROMA_KEY_MASK_MASK                    (0xffff << 16)
+#define ALPHA_CHROMA_KEY_VALUE_MASK                   0xffff
 
 #define ALPHA_COLOR_LOOKUP_01                         0x080118
-#define ALPHA_COLOR_LOOKUP_01_1                       31:16
-#define ALPHA_COLOR_LOOKUP_01_1_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_01_1_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_01_1_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_01_0                       15:0
-#define ALPHA_COLOR_LOOKUP_01_0_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_01_0_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_01_0_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_01_1_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_01_1_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_01_1_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_01_1_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_01_0_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_01_0_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_01_0_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_01_0_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_23                         0x08011C
-#define ALPHA_COLOR_LOOKUP_23_3                       31:16
-#define ALPHA_COLOR_LOOKUP_23_3_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_23_3_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_23_3_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_23_2                       15:0
-#define ALPHA_COLOR_LOOKUP_23_2_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_23_2_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_23_2_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_23_3_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_23_3_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_23_3_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_23_3_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_23_2_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_23_2_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_23_2_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_23_2_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_45                         0x080120
-#define ALPHA_COLOR_LOOKUP_45_5                       31:16
-#define ALPHA_COLOR_LOOKUP_45_5_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_45_5_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_45_5_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_45_4                       15:0
-#define ALPHA_COLOR_LOOKUP_45_4_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_45_4_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_45_4_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_45_5_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_45_5_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_45_5_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_45_5_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_45_4_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_45_4_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_45_4_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_45_4_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_67                         0x080124
-#define ALPHA_COLOR_LOOKUP_67_7                       31:16
-#define ALPHA_COLOR_LOOKUP_67_7_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_67_7_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_67_7_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_67_6                       15:0
-#define ALPHA_COLOR_LOOKUP_67_6_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_67_6_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_67_6_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_67_7_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_67_7_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_67_7_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_67_7_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_67_6_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_67_6_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_67_6_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_67_6_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_89                         0x080128
-#define ALPHA_COLOR_LOOKUP_89_9                       31:16
-#define ALPHA_COLOR_LOOKUP_89_9_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_89_9_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_89_9_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_89_8                       15:0
-#define ALPHA_COLOR_LOOKUP_89_8_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_89_8_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_89_8_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_89_9_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_89_9_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_89_9_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_89_9_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_89_8_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_89_8_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_89_8_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_89_8_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_AB                         0x08012C
-#define ALPHA_COLOR_LOOKUP_AB_B                       31:16
-#define ALPHA_COLOR_LOOKUP_AB_B_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_AB_B_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_AB_B_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_AB_A                       15:0
-#define ALPHA_COLOR_LOOKUP_AB_A_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_AB_A_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_AB_A_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_AB_B_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_AB_B_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_AB_B_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_AB_B_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_AB_A_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_AB_A_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_AB_A_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_AB_A_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_CD                         0x080130
-#define ALPHA_COLOR_LOOKUP_CD_D                       31:16
-#define ALPHA_COLOR_LOOKUP_CD_D_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_CD_D_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_CD_D_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_CD_C                       15:0
-#define ALPHA_COLOR_LOOKUP_CD_C_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_CD_C_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_CD_C_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_CD_D_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_CD_D_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_CD_D_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_CD_D_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_CD_C_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_CD_C_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_CD_C_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_CD_C_BLUE_MASK             0x1f
 
 #define ALPHA_COLOR_LOOKUP_EF                         0x080134
-#define ALPHA_COLOR_LOOKUP_EF_F                       31:16
-#define ALPHA_COLOR_LOOKUP_EF_F_RED                   31:27
-#define ALPHA_COLOR_LOOKUP_EF_F_GREEN                 26:21
-#define ALPHA_COLOR_LOOKUP_EF_F_BLUE                  20:16
-#define ALPHA_COLOR_LOOKUP_EF_E                       15:0
-#define ALPHA_COLOR_LOOKUP_EF_E_RED                   15:11
-#define ALPHA_COLOR_LOOKUP_EF_E_GREEN                 10:5
-#define ALPHA_COLOR_LOOKUP_EF_E_BLUE                  4:0
+#define ALPHA_COLOR_LOOKUP_EF_F_MASK                  (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_EF_F_RED_MASK              (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_EF_F_GREEN_MASK            (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_EF_F_BLUE_MASK             (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_EF_E_MASK                  0xffff
+#define ALPHA_COLOR_LOOKUP_EF_E_RED_MASK              (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_EF_E_GREEN_MASK            (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_EF_E_BLUE_MASK             0x1f
 
 /* CRT Graphics Control */
 
 #define CRT_DISPLAY_CTRL                              0x080200
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK	      31:27
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE      0
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE       0x1F
+#define CRT_DISPLAY_CTRL_RESERVED_MASK                0xfb008200
 
 /* SM750LE definition */
-#define CRT_DISPLAY_CTRL_DPMS                         31:30
-#define CRT_DISPLAY_CTRL_DPMS_0                       0
-#define CRT_DISPLAY_CTRL_DPMS_1                       1
-#define CRT_DISPLAY_CTRL_DPMS_2                       2
-#define CRT_DISPLAY_CTRL_DPMS_3                       3
-#define CRT_DISPLAY_CTRL_CLK                          29:27
-#define CRT_DISPLAY_CTRL_CLK_PLL25                    0
-#define CRT_DISPLAY_CTRL_CLK_PLL41                    1
-#define CRT_DISPLAY_CTRL_CLK_PLL62                    2
-#define CRT_DISPLAY_CTRL_CLK_PLL65                    3
-#define CRT_DISPLAY_CTRL_CLK_PLL74                    4
-#define CRT_DISPLAY_CTRL_CLK_PLL80                    5
-#define CRT_DISPLAY_CTRL_CLK_PLL108                   6
-#define CRT_DISPLAY_CTRL_CLK_RESERVED                 7
-#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC                26:26
-#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_DISABLE        1
-#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE         0
-
-
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK	      25:24
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE	      3
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE      0
+#define CRT_DISPLAY_CTRL_DPMS_SHIFT                   30
+#define CRT_DISPLAY_CTRL_DPMS_MASK                    (0x3 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_0                       (0x0 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_1                       (0x1 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_2                       (0x2 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_3                       (0x3 << 30)
+#define CRT_DISPLAY_CTRL_CLK_MASK                     (0x7 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL25                    (0x0 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL41                    (0x1 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL62                    (0x2 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL65                    (0x3 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL74                    (0x4 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL80                    (0x5 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL108                   (0x6 << 27)
+#define CRT_DISPLAY_CTRL_CLK_RESERVED                 (0x7 << 27)
+#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC                BIT(26)
 
 /* SM750LE definition */
-#define CRT_DISPLAY_CTRL_CRTSELECT                    25:25
-#define CRT_DISPLAY_CTRL_CRTSELECT_VGA                0
-#define CRT_DISPLAY_CTRL_CRTSELECT_CRT                1
-#define CRT_DISPLAY_CTRL_RGBBIT                       24:24
-#define CRT_DISPLAY_CTRL_RGBBIT_24BIT                 0
-#define CRT_DISPLAY_CTRL_RGBBIT_12BIT                 1
-
-
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK	      15:15
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE      0
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE       1
-
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK	      9:9
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_DISABLE      0
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_ENABLE       1
+#define CRT_DISPLAY_CTRL_CRTSELECT                    BIT(25)
+#define CRT_DISPLAY_CTRL_RGBBIT                       BIT(24)
 
 #ifndef VALIDATION_CHIP
-    #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC            26:26
-    #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_DISABLE    1
-    #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE     0
-    #define CRT_DISPLAY_CTRL_CENTERING                24:24
-    #define CRT_DISPLAY_CTRL_CENTERING_DISABLE        0
-    #define CRT_DISPLAY_CTRL_CENTERING_ENABLE         1
+    #define CRT_DISPLAY_CTRL_CENTERING                BIT(24)
 #endif
-#define CRT_DISPLAY_CTRL_LOCK_TIMING                  23:23
-#define CRT_DISPLAY_CTRL_LOCK_TIMING_DISABLE          0
-#define CRT_DISPLAY_CTRL_LOCK_TIMING_ENABLE           1
-#define CRT_DISPLAY_CTRL_EXPANSION                    22:22
-#define CRT_DISPLAY_CTRL_EXPANSION_DISABLE            0
-#define CRT_DISPLAY_CTRL_EXPANSION_ENABLE             1
-#define CRT_DISPLAY_CTRL_VERTICAL_MODE                21:21
-#define CRT_DISPLAY_CTRL_VERTICAL_MODE_REPLICATE      0
-#define CRT_DISPLAY_CTRL_VERTICAL_MODE_INTERPOLATE    1
-#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE              20:20
-#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE_REPLICATE    0
-#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE_INTERPOLATE  1
-#define CRT_DISPLAY_CTRL_SELECT                       19:18
-#define CRT_DISPLAY_CTRL_SELECT_PANEL                 0
-#define CRT_DISPLAY_CTRL_SELECT_VGA                   1
-#define CRT_DISPLAY_CTRL_SELECT_CRT                   2
-#define CRT_DISPLAY_CTRL_FIFO                         17:16
-#define CRT_DISPLAY_CTRL_FIFO_1                       0
-#define CRT_DISPLAY_CTRL_FIFO_3                       1
-#define CRT_DISPLAY_CTRL_FIFO_7                       2
-#define CRT_DISPLAY_CTRL_FIFO_11                      3
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE                  14:14
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_HIGH      0
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_LOW       1
-#define CRT_DISPLAY_CTRL_VSYNC_PHASE                  13:13
-#define CRT_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_HIGH      0
-#define CRT_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_LOW       1
-#define CRT_DISPLAY_CTRL_HSYNC_PHASE                  12:12
-#define CRT_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_HIGH      0
-#define CRT_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_LOW       1
-#define CRT_DISPLAY_CTRL_BLANK                        10:10
-#define CRT_DISPLAY_CTRL_BLANK_OFF                    0
-#define CRT_DISPLAY_CTRL_BLANK_ON                     1
-#define CRT_DISPLAY_CTRL_TIMING                       8:8
-#define CRT_DISPLAY_CTRL_TIMING_DISABLE               0
-#define CRT_DISPLAY_CTRL_TIMING_ENABLE                1
-#define CRT_DISPLAY_CTRL_PIXEL                        7:4
-#define CRT_DISPLAY_CTRL_GAMMA                        3:3
-#define CRT_DISPLAY_CTRL_GAMMA_DISABLE                0
-#define CRT_DISPLAY_CTRL_GAMMA_ENABLE                 1
-#define CRT_DISPLAY_CTRL_PLANE                        2:2
-#define CRT_DISPLAY_CTRL_PLANE_DISABLE                0
-#define CRT_DISPLAY_CTRL_PLANE_ENABLE                 1
-#define CRT_DISPLAY_CTRL_FORMAT                       1:0
-#define CRT_DISPLAY_CTRL_FORMAT_8                     0
-#define CRT_DISPLAY_CTRL_FORMAT_16                    1
-#define CRT_DISPLAY_CTRL_FORMAT_32                    2
-#define CRT_DISPLAY_CTRL_RESERVED_BITS_MASK           0xFF000200
+#define CRT_DISPLAY_CTRL_LOCK_TIMING                  BIT(23)
+#define CRT_DISPLAY_CTRL_EXPANSION                    BIT(22)
+#define CRT_DISPLAY_CTRL_VERTICAL_MODE                BIT(21)
+#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE              BIT(20)
+#define CRT_DISPLAY_CTRL_SELECT_SHIFT                 18
+#define CRT_DISPLAY_CTRL_SELECT_MASK                  (0x3 << 18)
+#define CRT_DISPLAY_CTRL_SELECT_PANEL                 (0x0 << 18)
+#define CRT_DISPLAY_CTRL_SELECT_VGA                   (0x1 << 18)
+#define CRT_DISPLAY_CTRL_SELECT_CRT                   (0x2 << 18)
+#define CRT_DISPLAY_CTRL_FIFO_MASK                    (0x3 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_1                       (0x0 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_3                       (0x1 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_7                       (0x2 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_11                      (0x3 << 16)
+#define CRT_DISPLAY_CTRL_BLANK                        BIT(10)
+#define CRT_DISPLAY_CTRL_PIXEL_MASK                   (0xf << 4)
+#define CRT_DISPLAY_CTRL_FORMAT_MASK                  (0x3 << 0)
+#define CRT_DISPLAY_CTRL_FORMAT_8                     (0x0 << 0)
+#define CRT_DISPLAY_CTRL_FORMAT_16                    (0x1 << 0)
+#define CRT_DISPLAY_CTRL_FORMAT_32                    (0x2 << 0)
 
 #define CRT_FB_ADDRESS                                0x080204
-#define CRT_FB_ADDRESS_STATUS                         31:31
-#define CRT_FB_ADDRESS_STATUS_CURRENT                 0
-#define CRT_FB_ADDRESS_STATUS_PENDING                 1
-#define CRT_FB_ADDRESS_EXT                            27:27
-#define CRT_FB_ADDRESS_EXT_LOCAL                      0
-#define CRT_FB_ADDRESS_EXT_EXTERNAL                   1
-#define CRT_FB_ADDRESS_ADDRESS                        25:0
+#define CRT_FB_ADDRESS_STATUS                         BIT(31)
+#define CRT_FB_ADDRESS_EXT                            BIT(27)
+#define CRT_FB_ADDRESS_ADDRESS_MASK                   0x3ffffff
 
 #define CRT_FB_WIDTH                                  0x080208
-#define CRT_FB_WIDTH_WIDTH                            29:16
-#define CRT_FB_WIDTH_OFFSET                           13:0
+#define CRT_FB_WIDTH_WIDTH_SHIFT                      16
+#define CRT_FB_WIDTH_WIDTH_MASK                       (0x3fff << 16)
+#define CRT_FB_WIDTH_OFFSET_MASK                      0x3fff
 
 #define CRT_HORIZONTAL_TOTAL                          0x08020C
-#define CRT_HORIZONTAL_TOTAL_TOTAL                    27:16
-#define CRT_HORIZONTAL_TOTAL_DISPLAY_END              11:0
+#define CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT              16
+#define CRT_HORIZONTAL_TOTAL_TOTAL_MASK               (0xfff << 16)
+#define CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK         0xfff
 
 #define CRT_HORIZONTAL_SYNC                           0x080210
-#define CRT_HORIZONTAL_SYNC_WIDTH                     23:16
-#define CRT_HORIZONTAL_SYNC_START                     11:0
+#define CRT_HORIZONTAL_SYNC_WIDTH_SHIFT               16
+#define CRT_HORIZONTAL_SYNC_WIDTH_MASK                (0xff << 16)
+#define CRT_HORIZONTAL_SYNC_START_MASK                0xfff
 
 #define CRT_VERTICAL_TOTAL                            0x080214
-#define CRT_VERTICAL_TOTAL_TOTAL                      26:16
-#define CRT_VERTICAL_TOTAL_DISPLAY_END                10:0
+#define CRT_VERTICAL_TOTAL_TOTAL_SHIFT                16
+#define CRT_VERTICAL_TOTAL_TOTAL_MASK                 (0x7ff << 16)
+#define CRT_VERTICAL_TOTAL_DISPLAY_END_MASK           (0x7ff)
 
 #define CRT_VERTICAL_SYNC                             0x080218
-#define CRT_VERTICAL_SYNC_HEIGHT                      21:16
-#define CRT_VERTICAL_SYNC_START                       10:0
+#define CRT_VERTICAL_SYNC_HEIGHT_SHIFT                16
+#define CRT_VERTICAL_SYNC_HEIGHT_MASK                 (0x3f << 16)
+#define CRT_VERTICAL_SYNC_START_MASK                  0x7ff
 
 #define CRT_SIGNATURE_ANALYZER                        0x08021C
-#define CRT_SIGNATURE_ANALYZER_STATUS                 31:16
-#define CRT_SIGNATURE_ANALYZER_ENABLE                 3:3
-#define CRT_SIGNATURE_ANALYZER_ENABLE_DISABLE         0
-#define CRT_SIGNATURE_ANALYZER_ENABLE_ENABLE          1
-#define CRT_SIGNATURE_ANALYZER_RESET                  2:2
-#define CRT_SIGNATURE_ANALYZER_RESET_NORMAL           0
-#define CRT_SIGNATURE_ANALYZER_RESET_RESET            1
-#define CRT_SIGNATURE_ANALYZER_SOURCE                 1:0
+#define CRT_SIGNATURE_ANALYZER_STATUS_MASK            (0xffff << 16)
+#define CRT_SIGNATURE_ANALYZER_ENABLE                 BIT(3)
+#define CRT_SIGNATURE_ANALYZER_RESET                  BIT(2)
+#define CRT_SIGNATURE_ANALYZER_SOURCE_MASK            0x3
 #define CRT_SIGNATURE_ANALYZER_SOURCE_RED             0
 #define CRT_SIGNATURE_ANALYZER_SOURCE_GREEN           1
 #define CRT_SIGNATURE_ANALYZER_SOURCE_BLUE            2
 
 #define CRT_CURRENT_LINE                              0x080220
-#define CRT_CURRENT_LINE_LINE                         10:0
+#define CRT_CURRENT_LINE_LINE_MASK                    0x7ff
 
 #define CRT_MONITOR_DETECT                            0x080224
-#define CRT_MONITOR_DETECT_VALUE                      25:25
-#define CRT_MONITOR_DETECT_VALUE_DISABLE              0
-#define CRT_MONITOR_DETECT_VALUE_ENABLE               1
-#define CRT_MONITOR_DETECT_ENABLE                     24:24
-#define CRT_MONITOR_DETECT_ENABLE_DISABLE             0
-#define CRT_MONITOR_DETECT_ENABLE_ENABLE              1
-#define CRT_MONITOR_DETECT_RED                        23:16
-#define CRT_MONITOR_DETECT_GREEN                      15:8
-#define CRT_MONITOR_DETECT_BLUE                       7:0
+#define CRT_MONITOR_DETECT_VALUE                      BIT(25)
+#define CRT_MONITOR_DETECT_ENABLE                     BIT(24)
+#define CRT_MONITOR_DETECT_RED_MASK                   (0xff << 16)
+#define CRT_MONITOR_DETECT_GREEN_MASK                 (0xff << 8)
+#define CRT_MONITOR_DETECT_BLUE_MASK                  0xff
 
 #define CRT_SCALE                                     0x080228
-#define CRT_SCALE_VERTICAL_MODE                       31:31
-#define CRT_SCALE_VERTICAL_MODE_EXPAND                0
-#define CRT_SCALE_VERTICAL_MODE_SHRINK                1
-#define CRT_SCALE_VERTICAL_SCALE                      27:16
-#define CRT_SCALE_HORIZONTAL_MODE                     15:15
-#define CRT_SCALE_HORIZONTAL_MODE_EXPAND              0
-#define CRT_SCALE_HORIZONTAL_MODE_SHRINK              1
-#define CRT_SCALE_HORIZONTAL_SCALE                    11:0
+#define CRT_SCALE_VERTICAL_MODE                       BIT(31)
+#define CRT_SCALE_VERTICAL_SCALE_MASK                 (0xfff << 16)
+#define CRT_SCALE_HORIZONTAL_MODE                     BIT(15)
+#define CRT_SCALE_HORIZONTAL_SCALE_MASK               0xfff
 
 /* CRT Cursor Control */
 
 #define CRT_HWC_ADDRESS                               0x080230
-#define CRT_HWC_ADDRESS_ENABLE                        31:31
-#define CRT_HWC_ADDRESS_ENABLE_DISABLE                0
-#define CRT_HWC_ADDRESS_ENABLE_ENABLE                 1
-#define CRT_HWC_ADDRESS_EXT                           27:27
-#define CRT_HWC_ADDRESS_EXT_LOCAL                     0
-#define CRT_HWC_ADDRESS_EXT_EXTERNAL                  1
-#define CRT_HWC_ADDRESS_ADDRESS                       25:0
+#define CRT_HWC_ADDRESS_ENABLE                        BIT(31)
+#define CRT_HWC_ADDRESS_EXT                           BIT(27)
+#define CRT_HWC_ADDRESS_ADDRESS_MASK                  0x3ffffff
 
 #define CRT_HWC_LOCATION                              0x080234
-#define CRT_HWC_LOCATION_TOP                          27:27
-#define CRT_HWC_LOCATION_TOP_INSIDE                   0
-#define CRT_HWC_LOCATION_TOP_OUTSIDE                  1
-#define CRT_HWC_LOCATION_Y                            26:16
-#define CRT_HWC_LOCATION_LEFT                         11:11
-#define CRT_HWC_LOCATION_LEFT_INSIDE                  0
-#define CRT_HWC_LOCATION_LEFT_OUTSIDE                 1
-#define CRT_HWC_LOCATION_X                            10:0
+#define CRT_HWC_LOCATION_TOP                          BIT(27)
+#define CRT_HWC_LOCATION_Y_MASK                       (0x7ff << 16)
+#define CRT_HWC_LOCATION_LEFT                         BIT(11)
+#define CRT_HWC_LOCATION_X_MASK                       0x7ff
 
 #define CRT_HWC_COLOR_12                              0x080238
-#define CRT_HWC_COLOR_12_2_RGB565                     31:16
-#define CRT_HWC_COLOR_12_1_RGB565                     15:0
+#define CRT_HWC_COLOR_12_2_RGB565_MASK                (0xffff << 16)
+#define CRT_HWC_COLOR_12_1_RGB565_MASK                0xffff
 
 #define CRT_HWC_COLOR_3                               0x08023C
-#define CRT_HWC_COLOR_3_RGB565                        15:0
+#define CRT_HWC_COLOR_3_RGB565_MASK                   0xffff
 
 /* This vertical expansion below start at 0x080240 ~ 0x080264 */
 #define CRT_VERTICAL_EXPANSION                        0x080240
 #ifndef VALIDATION_CHIP
-    #define CRT_VERTICAL_CENTERING_VALUE              31:24
+    #define CRT_VERTICAL_CENTERING_VALUE_MASK         (0xff << 24)
 #endif
-#define CRT_VERTICAL_EXPANSION_COMPARE_VALUE          23:16
-#define CRT_VERTICAL_EXPANSION_LINE_BUFFER            15:12
-#define CRT_VERTICAL_EXPANSION_SCALE_FACTOR           11:0
+#define CRT_VERTICAL_EXPANSION_COMPARE_VALUE_MASK     (0xff << 16)
+#define CRT_VERTICAL_EXPANSION_LINE_BUFFER_MASK       (0xf << 12)
+#define CRT_VERTICAL_EXPANSION_SCALE_FACTOR_MASK      0xfff
 
 /* This horizontal expansion below start at 0x080268 ~ 0x08027C */
 #define CRT_HORIZONTAL_EXPANSION                      0x080268
 #ifndef VALIDATION_CHIP
-    #define CRT_HORIZONTAL_CENTERING_VALUE            31:24
+    #define CRT_HORIZONTAL_CENTERING_VALUE_MASK       (0xff << 24)
 #endif
-#define CRT_HORIZONTAL_EXPANSION_COMPARE_VALUE        23:16
-#define CRT_HORIZONTAL_EXPANSION_SCALE_FACTOR         11:0
+#define CRT_HORIZONTAL_EXPANSION_COMPARE_VALUE_MASK   (0xff << 16)
+#define CRT_HORIZONTAL_EXPANSION_SCALE_FACTOR_MASK    0xfff
 
 #ifndef VALIDATION_CHIP
     /* Auto Centering */
     #define CRT_AUTO_CENTERING_TL                     0x080280
-    #define CRT_AUTO_CENTERING_TL_TOP                 26:16
-    #define CRT_AUTO_CENTERING_TL_LEFT                10:0
+    #define CRT_AUTO_CENTERING_TL_TOP_MASK            (0x7ff << 16)
+    #define CRT_AUTO_CENTERING_TL_LEFT_MASK           0x7ff
 
     #define CRT_AUTO_CENTERING_BR                     0x080284
-    #define CRT_AUTO_CENTERING_BR_BOTTOM              26:16
-    #define CRT_AUTO_CENTERING_BR_RIGHT               10:0
+    #define CRT_AUTO_CENTERING_BR_BOTTOM_MASK         (0x7ff << 16)
+    #define CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT        16
+    #define CRT_AUTO_CENTERING_BR_RIGHT_MASK          0x7ff
 #endif
 
 /* sm750le new register to control panel output */
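(Illustrative aside, not part of the patch: a minimal sketch of how the new
*_MASK/*_SHIFT pairs introduced in this hunk are meant to be combined with
plain mask-and-shift arithmetic. The helper names below are hypothetical;
only CRT_FB_WIDTH_WIDTH_MASK and CRT_FB_WIDTH_WIDTH_SHIFT come from the
definitions added above.)

static inline unsigned int crt_fb_width_get(unsigned int reg)
{
	/* extract the 14-bit WIDTH field (bits 29:16) */
	return (reg & CRT_FB_WIDTH_WIDTH_MASK) >> CRT_FB_WIDTH_WIDTH_SHIFT;
}

static inline unsigned int crt_fb_width_set(unsigned int reg, unsigned int w)
{
	/* clear the field, then merge the new value without touching other bits */
	reg &= ~CRT_FB_WIDTH_WIDTH_MASK;
	return reg | ((w << CRT_FB_WIDTH_WIDTH_SHIFT) & CRT_FB_WIDTH_WIDTH_MASK);
}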
@@ -1877,155 +1161,106 @@
 /* Color Space Conversion registers. */
 
 #define CSC_Y_SOURCE_BASE                               0x1000C8
-#define CSC_Y_SOURCE_BASE_EXT                           27:27
-#define CSC_Y_SOURCE_BASE_EXT_LOCAL                     0
-#define CSC_Y_SOURCE_BASE_EXT_EXTERNAL                  1
-#define CSC_Y_SOURCE_BASE_CS                            26:26
-#define CSC_Y_SOURCE_BASE_CS_0                          0
-#define CSC_Y_SOURCE_BASE_CS_1                          1
-#define CSC_Y_SOURCE_BASE_ADDRESS                       25:0
+#define CSC_Y_SOURCE_BASE_EXT                           BIT(27)
+#define CSC_Y_SOURCE_BASE_CS                            BIT(26)
+#define CSC_Y_SOURCE_BASE_ADDRESS_MASK                  0x3ffffff
 
 #define CSC_CONSTANTS                                   0x1000CC
-#define CSC_CONSTANTS_Y                                 31:24
-#define CSC_CONSTANTS_R                                 23:16
-#define CSC_CONSTANTS_G                                 15:8
-#define CSC_CONSTANTS_B                                 7:0
+#define CSC_CONSTANTS_Y_MASK                            (0xff << 24)
+#define CSC_CONSTANTS_R_MASK                            (0xff << 16)
+#define CSC_CONSTANTS_G_MASK                            (0xff << 8)
+#define CSC_CONSTANTS_B_MASK                            0xff
 
 #define CSC_Y_SOURCE_X                                  0x1000D0
-#define CSC_Y_SOURCE_X_INTEGER                          26:16
-#define CSC_Y_SOURCE_X_FRACTION                         15:3
+#define CSC_Y_SOURCE_X_INTEGER_MASK                     (0x7ff << 16)
+#define CSC_Y_SOURCE_X_FRACTION_MASK                    (0x1fff << 3)
 
 #define CSC_Y_SOURCE_Y                                  0x1000D4
-#define CSC_Y_SOURCE_Y_INTEGER                          27:16
-#define CSC_Y_SOURCE_Y_FRACTION                         15:3
+#define CSC_Y_SOURCE_Y_INTEGER_MASK                     (0xfff << 16)
+#define CSC_Y_SOURCE_Y_FRACTION_MASK                    (0x1fff << 3)
 
 #define CSC_U_SOURCE_BASE                               0x1000D8
-#define CSC_U_SOURCE_BASE_EXT                           27:27
-#define CSC_U_SOURCE_BASE_EXT_LOCAL                     0
-#define CSC_U_SOURCE_BASE_EXT_EXTERNAL                  1
-#define CSC_U_SOURCE_BASE_CS                            26:26
-#define CSC_U_SOURCE_BASE_CS_0                          0
-#define CSC_U_SOURCE_BASE_CS_1                          1
-#define CSC_U_SOURCE_BASE_ADDRESS                       25:0
+#define CSC_U_SOURCE_BASE_EXT                           BIT(27)
+#define CSC_U_SOURCE_BASE_CS                            BIT(26)
+#define CSC_U_SOURCE_BASE_ADDRESS_MASK                  0x3ffffff
 
 #define CSC_V_SOURCE_BASE                               0x1000DC
-#define CSC_V_SOURCE_BASE_EXT                           27:27
-#define CSC_V_SOURCE_BASE_EXT_LOCAL                     0
-#define CSC_V_SOURCE_BASE_EXT_EXTERNAL                  1
-#define CSC_V_SOURCE_BASE_CS                            26:26
-#define CSC_V_SOURCE_BASE_CS_0                          0
-#define CSC_V_SOURCE_BASE_CS_1                          1
-#define CSC_V_SOURCE_BASE_ADDRESS                       25:0
+#define CSC_V_SOURCE_BASE_EXT                           BIT(27)
+#define CSC_V_SOURCE_BASE_CS                            BIT(26)
+#define CSC_V_SOURCE_BASE_ADDRESS_MASK                  0x3ffffff
 
 #define CSC_SOURCE_DIMENSION                            0x1000E0
-#define CSC_SOURCE_DIMENSION_X                          31:16
-#define CSC_SOURCE_DIMENSION_Y                          15:0
+#define CSC_SOURCE_DIMENSION_X_MASK                     (0xffff << 16)
+#define CSC_SOURCE_DIMENSION_Y_MASK                     0xffff
 
 #define CSC_SOURCE_PITCH                                0x1000E4
-#define CSC_SOURCE_PITCH_Y                              31:16
-#define CSC_SOURCE_PITCH_UV                             15:0
+#define CSC_SOURCE_PITCH_Y_MASK                         (0xffff << 16)
+#define CSC_SOURCE_PITCH_UV_MASK                        0xffff
 
 #define CSC_DESTINATION                                 0x1000E8
-#define CSC_DESTINATION_WRAP                            31:31
-#define CSC_DESTINATION_WRAP_DISABLE                    0
-#define CSC_DESTINATION_WRAP_ENABLE                     1
-#define CSC_DESTINATION_X                               27:16
-#define CSC_DESTINATION_Y                               11:0
+#define CSC_DESTINATION_WRAP                            BIT(31)
+#define CSC_DESTINATION_X_MASK                          (0xfff << 16)
+#define CSC_DESTINATION_Y_MASK                          0xfff
 
 #define CSC_DESTINATION_DIMENSION                       0x1000EC
-#define CSC_DESTINATION_DIMENSION_X                     31:16
-#define CSC_DESTINATION_DIMENSION_Y                     15:0
+#define CSC_DESTINATION_DIMENSION_X_MASK                (0xffff << 16)
+#define CSC_DESTINATION_DIMENSION_Y_MASK                0xffff
 
 #define CSC_DESTINATION_PITCH                           0x1000F0
-#define CSC_DESTINATION_PITCH_X                         31:16
-#define CSC_DESTINATION_PITCH_Y                         15:0
+#define CSC_DESTINATION_PITCH_X_MASK                    (0xffff << 16)
+#define CSC_DESTINATION_PITCH_Y_MASK                    0xffff
 
 #define CSC_SCALE_FACTOR                                0x1000F4
-#define CSC_SCALE_FACTOR_HORIZONTAL                     31:16
-#define CSC_SCALE_FACTOR_VERTICAL                       15:0
+#define CSC_SCALE_FACTOR_HORIZONTAL_MASK                (0xffff << 16)
+#define CSC_SCALE_FACTOR_VERTICAL_MASK                  0xffff
 
 #define CSC_DESTINATION_BASE                            0x1000F8
-#define CSC_DESTINATION_BASE_EXT                        27:27
-#define CSC_DESTINATION_BASE_EXT_LOCAL                  0
-#define CSC_DESTINATION_BASE_EXT_EXTERNAL               1
-#define CSC_DESTINATION_BASE_CS                         26:26
-#define CSC_DESTINATION_BASE_CS_0                       0
-#define CSC_DESTINATION_BASE_CS_1                       1
-#define CSC_DESTINATION_BASE_ADDRESS                    25:0
+#define CSC_DESTINATION_BASE_EXT                        BIT(27)
+#define CSC_DESTINATION_BASE_CS                         BIT(26)
+#define CSC_DESTINATION_BASE_ADDRESS_MASK               0x3ffffff
 
 #define CSC_CONTROL                                     0x1000FC
-#define CSC_CONTROL_STATUS                              31:31
-#define CSC_CONTROL_STATUS_STOP                         0
-#define CSC_CONTROL_STATUS_START                        1
-#define CSC_CONTROL_SOURCE_FORMAT                       30:28
-#define CSC_CONTROL_SOURCE_FORMAT_YUV422                0
-#define CSC_CONTROL_SOURCE_FORMAT_YUV420I               1
-#define CSC_CONTROL_SOURCE_FORMAT_YUV420                2
-#define CSC_CONTROL_SOURCE_FORMAT_YVU9                  3
-#define CSC_CONTROL_SOURCE_FORMAT_IYU1                  4
-#define CSC_CONTROL_SOURCE_FORMAT_IYU2                  5
-#define CSC_CONTROL_SOURCE_FORMAT_RGB565                6
-#define CSC_CONTROL_SOURCE_FORMAT_RGB8888               7
-#define CSC_CONTROL_DESTINATION_FORMAT                  27:26
-#define CSC_CONTROL_DESTINATION_FORMAT_RGB565           0
-#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888          1
-#define CSC_CONTROL_HORIZONTAL_FILTER                   25:25
-#define CSC_CONTROL_HORIZONTAL_FILTER_DISABLE           0
-#define CSC_CONTROL_HORIZONTAL_FILTER_ENABLE            1
-#define CSC_CONTROL_VERTICAL_FILTER                     24:24
-#define CSC_CONTROL_VERTICAL_FILTER_DISABLE             0
-#define CSC_CONTROL_VERTICAL_FILTER_ENABLE              1
-#define CSC_CONTROL_BYTE_ORDER                          23:23
-#define CSC_CONTROL_BYTE_ORDER_YUYV                     0
-#define CSC_CONTROL_BYTE_ORDER_UYVY                     1
+#define CSC_CONTROL_STATUS                              BIT(31)
+#define CSC_CONTROL_SOURCE_FORMAT_MASK                  (0x7 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YUV422                (0x0 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YUV420I               (0x1 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YUV420                (0x2 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YVU9                  (0x3 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_IYU1                  (0x4 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_IYU2                  (0x5 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_RGB565                (0x6 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_RGB8888               (0x7 << 28)
+#define CSC_CONTROL_DESTINATION_FORMAT_MASK             (0x3 << 26)
+#define CSC_CONTROL_DESTINATION_FORMAT_RGB565           (0x0 << 26)
+#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888          (0x1 << 26)
+#define CSC_CONTROL_HORIZONTAL_FILTER                   BIT(25)
+#define CSC_CONTROL_VERTICAL_FILTER                     BIT(24)
+#define CSC_CONTROL_BYTE_ORDER                          BIT(23)
 
 #define DE_DATA_PORT                                    0x110000
 
 #define I2C_BYTE_COUNT                                  0x010040
-#define I2C_BYTE_COUNT_COUNT                            3:0
+#define I2C_BYTE_COUNT_COUNT_MASK                       0xf
 
 #define I2C_CTRL                                        0x010041
-#define I2C_CTRL_INT                                    4:4
-#define I2C_CTRL_INT_DISABLE                            0
-#define I2C_CTRL_INT_ENABLE                             1
-#define I2C_CTRL_DIR                                    3:3
-#define I2C_CTRL_DIR_WR                                 0
-#define I2C_CTRL_DIR_RD                                 1
-#define I2C_CTRL_CTRL                                   2:2
-#define I2C_CTRL_CTRL_STOP                              0
-#define I2C_CTRL_CTRL_START                             1
-#define I2C_CTRL_MODE                                   1:1
-#define I2C_CTRL_MODE_STANDARD                          0
-#define I2C_CTRL_MODE_FAST                              1
-#define I2C_CTRL_EN                                     0:0
-#define I2C_CTRL_EN_DISABLE                             0
-#define I2C_CTRL_EN_ENABLE                              1
+#define I2C_CTRL_INT                                    BIT(4)
+#define I2C_CTRL_DIR                                    BIT(3)
+#define I2C_CTRL_CTRL                                   BIT(2)
+#define I2C_CTRL_MODE                                   BIT(1)
+#define I2C_CTRL_EN                                     BIT(0)
 
 #define I2C_STATUS                                      0x010042
-#define I2C_STATUS_TX                                   3:3
-#define I2C_STATUS_TX_PROGRESS                          0
-#define I2C_STATUS_TX_COMPLETED                         1
-#define I2C_TX_DONE                                     0x08
-#define I2C_STATUS_ERR                                  2:2
-#define I2C_STATUS_ERR_NORMAL                           0
-#define I2C_STATUS_ERR_ERROR                            1
-#define I2C_STATUS_ERR_CLEAR                            0
-#define I2C_STATUS_ACK                                  1:1
-#define I2C_STATUS_ACK_RECEIVED                         0
-#define I2C_STATUS_ACK_NOT                              1
-#define I2C_STATUS_BSY                                  0:0
-#define I2C_STATUS_BSY_IDLE                             0
-#define I2C_STATUS_BSY_BUSY                             1
+#define I2C_STATUS_TX                                   BIT(3)
+#define I2C_STATUS_ERR                                  BIT(2)
+#define I2C_STATUS_ACK                                  BIT(1)
+#define I2C_STATUS_BSY                                  BIT(0)
 
 #define I2C_RESET                                       0x010042
-#define I2C_RESET_BUS_ERROR                             2:2
-#define I2C_RESET_BUS_ERROR_CLEAR                       0
+#define I2C_RESET_BUS_ERROR                             BIT(2)
 
 #define I2C_SLAVE_ADDRESS                               0x010043
-#define I2C_SLAVE_ADDRESS_ADDRESS                       7:1
-#define I2C_SLAVE_ADDRESS_RW                            0:0
-#define I2C_SLAVE_ADDRESS_RW_W                          0
-#define I2C_SLAVE_ADDRESS_RW_R                          1
+#define I2C_SLAVE_ADDRESS_ADDRESS_MASK                  (0x7f << 1)
+#define I2C_SLAVE_ADDRESS_RW                            BIT(0)
 
 #define I2C_DATA0                                       0x010044
 #define I2C_DATA1                                       0x010045
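
With the old colon notation gone, the I2C fields above reduce to BIT() flags plus one shifted address mask, so register values can be composed with plain C operators. A minimal sketch, relying on the definitions above and an illustrative helper name (the driver's real i2c code wraps this differently):

    /* Pack a 7-bit slave address and the R/W flag for I2C_SLAVE_ADDRESS. */
    static u8 i2c_slave_addr_byte(u8 addr7, bool read)
    {
        return ((addr7 << 1) & I2C_SLAVE_ADDRESS_ADDRESS_MASK) |
               (read ? I2C_SLAVE_ADDRESS_RW : 0);
    }
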
@@ -2046,120 +1281,59 @@
 
 
 #define ZV0_CAPTURE_CTRL                                0x090000
-#define ZV0_CAPTURE_CTRL_FIELD_INPUT                    27:27
-#define ZV0_CAPTURE_CTRL_FIELD_INPUT_EVEN_FIELD         0
-#define ZV0_CAPTURE_CTRL_FIELD_INPUT_ODD_FIELD          1
-#define ZV0_CAPTURE_CTRL_SCAN                           26:26
-#define ZV0_CAPTURE_CTRL_SCAN_PROGRESSIVE               0
-#define ZV0_CAPTURE_CTRL_SCAN_INTERLACE                 1
-#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER                 25:25
-#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER_0               0
-#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER_1               1
-#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC                  24:24
-#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC_INACTIVE         0
-#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC_ACTIVE           1
-#define ZV0_CAPTURE_CTRL_ADJ                            19:19
-#define ZV0_CAPTURE_CTRL_ADJ_NORMAL                     0
-#define ZV0_CAPTURE_CTRL_ADJ_DELAY                      1
-#define ZV0_CAPTURE_CTRL_HA                             18:18
-#define ZV0_CAPTURE_CTRL_HA_DISABLE                     0
-#define ZV0_CAPTURE_CTRL_HA_ENABLE                      1
-#define ZV0_CAPTURE_CTRL_VSK                            17:17
-#define ZV0_CAPTURE_CTRL_VSK_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_VSK_ENABLE                     1
-#define ZV0_CAPTURE_CTRL_HSK                            16:16
-#define ZV0_CAPTURE_CTRL_HSK_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_HSK_ENABLE                     1
-#define ZV0_CAPTURE_CTRL_FD                             15:15
-#define ZV0_CAPTURE_CTRL_FD_RISING                      0
-#define ZV0_CAPTURE_CTRL_FD_FALLING                     1
-#define ZV0_CAPTURE_CTRL_VP                             14:14
-#define ZV0_CAPTURE_CTRL_VP_HIGH                        0
-#define ZV0_CAPTURE_CTRL_VP_LOW                         1
-#define ZV0_CAPTURE_CTRL_HP                             13:13
-#define ZV0_CAPTURE_CTRL_HP_HIGH                        0
-#define ZV0_CAPTURE_CTRL_HP_LOW                         1
-#define ZV0_CAPTURE_CTRL_CP                             12:12
-#define ZV0_CAPTURE_CTRL_CP_HIGH                        0
-#define ZV0_CAPTURE_CTRL_CP_LOW                         1
-#define ZV0_CAPTURE_CTRL_UVS                            11:11
-#define ZV0_CAPTURE_CTRL_UVS_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_UVS_ENABLE                     1
-#define ZV0_CAPTURE_CTRL_BS                             10:10
-#define ZV0_CAPTURE_CTRL_BS_DISABLE                     0
-#define ZV0_CAPTURE_CTRL_BS_ENABLE                      1
-#define ZV0_CAPTURE_CTRL_CS                             9:9
-#define ZV0_CAPTURE_CTRL_CS_16                          0
-#define ZV0_CAPTURE_CTRL_CS_8                           1
-#define ZV0_CAPTURE_CTRL_CF                             8:8
-#define ZV0_CAPTURE_CTRL_CF_YUV                         0
-#define ZV0_CAPTURE_CTRL_CF_RGB                         1
-#define ZV0_CAPTURE_CTRL_FS                             7:7
-#define ZV0_CAPTURE_CTRL_FS_DISABLE                     0
-#define ZV0_CAPTURE_CTRL_FS_ENABLE                      1
-#define ZV0_CAPTURE_CTRL_WEAVE                          6:6
-#define ZV0_CAPTURE_CTRL_WEAVE_DISABLE                  0
-#define ZV0_CAPTURE_CTRL_WEAVE_ENABLE                   1
-#define ZV0_CAPTURE_CTRL_BOB                            5:5
-#define ZV0_CAPTURE_CTRL_BOB_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_BOB_ENABLE                     1
-#define ZV0_CAPTURE_CTRL_DB                             4:4
-#define ZV0_CAPTURE_CTRL_DB_DISABLE                     0
-#define ZV0_CAPTURE_CTRL_DB_ENABLE                      1
-#define ZV0_CAPTURE_CTRL_CC                             3:3
-#define ZV0_CAPTURE_CTRL_CC_CONTINUE                    0
-#define ZV0_CAPTURE_CTRL_CC_CONDITION                   1
-#define ZV0_CAPTURE_CTRL_RGB                            2:2
-#define ZV0_CAPTURE_CTRL_RGB_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_RGB_ENABLE                     1
-#define ZV0_CAPTURE_CTRL_656                            1:1
-#define ZV0_CAPTURE_CTRL_656_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_656_ENABLE                     1
-#define ZV0_CAPTURE_CTRL_CAP                            0:0
-#define ZV0_CAPTURE_CTRL_CAP_DISABLE                    0
-#define ZV0_CAPTURE_CTRL_CAP_ENABLE                     1
+#define ZV0_CAPTURE_CTRL_FIELD_INPUT                    BIT(27)
+#define ZV0_CAPTURE_CTRL_SCAN                           BIT(26)
+#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER                 BIT(25)
+#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC                  BIT(24)
+#define ZV0_CAPTURE_CTRL_ADJ                            BIT(19)
+#define ZV0_CAPTURE_CTRL_HA                             BIT(18)
+#define ZV0_CAPTURE_CTRL_VSK                            BIT(17)
+#define ZV0_CAPTURE_CTRL_HSK                            BIT(16)
+#define ZV0_CAPTURE_CTRL_FD                             BIT(15)
+#define ZV0_CAPTURE_CTRL_VP                             BIT(14)
+#define ZV0_CAPTURE_CTRL_HP                             BIT(13)
+#define ZV0_CAPTURE_CTRL_CP                             BIT(12)
+#define ZV0_CAPTURE_CTRL_UVS                            BIT(11)
+#define ZV0_CAPTURE_CTRL_BS                             BIT(10)
+#define ZV0_CAPTURE_CTRL_CS                             BIT(9)
+#define ZV0_CAPTURE_CTRL_CF                             BIT(8)
+#define ZV0_CAPTURE_CTRL_FS                             BIT(7)
+#define ZV0_CAPTURE_CTRL_WEAVE                          BIT(6)
+#define ZV0_CAPTURE_CTRL_BOB                            BIT(5)
+#define ZV0_CAPTURE_CTRL_DB                             BIT(4)
+#define ZV0_CAPTURE_CTRL_CC                             BIT(3)
+#define ZV0_CAPTURE_CTRL_RGB                            BIT(2)
+#define ZV0_CAPTURE_CTRL_656                            BIT(1)
+#define ZV0_CAPTURE_CTRL_CAP                            BIT(0)
 
 #define ZV0_CAPTURE_CLIP                                0x090004
-#define ZV0_CAPTURE_CLIP_YCLIP_EVEN_FIELD                25:16
-#define ZV0_CAPTURE_CLIP_YCLIP                          25:16
-#define ZV0_CAPTURE_CLIP_XCLIP                          9:0
+#define ZV0_CAPTURE_CLIP_EYCLIP_MASK                    (0x3ff << 16)
+#define ZV0_CAPTURE_CLIP_XCLIP_MASK                     0x3ff
 
 #define ZV0_CAPTURE_SIZE                                0x090008
-#define ZV0_CAPTURE_SIZE_HEIGHT                         26:16
-#define ZV0_CAPTURE_SIZE_WIDTH                          10:0
+#define ZV0_CAPTURE_SIZE_HEIGHT_MASK                    (0x7ff << 16)
+#define ZV0_CAPTURE_SIZE_WIDTH_MASK                     0x7ff
 
 #define ZV0_CAPTURE_BUF0_ADDRESS                        0x09000C
-#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS                 31:31
-#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS_CURRENT         0
-#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS_PENDING         1
-#define ZV0_CAPTURE_BUF0_ADDRESS_EXT                    27:27
-#define ZV0_CAPTURE_BUF0_ADDRESS_EXT_LOCAL              0
-#define ZV0_CAPTURE_BUF0_ADDRESS_EXT_EXTERNAL           1
-#define ZV0_CAPTURE_BUF0_ADDRESS_CS                     26:26
-#define ZV0_CAPTURE_BUF0_ADDRESS_CS_0                   0
-#define ZV0_CAPTURE_BUF0_ADDRESS_CS_1                   1
-#define ZV0_CAPTURE_BUF0_ADDRESS_ADDRESS                25:0
+#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS                 BIT(31)
+#define ZV0_CAPTURE_BUF0_ADDRESS_EXT                    BIT(27)
+#define ZV0_CAPTURE_BUF0_ADDRESS_CS                     BIT(26)
+#define ZV0_CAPTURE_BUF0_ADDRESS_ADDRESS_MASK           0x3ffffff
 
 #define ZV0_CAPTURE_BUF1_ADDRESS                        0x090010
-#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS                 31:31
-#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS_CURRENT         0
-#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS_PENDING         1
-#define ZV0_CAPTURE_BUF1_ADDRESS_EXT                    27:27
-#define ZV0_CAPTURE_BUF1_ADDRESS_EXT_LOCAL              0
-#define ZV0_CAPTURE_BUF1_ADDRESS_EXT_EXTERNAL           1
-#define ZV0_CAPTURE_BUF1_ADDRESS_CS                     26:26
-#define ZV0_CAPTURE_BUF1_ADDRESS_CS_0                   0
-#define ZV0_CAPTURE_BUF1_ADDRESS_CS_1                   1
-#define ZV0_CAPTURE_BUF1_ADDRESS_ADDRESS                25:0
+#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS                 BIT(31)
+#define ZV0_CAPTURE_BUF1_ADDRESS_EXT                    BIT(27)
+#define ZV0_CAPTURE_BUF1_ADDRESS_CS                     BIT(26)
+#define ZV0_CAPTURE_BUF1_ADDRESS_ADDRESS_MASK           0x3ffffff
 
 #define ZV0_CAPTURE_BUF_OFFSET                          0x090014
 #ifndef VALIDATION_CHIP
-    #define ZV0_CAPTURE_BUF_OFFSET_YCLIP_ODD_FIELD      25:16
+    #define ZV0_CAPTURE_BUF_OFFSET_YCLIP_ODD_FIELD      (0x3ff << 16)
 #endif
-#define ZV0_CAPTURE_BUF_OFFSET_OFFSET                   15:0
+#define ZV0_CAPTURE_BUF_OFFSET_OFFSET_MASK              0xffff
 
 #define ZV0_CAPTURE_FIFO_CTRL                           0x090018
-#define ZV0_CAPTURE_FIFO_CTRL_FIFO                      2:0
+#define ZV0_CAPTURE_FIFO_CTRL_FIFO_MASK                 0x7
 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_0                    0
 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_1                    1
 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_2                    2
@@ -2170,130 +1344,68 @@
 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_7                    7
 
 #define ZV0_CAPTURE_YRGB_CONST                          0x09001C
-#define ZV0_CAPTURE_YRGB_CONST_Y                        31:24
-#define ZV0_CAPTURE_YRGB_CONST_R                        23:16
-#define ZV0_CAPTURE_YRGB_CONST_G                        15:8
-#define ZV0_CAPTURE_YRGB_CONST_B                        7:0
+#define ZV0_CAPTURE_YRGB_CONST_Y_MASK                   (0xff << 24)
+#define ZV0_CAPTURE_YRGB_CONST_R_MASK                   (0xff << 16)
+#define ZV0_CAPTURE_YRGB_CONST_G_MASK                   (0xff << 8)
+#define ZV0_CAPTURE_YRGB_CONST_B_MASK                   0xff
 
 #define ZV0_CAPTURE_LINE_COMP                           0x090020
-#define ZV0_CAPTURE_LINE_COMP_LC                        10:0
+#define ZV0_CAPTURE_LINE_COMP_LC_MASK                   0x7ff
 
 /* ZV1 */
 
 #define ZV1_CAPTURE_CTRL                                0x098000
-#define ZV1_CAPTURE_CTRL_FIELD_INPUT                    27:27
-#define ZV1_CAPTURE_CTRL_FIELD_INPUT_EVEN_FIELD         0
-#define ZV1_CAPTURE_CTRL_FIELD_INPUT_ODD_FIELD          0
-#define ZV1_CAPTURE_CTRL_SCAN                           26:26
-#define ZV1_CAPTURE_CTRL_SCAN_PROGRESSIVE               0
-#define ZV1_CAPTURE_CTRL_SCAN_INTERLACE                 1
-#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER                 25:25
-#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER_0               0
-#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER_1               1
-#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC                  24:24
-#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC_INACTIVE         0
-#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC_ACTIVE           1
-#define ZV1_CAPTURE_CTRL_PANEL                          20:20
-#define ZV1_CAPTURE_CTRL_PANEL_DISABLE                  0
-#define ZV1_CAPTURE_CTRL_PANEL_ENABLE                   1
-#define ZV1_CAPTURE_CTRL_ADJ                            19:19
-#define ZV1_CAPTURE_CTRL_ADJ_NORMAL                     0
-#define ZV1_CAPTURE_CTRL_ADJ_DELAY                      1
-#define ZV1_CAPTURE_CTRL_HA                             18:18
-#define ZV1_CAPTURE_CTRL_HA_DISABLE                     0
-#define ZV1_CAPTURE_CTRL_HA_ENABLE                      1
-#define ZV1_CAPTURE_CTRL_VSK                            17:17
-#define ZV1_CAPTURE_CTRL_VSK_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_VSK_ENABLE                     1
-#define ZV1_CAPTURE_CTRL_HSK                            16:16
-#define ZV1_CAPTURE_CTRL_HSK_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_HSK_ENABLE                     1
-#define ZV1_CAPTURE_CTRL_FD                             15:15
-#define ZV1_CAPTURE_CTRL_FD_RISING                      0
-#define ZV1_CAPTURE_CTRL_FD_FALLING                     1
-#define ZV1_CAPTURE_CTRL_VP                             14:14
-#define ZV1_CAPTURE_CTRL_VP_HIGH                        0
-#define ZV1_CAPTURE_CTRL_VP_LOW                         1
-#define ZV1_CAPTURE_CTRL_HP                             13:13
-#define ZV1_CAPTURE_CTRL_HP_HIGH                        0
-#define ZV1_CAPTURE_CTRL_HP_LOW                         1
-#define ZV1_CAPTURE_CTRL_CP                             12:12
-#define ZV1_CAPTURE_CTRL_CP_HIGH                        0
-#define ZV1_CAPTURE_CTRL_CP_LOW                         1
-#define ZV1_CAPTURE_CTRL_UVS                            11:11
-#define ZV1_CAPTURE_CTRL_UVS_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_UVS_ENABLE                     1
-#define ZV1_CAPTURE_CTRL_BS                             10:10
-#define ZV1_CAPTURE_CTRL_BS_DISABLE                     0
-#define ZV1_CAPTURE_CTRL_BS_ENABLE                      1
-#define ZV1_CAPTURE_CTRL_CS                             9:9
-#define ZV1_CAPTURE_CTRL_CS_16                          0
-#define ZV1_CAPTURE_CTRL_CS_8                           1
-#define ZV1_CAPTURE_CTRL_CF                             8:8
-#define ZV1_CAPTURE_CTRL_CF_YUV                         0
-#define ZV1_CAPTURE_CTRL_CF_RGB                         1
-#define ZV1_CAPTURE_CTRL_FS                             7:7
-#define ZV1_CAPTURE_CTRL_FS_DISABLE                     0
-#define ZV1_CAPTURE_CTRL_FS_ENABLE                      1
-#define ZV1_CAPTURE_CTRL_WEAVE                          6:6
-#define ZV1_CAPTURE_CTRL_WEAVE_DISABLE                  0
-#define ZV1_CAPTURE_CTRL_WEAVE_ENABLE                   1
-#define ZV1_CAPTURE_CTRL_BOB                            5:5
-#define ZV1_CAPTURE_CTRL_BOB_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_BOB_ENABLE                     1
-#define ZV1_CAPTURE_CTRL_DB                             4:4
-#define ZV1_CAPTURE_CTRL_DB_DISABLE                     0
-#define ZV1_CAPTURE_CTRL_DB_ENABLE                      1
-#define ZV1_CAPTURE_CTRL_CC                             3:3
-#define ZV1_CAPTURE_CTRL_CC_CONTINUE                    0
-#define ZV1_CAPTURE_CTRL_CC_CONDITION                   1
-#define ZV1_CAPTURE_CTRL_RGB                            2:2
-#define ZV1_CAPTURE_CTRL_RGB_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_RGB_ENABLE                     1
-#define ZV1_CAPTURE_CTRL_656                            1:1
-#define ZV1_CAPTURE_CTRL_656_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_656_ENABLE                     1
-#define ZV1_CAPTURE_CTRL_CAP                            0:0
-#define ZV1_CAPTURE_CTRL_CAP_DISABLE                    0
-#define ZV1_CAPTURE_CTRL_CAP_ENABLE                     1
+#define ZV1_CAPTURE_CTRL_FIELD_INPUT                    BIT(27)
+#define ZV1_CAPTURE_CTRL_SCAN                           BIT(26)
+#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER                 BIT(25)
+#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC                  BIT(24)
+#define ZV1_CAPTURE_CTRL_PANEL                          BIT(20)
+#define ZV1_CAPTURE_CTRL_ADJ                            BIT(19)
+#define ZV1_CAPTURE_CTRL_HA                             BIT(18)
+#define ZV1_CAPTURE_CTRL_VSK                            BIT(17)
+#define ZV1_CAPTURE_CTRL_HSK                            BIT(16)
+#define ZV1_CAPTURE_CTRL_FD                             BIT(15)
+#define ZV1_CAPTURE_CTRL_VP                             BIT(14)
+#define ZV1_CAPTURE_CTRL_HP                             BIT(13)
+#define ZV1_CAPTURE_CTRL_CP                             BIT(12)
+#define ZV1_CAPTURE_CTRL_UVS                            BIT(11)
+#define ZV1_CAPTURE_CTRL_BS                             BIT(10)
+#define ZV1_CAPTURE_CTRL_CS                             BIT(9)
+#define ZV1_CAPTURE_CTRL_CF                             BIT(8)
+#define ZV1_CAPTURE_CTRL_FS                             BIT(7)
+#define ZV1_CAPTURE_CTRL_WEAVE                          BIT(6)
+#define ZV1_CAPTURE_CTRL_BOB                            BIT(5)
+#define ZV1_CAPTURE_CTRL_DB                             BIT(4)
+#define ZV1_CAPTURE_CTRL_CC                             BIT(3)
+#define ZV1_CAPTURE_CTRL_RGB                            BIT(2)
+#define ZV1_CAPTURE_CTRL_656                            BIT(1)
+#define ZV1_CAPTURE_CTRL_CAP                            BIT(0)
 
 #define ZV1_CAPTURE_CLIP                                0x098004
-#define ZV1_CAPTURE_CLIP_YCLIP                          25:16
-#define ZV1_CAPTURE_CLIP_XCLIP                          9:0
+#define ZV1_CAPTURE_CLIP_YCLIP_MASK                     (0x3ff << 16)
+#define ZV1_CAPTURE_CLIP_XCLIP_MASK                     0x3ff
 
 #define ZV1_CAPTURE_SIZE                                0x098008
-#define ZV1_CAPTURE_SIZE_HEIGHT                         26:16
-#define ZV1_CAPTURE_SIZE_WIDTH                          10:0
+#define ZV1_CAPTURE_SIZE_HEIGHT_MASK                    (0x7ff << 16)
+#define ZV1_CAPTURE_SIZE_WIDTH_MASK                     0x7ff
 
 #define ZV1_CAPTURE_BUF0_ADDRESS                        0x09800C
-#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS                 31:31
-#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS_CURRENT         0
-#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS_PENDING         1
-#define ZV1_CAPTURE_BUF0_ADDRESS_EXT                    27:27
-#define ZV1_CAPTURE_BUF0_ADDRESS_EXT_LOCAL              0
-#define ZV1_CAPTURE_BUF0_ADDRESS_EXT_EXTERNAL           1
-#define ZV1_CAPTURE_BUF0_ADDRESS_CS                     26:26
-#define ZV1_CAPTURE_BUF0_ADDRESS_CS_0                   0
-#define ZV1_CAPTURE_BUF0_ADDRESS_CS_1                   1
-#define ZV1_CAPTURE_BUF0_ADDRESS_ADDRESS                25:0
+#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS                 BIT(31)
+#define ZV1_CAPTURE_BUF0_ADDRESS_EXT                    BIT(27)
+#define ZV1_CAPTURE_BUF0_ADDRESS_CS                     BIT(26)
+#define ZV1_CAPTURE_BUF0_ADDRESS_ADDRESS_MASK           0x3ffffff
 
 #define ZV1_CAPTURE_BUF1_ADDRESS                        0x098010
-#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS                 31:31
-#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS_CURRENT         0
-#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS_PENDING         1
-#define ZV1_CAPTURE_BUF1_ADDRESS_EXT                    27:27
-#define ZV1_CAPTURE_BUF1_ADDRESS_EXT_LOCAL              0
-#define ZV1_CAPTURE_BUF1_ADDRESS_EXT_EXTERNAL           1
-#define ZV1_CAPTURE_BUF1_ADDRESS_CS                     26:26
-#define ZV1_CAPTURE_BUF1_ADDRESS_CS_0                   0
-#define ZV1_CAPTURE_BUF1_ADDRESS_CS_1                   1
-#define ZV1_CAPTURE_BUF1_ADDRESS_ADDRESS                25:0
+#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS                 BIT(31)
+#define ZV1_CAPTURE_BUF1_ADDRESS_EXT                    BIT(27)
+#define ZV1_CAPTURE_BUF1_ADDRESS_CS                     BIT(26)
+#define ZV1_CAPTURE_BUF1_ADDRESS_ADDRESS_MASK           0x3ffffff
 
 #define ZV1_CAPTURE_BUF_OFFSET                          0x098014
-#define ZV1_CAPTURE_BUF_OFFSET_OFFSET                   15:0
+#define ZV1_CAPTURE_BUF_OFFSET_OFFSET_MASK              0xffff
 
 #define ZV1_CAPTURE_FIFO_CTRL                           0x098018
-#define ZV1_CAPTURE_FIFO_CTRL_FIFO                      2:0
+#define ZV1_CAPTURE_FIFO_CTRL_FIFO_MASK                 0x7
 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_0                    0
 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_1                    1
 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_2                    2
@@ -2304,52 +1416,30 @@
 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_7                    7
 
 #define ZV1_CAPTURE_YRGB_CONST                          0x09801C
-#define ZV1_CAPTURE_YRGB_CONST_Y                        31:24
-#define ZV1_CAPTURE_YRGB_CONST_R                        23:16
-#define ZV1_CAPTURE_YRGB_CONST_G                        15:8
-#define ZV1_CAPTURE_YRGB_CONST_B                        7:0
+#define ZV1_CAPTURE_YRGB_CONST_Y_MASK                   (0xff << 24)
+#define ZV1_CAPTURE_YRGB_CONST_R_MASK                   (0xff << 16)
+#define ZV1_CAPTURE_YRGB_CONST_G_MASK                   (0xff << 8)
+#define ZV1_CAPTURE_YRGB_CONST_B_MASK                   0xff
 
 #define DMA_1_SOURCE                                    0x0D0010
-#define DMA_1_SOURCE_ADDRESS_EXT                        27:27
-#define DMA_1_SOURCE_ADDRESS_EXT_LOCAL                  0
-#define DMA_1_SOURCE_ADDRESS_EXT_EXTERNAL               1
-#define DMA_1_SOURCE_ADDRESS_CS                         26:26
-#define DMA_1_SOURCE_ADDRESS_CS_0                       0
-#define DMA_1_SOURCE_ADDRESS_CS_1                       1
-#define DMA_1_SOURCE_ADDRESS                            25:0
+#define DMA_1_SOURCE_ADDRESS_EXT                        BIT(27)
+#define DMA_1_SOURCE_ADDRESS_CS                         BIT(26)
+#define DMA_1_SOURCE_ADDRESS_MASK                       0x3ffffff
 
 #define DMA_1_DESTINATION                               0x0D0014
-#define DMA_1_DESTINATION_ADDRESS_EXT                   27:27
-#define DMA_1_DESTINATION_ADDRESS_EXT_LOCAL             0
-#define DMA_1_DESTINATION_ADDRESS_EXT_EXTERNAL          1
-#define DMA_1_DESTINATION_ADDRESS_CS                    26:26
-#define DMA_1_DESTINATION_ADDRESS_CS_0                  0
-#define DMA_1_DESTINATION_ADDRESS_CS_1                  1
-#define DMA_1_DESTINATION_ADDRESS                       25:0
+#define DMA_1_DESTINATION_ADDRESS_EXT                   BIT(27)
+#define DMA_1_DESTINATION_ADDRESS_CS                    BIT(26)
+#define DMA_1_DESTINATION_ADDRESS_MASK                  0x3ffffff
 
 #define DMA_1_SIZE_CONTROL                              0x0D0018
-#define DMA_1_SIZE_CONTROL_STATUS                       31:31
-#define DMA_1_SIZE_CONTROL_STATUS_IDLE                  0
-#define DMA_1_SIZE_CONTROL_STATUS_ACTIVE                1
-#define DMA_1_SIZE_CONTROL_SIZE                         23:0
+#define DMA_1_SIZE_CONTROL_STATUS                       BIT(31)
+#define DMA_1_SIZE_CONTROL_SIZE_MASK                    0xffffff
 
 #define DMA_ABORT_INTERRUPT                             0x0D0020
-#define DMA_ABORT_INTERRUPT_ABORT_1                     5:5
-#define DMA_ABORT_INTERRUPT_ABORT_1_ENABLE              0
-#define DMA_ABORT_INTERRUPT_ABORT_1_ABORT               1
-#define DMA_ABORT_INTERRUPT_ABORT_0                     4:4
-#define DMA_ABORT_INTERRUPT_ABORT_0_ENABLE              0
-#define DMA_ABORT_INTERRUPT_ABORT_0_ABORT               1
-#define DMA_ABORT_INTERRUPT_INT_1                       1:1
-#define DMA_ABORT_INTERRUPT_INT_1_CLEAR                 0
-#define DMA_ABORT_INTERRUPT_INT_1_FINISHED              1
-#define DMA_ABORT_INTERRUPT_INT_0                       0:0
-#define DMA_ABORT_INTERRUPT_INT_0_CLEAR                 0
-#define DMA_ABORT_INTERRUPT_INT_0_FINISHED              1
-
-
-
-
+#define DMA_ABORT_INTERRUPT_ABORT_1                     BIT(5)
+#define DMA_ABORT_INTERRUPT_ABORT_0                     BIT(4)
+#define DMA_ABORT_INTERRUPT_INT_1                       BIT(1)
+#define DMA_ABORT_INTERRUPT_INT_0                       BIT(0)
 
 /* Default i2c CLK and Data GPIO. These are the default i2c pins */
 #define DEFAULT_I2C_SCL                     30
@@ -2357,16 +1447,12 @@
 
 
 #define GPIO_DATA_SM750LE                               0x020018
-#define GPIO_DATA_SM750LE_1                             1:1
-#define GPIO_DATA_SM750LE_0                             0:0
+#define GPIO_DATA_SM750LE_1                             BIT(1)
+#define GPIO_DATA_SM750LE_0                             BIT(0)
 
 #define GPIO_DATA_DIRECTION_SM750LE                     0x02001C
-#define GPIO_DATA_DIRECTION_SM750LE_1                   1:1
-#define GPIO_DATA_DIRECTION_SM750LE_1_INPUT             0
-#define GPIO_DATA_DIRECTION_SM750LE_1_OUTPUT            1
-#define GPIO_DATA_DIRECTION_SM750LE_0                   0:0
-#define GPIO_DATA_DIRECTION_SM750LE_0_INPUT             0
-#define GPIO_DATA_DIRECTION_SM750LE_0_OUTPUT            1
+#define GPIO_DATA_DIRECTION_SM750LE_1                   BIT(1)
+#define GPIO_DATA_DIRECTION_SM750LE_0                   BIT(0)
 
 
 #endif
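
The header now expresses single-bit register fields with BIT(n) and multi-bit fields with pre-shifted masks, so the old FIELD_* helpers are no longer needed. A minimal sketch of the resulting read-modify-write idiom, assuming direct readl()/writel() access to the ioremapped register window (the driver's own accessors differ):

    #include <linux/io.h>   /* readl(), writel() */

    /* Select RGB565 as the CSC source format and start a conversion. */
    static void csc_start_rgb565(void __iomem *mmio_base)
    {
        u32 val = readl(mmio_base + CSC_CONTROL);

        val &= ~CSC_CONTROL_SOURCE_FORMAT_MASK;   /* clear bits 30:28 */
        val |= CSC_CONTROL_SOURCE_FORMAT_RGB565;  /* format constants come pre-shifted */
        val |= CSC_CONTROL_STATUS;                /* BIT(31) kicks off the operation */
        writel(val, mmio_base + CSC_CONTROL);
    }

Because each *_MASK carries its shift, clearing the field and OR-ing in one of the pre-shifted constants is all a caller has to do.
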
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 241b77b..9fd6afb 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -14,8 +14,8 @@
     #define i2cWriteReg sm750_hw_i2c_write_reg
     #define i2cReadReg  sm750_hw_i2c_read_reg
 #else
-    #define i2cWriteReg swI2CWriteReg
-    #define i2cReadReg  swI2CReadReg
+    #define i2cWriteReg sm750_sw_i2c_write_reg
+    #define i2cReadReg  sm750_sw_i2c_read_reg
 #endif
 
 /* SII164 Vendor and Device ID */
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index f2610c9..664ad08 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -39,7 +39,10 @@
 unsigned char sii164CheckInterrupt(void);
 void sii164ClearInterrupt(void);
 #endif
-/* below register definination is used for Silicon Image SiI164 DVI controller chip */
+/*
+ * below register definition is used for
+ * Silicon Image SiI164 DVI controller chip
+ */
 /*
  * Vendor ID registers
  */
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index c78421b..59c7455 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -13,8 +13,6 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/screen_info.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
 #include <linux/console.h>
 #include <asm/fb.h>
 #include "sm750.h"
@@ -189,7 +187,7 @@
 	 * If not use spin_lock,system will die if user load driver
 	 * and immediately unload driver frequently (dual)
 	 */
-	if (sm750_dev->dual)
+	if (sm750_dev->fb_count > 1)
 		spin_lock(&sm750_dev->slock);
 
 	sm750_dev->accel.de_fillrect(&sm750_dev->accel,
@@ -197,7 +195,7 @@
 				     region->dx, region->dy,
 				     region->width, region->height,
 				     color, rop);
-	if (sm750_dev->dual)
+	if (sm750_dev->fb_count > 1)
 		spin_unlock(&sm750_dev->slock);
 }
 
@@ -223,7 +221,7 @@
 	 * If not use spin_lock, system will die if user load driver
 	 * and immediately unload driver frequently (dual)
 	 */
-	if (sm750_dev->dual)
+	if (sm750_dev->fb_count > 1)
 		spin_lock(&sm750_dev->slock);
 
 	sm750_dev->accel.de_copyarea(&sm750_dev->accel,
@@ -231,7 +229,7 @@
 				     base, pitch, Bpp, region->dx, region->dy,
 				     region->width, region->height,
 				     HW_ROP2_COPY);
-	if (sm750_dev->dual)
+	if (sm750_dev->fb_count > 1)
 		spin_unlock(&sm750_dev->slock);
 }
 
@@ -272,7 +270,7 @@
 	 * If not use spin_lock, system will die if user load driver
 	 * and immediately unload driver frequently (dual)
 	 */
-	if (sm750_dev->dual)
+	if (sm750_dev->fb_count > 1)
 		spin_lock(&sm750_dev->slock);
 
 	sm750_dev->accel.de_imageblit(&sm750_dev->accel,
@@ -281,7 +279,7 @@
 				      image->dx, image->dy,
 				      image->width, image->height,
 				      fgcol, bgcol, HW_ROP2_COPY);
-	if (sm750_dev->dual)
+	if (sm750_dev->fb_count > 1)
 		spin_unlock(&sm750_dev->slock);
 }
 
@@ -319,7 +317,7 @@
 	var = &info->var;
 	fix = &info->fix;
 
-	/* fix structur is not so FIX ... */
+	/* fix structure is not so FIX ... */
 	line_length = var->xres_virtual * var->bits_per_pixel / 8;
 	line_length = ALIGN(line_length, crtc->line_pad);
 	fix->line_length = line_length;
@@ -650,8 +648,10 @@
 	output = &par->output;
 	crtc = &par->crtc;
 
-	crtc->vidmem_size = (sm750_dev->dual) ? sm750_dev->vidmem_size >> 1 :
-			     sm750_dev->vidmem_size;
+	crtc->vidmem_size = sm750_dev->vidmem_size;
+	if (sm750_dev->fb_count > 1)
+		crtc->vidmem_size >>= 1;
+
 	/* setup crtc and output member */
 	sm750_dev->hwCursor = g_hwcursor;
 
@@ -981,7 +981,7 @@
 
 NO_PARAM:
 	if (sm750_dev->revid != SM750LE_REVISION_ID) {
-		if (sm750_dev->dual) {
+		if (sm750_dev->fb_count > 1) {
 			if (swap)
 				sm750_dev->dataflow = sm750_dual_swap;
 			else
@@ -1000,35 +1000,75 @@
 	}
 }
 
+static void sm750fb_frambuffer_release(struct sm750_dev *sm750_dev)
+{
+	struct fb_info *fb_info;
+
+	while (sm750_dev->fb_count) {
+		fb_info = sm750_dev->fbinfo[sm750_dev->fb_count - 1];
+		unregister_framebuffer(fb_info);
+		framebuffer_release(fb_info);
+		sm750_dev->fb_count--;
+	}
+}
+
+static int sm750fb_frambuffer_alloc(struct sm750_dev *sm750_dev, int fbidx)
+{
+	struct fb_info *fb_info;
+	struct lynxfb_par *par;
+	int err;
+
+	fb_info = framebuffer_alloc(sizeof(struct lynxfb_par),
+				    &sm750_dev->pdev->dev);
+	if (!fb_info)
+		return -ENOMEM;
+
+	sm750_dev->fbinfo[fbidx] = fb_info;
+	par = fb_info->par;
+	par->dev = sm750_dev;
+
+	err = lynxfb_set_fbinfo(fb_info, fbidx);
+	if (err)
+		goto release_fb;
+
+	err = register_framebuffer(fb_info);
+	if (err < 0)
+		goto release_fb;
+
+	sm750_dev->fb_count++;
+
+	return 0;
+
+release_fb:
+	framebuffer_release(fb_info);
+	return err;
+}
+
 static int lynxfb_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
-	struct fb_info *info[] = {NULL, NULL};
 	struct sm750_dev *sm750_dev = NULL;
+	int max_fb;
 	int fbidx;
+	int err;
 
 	/* enable device */
-	if (pci_enable_device(pdev)) {
-		pr_err("can not enable device.\n");
-		goto err_enable;
-	}
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
 
+	err = -ENOMEM;
 	sm750_dev = kzalloc(sizeof(*sm750_dev), GFP_KERNEL);
-	if (!sm750_dev) {
-		pr_err("Could not allocate memory for share.\n");
-		goto err_share;
-	}
+	if (!sm750_dev)
+		goto disable_pci;
 
 	sm750_dev->fbinfo[0] = sm750_dev->fbinfo[1] = NULL;
 	sm750_dev->devid = pdev->device;
 	sm750_dev->revid = pdev->revision;
-
-	pr_info("share->revid = %02x\n", sm750_dev->revid);
 	sm750_dev->pdev = pdev;
 	sm750_dev->mtrr_off = g_nomtrr;
 	sm750_dev->mtrr.vram = 0;
 	sm750_dev->accel_off = g_noaccel;
-	sm750_dev->dual = g_dualview;
 	spin_lock_init(&sm750_dev->slock);
 
 	if (!sm750_dev->accel_off) {
@@ -1042,19 +1082,15 @@
 		sm750_dev->accel.de_fillrect = hw_fillrect;
 		sm750_dev->accel.de_copyarea = hw_copyarea;
 		sm750_dev->accel.de_imageblit = hw_imageblit;
-		pr_info("enable 2d acceleration\n");
-	} else {
-		pr_info("disable 2d acceleration\n");
 	}
 
 	/* call chip specific setup routine  */
 	sm750fb_setup(sm750_dev, g_settings);
 
 	/* call chip specific mmap routine */
-	if (hw_sm750_map(sm750_dev, pdev)) {
-		pr_err("Memory map failed\n");
-		goto err_map;
-	}
+	err = hw_sm750_map(sm750_dev, pdev);
+	if (err)
+		goto free_sm750_dev;
 
 	if (!sm750_dev->mtrr_off)
 		sm750_dev->mtrr.vram = arch_phys_wc_add(sm750_dev->vidmem_start,
@@ -1062,100 +1098,37 @@
 
 	memset_io(sm750_dev->pvMem, 0, sm750_dev->vidmem_size);
 
-	pr_info("sm%3x mmio address = %p\n", sm750_dev->devid,
-		sm750_dev->pvReg);
-
 	pci_set_drvdata(pdev, sm750_dev);
 
 	/* call chipInit routine */
 	hw_sm750_inithw(sm750_dev, pdev);
 
-	/* allocate frame buffer info structor according to g_dualview */
-	fbidx = 0;
-ALLOC_FB:
-	info[fbidx] = framebuffer_alloc(sizeof(struct lynxfb_par), &pdev->dev);
-	if (!info[fbidx]) {
-		pr_err("Could not allocate framebuffer #%d.\n", fbidx);
-		if (fbidx == 0)
-			goto err_info0_alloc;
-		else
-			goto err_info1_alloc;
-	} else {
-		struct lynxfb_par *par;
-		int errno;
-
-		pr_info("framebuffer #%d alloc okay\n", fbidx);
-		sm750_dev->fbinfo[fbidx] = info[fbidx];
-		par = info[fbidx]->par;
-		par->dev = sm750_dev;
-
-		/* set fb_info structure */
-		if (lynxfb_set_fbinfo(info[fbidx], fbidx)) {
-			pr_err("Failed to initial fb_info #%d.\n", fbidx);
-			if (fbidx == 0)
-				goto err_info0_set;
-			else
-				goto err_info1_set;
-		}
-
-		/* register frame buffer */
-		pr_info("Ready to register framebuffer #%d.\n", fbidx);
-		errno = register_framebuffer(info[fbidx]);
-		if (errno < 0) {
-			pr_err("Failed to register fb_info #%d. err %d\n",
-			       fbidx,
-			       errno);
-			if (fbidx == 0)
-				goto err_register0;
-			else
-				goto err_register1;
-		}
-		pr_info("Accomplished register framebuffer #%d.\n", fbidx);
+	/* allocate frame buffer info structures according to g_dualview */
+	max_fb = g_dualview ? 2 : 1;
+	for (fbidx = 0; fbidx < max_fb; fbidx++) {
+		err = sm750fb_frambuffer_alloc(sm750_dev, fbidx);
+		if (err)
+			goto release_fb;
 	}
 
-	/* no dual view by far */
-	fbidx++;
-	if (sm750_dev->dual && fbidx < 2)
-		goto ALLOC_FB;
-
 	return 0;
 
-err_register1:
-err_info1_set:
-	framebuffer_release(info[1]);
-err_info1_alloc:
-	unregister_framebuffer(info[0]);
-err_register0:
-err_info0_set:
-	framebuffer_release(info[0]);
-err_info0_alloc:
-err_map:
+release_fb:
+	sm750fb_frambuffer_release(sm750_dev);
+free_sm750_dev:
 	kfree(sm750_dev);
-err_share:
-err_enable:
-	return -ENODEV;
+disable_pci:
+	pci_disable_device(pdev);
+	return err;
 }
 
 static void lynxfb_pci_remove(struct pci_dev *pdev)
 {
-	struct fb_info *info;
 	struct sm750_dev *sm750_dev;
-	struct lynxfb_par *par;
-	int cnt;
 
-	cnt = 2;
 	sm750_dev = pci_get_drvdata(pdev);
 
-	while (cnt-- > 0) {
-		info = sm750_dev->fbinfo[cnt];
-		if (!info)
-			continue;
-		par = info->par;
-
-		unregister_framebuffer(info);
-		/* release frame buffer */
-		framebuffer_release(info);
-	}
+	sm750fb_frambuffer_release(sm750_dev);
 	arch_phys_wc_del(sm750_dev->mtrr.vram);
 
 	iounmap(sm750_dev->pvReg);
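
The probe path above now propagates the real errno and unwinds with goto labels, releasing only what was actually set up; sm750fb_frambuffer_release() can double as the error path because fb_count is bumped only after register_framebuffer() succeeds. A condensed sketch of that unwind shape, with an illustrative private struct standing in for sm750_dev (the PCI and allocation calls are the real kernel APIs):

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct demo_dev {
        struct pci_dev *pdev;
    };

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
        struct demo_dev *dev;
        int err;

        err = pci_enable_device(pdev);
        if (err)
            return err;                 /* hand back the real errno */

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
            err = -ENOMEM;
            goto disable_pci;
        }
        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);

        return 0;

    disable_pci:
        pci_disable_device(pdev);       /* undo only what already succeeded */
        return err;
    }
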
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index b0a93cd..8e70ce0 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -53,7 +53,7 @@
 	/* base virtual address of de data port */
 	volatile unsigned char __iomem *dpPortBase;
 
-	/* function fointers */
+	/* function pointers */
 	void (*de_init)(struct lynx_accel *);
 
 	int (*de_wait)(void);/* see if hardware ready to work */
@@ -79,7 +79,7 @@
 	struct fb_info *fbinfo[2];
 	struct lynx_accel accel;
 	int accel_off;
-	int dual;
+	int fb_count;
 	int mtrr_off;
 	struct{
 		int vram;
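
With the dual flag removed, "dual view active" is now derived from fb_count, which only reaches 2 once both framebuffers have registered successfully. A hypothetical wrapper spelling out the test that the accel paths in sm750.c open-code at each call site:

    static inline bool sm750_is_dual(const struct sm750_dev *sm750_dev)
    {
        return sm750_dev->fb_count > 1;
    }
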
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 43e5972..9aa4066 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -17,7 +17,6 @@
 
 #include "sm750.h"
 #include "sm750_accel.h"
-#include "sm750_help.h"
 static inline void write_dpr(struct lynx_accel *accel, int offset, u32 regValue)
 {
 	writel(regValue, accel->dprBase + offset);
@@ -41,20 +40,16 @@
 	write_dpr(accel, DE_MASKS, 0xFFFFFFFF);
 
 	/* dpr1c */
-	reg = FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY, NORMAL)|
-		FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_Y, 0)|
-		FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X, 0)|
-		FIELD_SET(0, DE_STRETCH_FORMAT, ADDRESSING, XY)|
-		FIELD_VALUE(0, DE_STRETCH_FORMAT, SOURCE_HEIGHT, 3);
+	reg =  0x3;
 
-	clr = FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_XY)&
-		FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_Y)&
-		FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_X)&
-		FIELD_CLEAR(DE_STRETCH_FORMAT, ADDRESSING)&
-		FIELD_CLEAR(DE_STRETCH_FORMAT, SOURCE_HEIGHT);
+	clr = DE_STRETCH_FORMAT_PATTERN_XY | DE_STRETCH_FORMAT_PATTERN_Y_MASK |
+		DE_STRETCH_FORMAT_PATTERN_X_MASK |
+		DE_STRETCH_FORMAT_ADDRESSING_MASK |
+		DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK;
 
-	/* DE_STRETCH bpp format need be initilized in setMode routine */
-	write_dpr(accel, DE_STRETCH_FORMAT, (read_dpr(accel, DE_STRETCH_FORMAT) & clr) | reg);
+	/* DE_STRETCH bpp format need be initialized in setMode routine */
+	write_dpr(accel, DE_STRETCH_FORMAT,
+		  (read_dpr(accel, DE_STRETCH_FORMAT) & ~clr) | reg);
 
 	/* disable clipping and transparent */
 	write_dpr(accel, DE_CLIP_TL, 0); /* dpr2c */
@@ -63,16 +58,11 @@
 	write_dpr(accel, DE_COLOR_COMPARE_MASK, 0); /* dpr24 */
 	write_dpr(accel, DE_COLOR_COMPARE, 0);
 
-	reg = FIELD_SET(0, DE_CONTROL, TRANSPARENCY, DISABLE)|
-		FIELD_SET(0, DE_CONTROL, TRANSPARENCY_MATCH, OPAQUE)|
-		FIELD_SET(0, DE_CONTROL, TRANSPARENCY_SELECT, SOURCE);
-
-	clr = FIELD_CLEAR(DE_CONTROL, TRANSPARENCY)&
-		FIELD_CLEAR(DE_CONTROL, TRANSPARENCY_MATCH)&
-		FIELD_CLEAR(DE_CONTROL, TRANSPARENCY_SELECT);
+	clr = DE_CONTROL_TRANSPARENCY | DE_CONTROL_TRANSPARENCY_MATCH |
+		DE_CONTROL_TRANSPARENCY_SELECT;
 
 	/* dpr0c */
-	write_dpr(accel, DE_CONTROL, (read_dpr(accel, DE_CONTROL)&clr)|reg);
+	write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr);
 }
 
 /* set2dformat only be called from setmode functions
@@ -85,7 +75,9 @@
 
 	/* fmt=0,1,2 for 8,16,32,bpp on sm718/750/502 */
 	reg = read_dpr(accel, DE_STRETCH_FORMAT);
-	reg = FIELD_VALUE(reg, DE_STRETCH_FORMAT, PIXEL_FORMAT, fmt);
+	reg &= ~DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK;
+	reg |= ((fmt << DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT) &
+		DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK);
 	write_dpr(accel, DE_STRETCH_FORMAT, reg);
 }
 
@@ -105,31 +97,28 @@
 
 	write_dpr(accel, DE_WINDOW_DESTINATION_BASE, base); /* dpr40 */
 	write_dpr(accel, DE_PITCH,
-			FIELD_VALUE(0, DE_PITCH, DESTINATION, pitch/Bpp)|
-			FIELD_VALUE(0, DE_PITCH, SOURCE, pitch/Bpp)); /* dpr10 */
+		  ((pitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
+		   DE_PITCH_DESTINATION_MASK) |
+		  (pitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
 
 	write_dpr(accel, DE_WINDOW_WIDTH,
-			FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, pitch/Bpp)|
-			FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, pitch/Bpp)); /* dpr44 */
+		  ((pitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
+		   DE_WINDOW_WIDTH_DST_MASK) |
+		   (pitch / Bpp & DE_WINDOW_WIDTH_SRC_MASK)); /* dpr44 */
 
 	write_dpr(accel, DE_FOREGROUND, color); /* DPR14 */
 
 	write_dpr(accel, DE_DESTINATION,
-			FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE)|
-			FIELD_VALUE(0, DE_DESTINATION, X, x)|
-			FIELD_VALUE(0, DE_DESTINATION, Y, y)); /* dpr4 */
+		  ((x << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) |
+		  (y & DE_DESTINATION_Y_MASK)); /* dpr4 */
 
 	write_dpr(accel, DE_DIMENSION,
-			FIELD_VALUE(0, DE_DIMENSION, X, width)|
-			FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr8 */
+		  ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) |
+		  (height & DE_DIMENSION_Y_ET_MASK)); /* dpr8 */
 
-	deCtrl =
-		FIELD_SET(0, DE_CONTROL, STATUS, START)|
-		FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)|
-		FIELD_SET(0, DE_CONTROL, LAST_PIXEL, ON)|
-		FIELD_SET(0, DE_CONTROL, COMMAND, RECTANGLE_FILL)|
-		FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2)|
-		FIELD_VALUE(0, DE_CONTROL, ROP, rop); /* dpr0xc */
+	deCtrl = DE_CONTROL_STATUS | DE_CONTROL_LAST_PIXEL |
+		DE_CONTROL_COMMAND_RECTANGLE_FILL | DE_CONTROL_ROP_SELECT |
+		(rop & DE_CONTROL_ROP_MASK); /* dpr0xc */
 
 	write_dpr(accel, DE_CONTROL, deCtrl);
 	return 0;
@@ -237,18 +226,18 @@
        Note that input pitch is BYTE value, but the 2D Pitch register uses
        pixel values. Need Byte to pixel conversion.
     */
-	{
-		write_dpr(accel, DE_PITCH,
-				FIELD_VALUE(0, DE_PITCH, DESTINATION, (dPitch/Bpp)) |
-				FIELD_VALUE(0, DE_PITCH, SOURCE,      (sPitch/Bpp))); /* dpr10 */
-	}
+	write_dpr(accel, DE_PITCH,
+		  ((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
+		   DE_PITCH_DESTINATION_MASK) |
+		  (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
 
     /* Screen Window width in Pixels.
        2D engine uses this value to calculate the linear address in frame buffer for a given point.
     */
 	write_dpr(accel, DE_WINDOW_WIDTH,
-	FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) |
-	FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE,      (sPitch/Bpp))); /* dpr3c */
+		  ((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
+		   DE_WINDOW_WIDTH_DST_MASK) |
+		  (sPitch / Bpp & DE_WINDOW_WIDTH_SRC_MASK)); /* dpr3c */
 
 	if (accel->de_wait() != 0)
 		return -1;
@@ -256,24 +245,18 @@
 	{
 
 	write_dpr(accel, DE_SOURCE,
-		  FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
-		  FIELD_VALUE(0, DE_SOURCE, X_K1, sx)   |
-		  FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */
+		  ((sx << DE_SOURCE_X_K1_SHIFT) & DE_SOURCE_X_K1_MASK) |
+		  (sy & DE_SOURCE_Y_K2_MASK)); /* dpr0 */
 	write_dpr(accel, DE_DESTINATION,
-		  FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
-		  FIELD_VALUE(0, DE_DESTINATION, X,    dx)  |
-		  FIELD_VALUE(0, DE_DESTINATION, Y,    dy)); /* dpr04 */
+		  ((dx << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) |
+		  (dy & DE_DESTINATION_Y_MASK)); /* dpr04 */
 	write_dpr(accel, DE_DIMENSION,
-		  FIELD_VALUE(0, DE_DIMENSION, X,    width) |
-		  FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+		  ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) |
+		  (height & DE_DIMENSION_Y_ET_MASK)); /* dpr08 */
 
-	de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
-		  FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
-		  FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
-		  ((nDirection == RIGHT_TO_LEFT) ?
-		  FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT)
-		  : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) |
-		  FIELD_SET(0, DE_CONTROL, STATUS, START);
+	de_ctrl = (rop2 & DE_CONTROL_ROP_MASK) | DE_CONTROL_ROP_SELECT |
+		((nDirection == RIGHT_TO_LEFT) ? DE_CONTROL_DIRECTION : 0) |
+		DE_CONTROL_COMMAND_BITBLT | DE_CONTROL_STATUS;
 	write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
 
 	}
@@ -287,10 +270,8 @@
 
 	de_ctrl = read_dpr(accel, DE_CONTROL);
 
-	de_ctrl &=
-		   FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
-		   FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)|
-		   FIELD_MASK(DE_CONTROL_TRANSPARENCY);
+	de_ctrl &= (DE_CONTROL_TRANSPARENCY_MATCH |
+		    DE_CONTROL_TRANSPARENCY_SELECT | DE_CONTROL_TRANSPARENCY);
 
 	return de_ctrl;
 }
@@ -305,7 +286,7 @@
 		 u32 dx,
 		 u32 dy,       /* Starting coordinate of destination surface */
 		 u32 width,
-		 u32 height,   /* width and height of rectange in pixel value */
+		 u32 height,   /* width and height of rectangle in pixel value */
 		 u32 fColor,   /* Foreground color (corresponding to a 1 in the monochrome data */
 		 u32 bColor,   /* Background color (corresponding to a 0 in the monochrome data */
 		 u32 rop2)     /* ROP value */
@@ -338,42 +319,39 @@
        Note that input pitch is BYTE value, but the 2D Pitch register uses
        pixel values. Need Byte to pixel conversion.
     */
-	{
-		write_dpr(accel, DE_PITCH,
-				FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch/bytePerPixel) |
-				FIELD_VALUE(0, DE_PITCH, SOURCE,      dPitch/bytePerPixel)); /* dpr10 */
-	}
+	write_dpr(accel, DE_PITCH,
+		  ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
+		   DE_PITCH_DESTINATION_MASK) |
+		  (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
 
 	/* Screen Window width in Pixels.
 	 2D engine uses this value to calculate the linear address in frame buffer for a given point.
 	 */
 	write_dpr(accel, DE_WINDOW_WIDTH,
-		  FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) |
-		  FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE,      (dPitch/bytePerPixel)));
+		  ((dPitch / bytePerPixel << DE_WINDOW_WIDTH_DST_SHIFT) &
+		   DE_WINDOW_WIDTH_DST_MASK) |
+		  (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
 
 	 /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
 	    For mono bitmap, use startBit for X_K1. */
 	write_dpr(accel, DE_SOURCE,
-		  FIELD_SET(0, DE_SOURCE, WRAP, DISABLE)       |
-		  FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */
+		  (startBit << DE_SOURCE_X_K1_SHIFT) &
+		  DE_SOURCE_X_K1_MONO_MASK); /* dpr00 */
 
 	write_dpr(accel, DE_DESTINATION,
-		  FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
-		  FIELD_VALUE(0, DE_DESTINATION, X,    dx)    |
-		  FIELD_VALUE(0, DE_DESTINATION, Y,    dy)); /* dpr04 */
+		  ((dx << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) |
+		  (dy & DE_DESTINATION_Y_MASK)); /* dpr04 */
 
 	write_dpr(accel, DE_DIMENSION,
-		  FIELD_VALUE(0, DE_DIMENSION, X,    width) |
-		  FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+		  ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) |
+		  (height & DE_DIMENSION_Y_ET_MASK)); /* dpr08 */
 
 	write_dpr(accel, DE_FOREGROUND, fColor);
 	write_dpr(accel, DE_BACKGROUND, bColor);
 
-	de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2)         |
-		FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2)    |
-		FIELD_SET(0, DE_CONTROL, COMMAND, HOST_WRITE) |
-		FIELD_SET(0, DE_CONTROL, HOST, MONO)          |
-		FIELD_SET(0, DE_CONTROL, STATUS, START);
+	de_ctrl = (rop2 & DE_CONTROL_ROP_MASK) |
+		DE_CONTROL_ROP_SELECT | DE_CONTROL_COMMAND_HOST_WRITE |
+		DE_CONTROL_HOST | DE_CONTROL_STATUS;
 
 	write_dpr(accel, DE_CONTROL, de_ctrl | deGetTransparency(accel));
 
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index f252e47..d59d005 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -21,212 +21,162 @@
 #define DE_PORT_ADDR_TYPE3 0x100000
 
 #define DE_SOURCE                                       0x0
-#define DE_SOURCE_WRAP                                  31:31
-#define DE_SOURCE_WRAP_DISABLE                          0
-#define DE_SOURCE_WRAP_ENABLE                           1
-#define DE_SOURCE_X_K1                                  29:16
-#define DE_SOURCE_Y_K2                                  15:0
-#define DE_SOURCE_X_K1_MONO				20:16
+#define DE_SOURCE_WRAP                                  BIT(31)
+#define DE_SOURCE_X_K1_SHIFT                            16
+#define DE_SOURCE_X_K1_MASK                             (0x3fff << 16)
+#define DE_SOURCE_X_K1_MONO_MASK			(0x1f << 16)
+#define DE_SOURCE_Y_K2_MASK                             0xffff
 
 #define DE_DESTINATION                                  0x4
-#define DE_DESTINATION_WRAP                             31:31
-#define DE_DESTINATION_WRAP_DISABLE                     0
-#define DE_DESTINATION_WRAP_ENABLE                      1
-#define DE_DESTINATION_X                                28:16
-#define DE_DESTINATION_Y                                15:0
+#define DE_DESTINATION_WRAP                             BIT(31)
+#define DE_DESTINATION_X_SHIFT                          16
+#define DE_DESTINATION_X_MASK                           (0x1fff << 16)
+#define DE_DESTINATION_Y_MASK                           0xffff
 
 #define DE_DIMENSION                                    0x8
-#define DE_DIMENSION_X                                  28:16
-#define DE_DIMENSION_Y_ET                               15:0
+#define DE_DIMENSION_X_SHIFT                            16
+#define DE_DIMENSION_X_MASK                             (0x1fff << 16)
+#define DE_DIMENSION_Y_ET_MASK                          0x1fff
 
 #define DE_CONTROL                                      0xC
-#define DE_CONTROL_STATUS                               31:31
-#define DE_CONTROL_STATUS_STOP                          0
-#define DE_CONTROL_STATUS_START                         1
-#define DE_CONTROL_PATTERN                              30:30
-#define DE_CONTROL_PATTERN_MONO                         0
-#define DE_CONTROL_PATTERN_COLOR                        1
-#define DE_CONTROL_UPDATE_DESTINATION_X                 29:29
-#define DE_CONTROL_UPDATE_DESTINATION_X_DISABLE         0
-#define DE_CONTROL_UPDATE_DESTINATION_X_ENABLE          1
-#define DE_CONTROL_QUICK_START                          28:28
-#define DE_CONTROL_QUICK_START_DISABLE                  0
-#define DE_CONTROL_QUICK_START_ENABLE                   1
-#define DE_CONTROL_DIRECTION                            27:27
-#define DE_CONTROL_DIRECTION_LEFT_TO_RIGHT              0
-#define DE_CONTROL_DIRECTION_RIGHT_TO_LEFT              1
-#define DE_CONTROL_MAJOR                                26:26
-#define DE_CONTROL_MAJOR_X                              0
-#define DE_CONTROL_MAJOR_Y                              1
-#define DE_CONTROL_STEP_X                               25:25
-#define DE_CONTROL_STEP_X_POSITIVE                      1
-#define DE_CONTROL_STEP_X_NEGATIVE                      0
-#define DE_CONTROL_STEP_Y                               24:24
-#define DE_CONTROL_STEP_Y_POSITIVE                      1
-#define DE_CONTROL_STEP_Y_NEGATIVE                      0
-#define DE_CONTROL_STRETCH                              23:23
-#define DE_CONTROL_STRETCH_DISABLE                      0
-#define DE_CONTROL_STRETCH_ENABLE                       1
-#define DE_CONTROL_HOST                                 22:22
-#define DE_CONTROL_HOST_COLOR                           0
-#define DE_CONTROL_HOST_MONO                            1
-#define DE_CONTROL_LAST_PIXEL                           21:21
-#define DE_CONTROL_LAST_PIXEL_OFF                       0
-#define DE_CONTROL_LAST_PIXEL_ON                        1
-#define DE_CONTROL_COMMAND                              20:16
-#define DE_CONTROL_COMMAND_BITBLT                       0
-#define DE_CONTROL_COMMAND_RECTANGLE_FILL               1
-#define DE_CONTROL_COMMAND_DE_TILE                      2
-#define DE_CONTROL_COMMAND_TRAPEZOID_FILL               3
-#define DE_CONTROL_COMMAND_ALPHA_BLEND                  4
-#define DE_CONTROL_COMMAND_RLE_STRIP                    5
-#define DE_CONTROL_COMMAND_SHORT_STROKE                 6
-#define DE_CONTROL_COMMAND_LINE_DRAW                    7
-#define DE_CONTROL_COMMAND_HOST_WRITE                   8
-#define DE_CONTROL_COMMAND_HOST_READ                    9
-#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP         10
-#define DE_CONTROL_COMMAND_ROTATE                       11
-#define DE_CONTROL_COMMAND_FONT                         12
-#define DE_CONTROL_COMMAND_TEXTURE_LOAD                 15
-#define DE_CONTROL_ROP_SELECT                           15:15
-#define DE_CONTROL_ROP_SELECT_ROP3                      0
-#define DE_CONTROL_ROP_SELECT_ROP2                      1
-#define DE_CONTROL_ROP2_SOURCE                          14:14
-#define DE_CONTROL_ROP2_SOURCE_BITMAP                   0
-#define DE_CONTROL_ROP2_SOURCE_PATTERN                  1
-#define DE_CONTROL_MONO_DATA                            13:12
-#define DE_CONTROL_MONO_DATA_NOT_PACKED                 0
-#define DE_CONTROL_MONO_DATA_8_PACKED                   1
-#define DE_CONTROL_MONO_DATA_16_PACKED                  2
-#define DE_CONTROL_MONO_DATA_32_PACKED                  3
-#define DE_CONTROL_REPEAT_ROTATE                        11:11
-#define DE_CONTROL_REPEAT_ROTATE_DISABLE                0
-#define DE_CONTROL_REPEAT_ROTATE_ENABLE                 1
-#define DE_CONTROL_TRANSPARENCY_MATCH                   10:10
-#define DE_CONTROL_TRANSPARENCY_MATCH_OPAQUE            0
-#define DE_CONTROL_TRANSPARENCY_MATCH_TRANSPARENT       1
-#define DE_CONTROL_TRANSPARENCY_SELECT                  9:9
-#define DE_CONTROL_TRANSPARENCY_SELECT_SOURCE           0
-#define DE_CONTROL_TRANSPARENCY_SELECT_DESTINATION      1
-#define DE_CONTROL_TRANSPARENCY                         8:8
-#define DE_CONTROL_TRANSPARENCY_DISABLE                 0
-#define DE_CONTROL_TRANSPARENCY_ENABLE                  1
-#define DE_CONTROL_ROP                                  7:0
+#define DE_CONTROL_STATUS                               BIT(31)
+#define DE_CONTROL_PATTERN                              BIT(30)
+#define DE_CONTROL_UPDATE_DESTINATION_X                 BIT(29)
+#define DE_CONTROL_QUICK_START                          BIT(28)
+#define DE_CONTROL_DIRECTION                            BIT(27)
+#define DE_CONTROL_MAJOR                                BIT(26)
+#define DE_CONTROL_STEP_X                               BIT(25)
+#define DE_CONTROL_STEP_Y                               BIT(24)
+#define DE_CONTROL_STRETCH                              BIT(23)
+#define DE_CONTROL_HOST                                 BIT(22)
+#define DE_CONTROL_LAST_PIXEL                           BIT(21)
+#define DE_CONTROL_COMMAND_SHIFT                        16
+#define DE_CONTROL_COMMAND_MASK                         (0x1f << 16)
+#define DE_CONTROL_COMMAND_BITBLT                       (0x0 << 16)
+#define DE_CONTROL_COMMAND_RECTANGLE_FILL               (0x1 << 16)
+#define DE_CONTROL_COMMAND_DE_TILE                      (0x2 << 16)
+#define DE_CONTROL_COMMAND_TRAPEZOID_FILL               (0x3 << 16)
+#define DE_CONTROL_COMMAND_ALPHA_BLEND                  (0x4 << 16)
+#define DE_CONTROL_COMMAND_RLE_STRIP                    (0x5 << 16)
+#define DE_CONTROL_COMMAND_SHORT_STROKE                 (0x6 << 16)
+#define DE_CONTROL_COMMAND_LINE_DRAW                    (0x7 << 16)
+#define DE_CONTROL_COMMAND_HOST_WRITE                   (0x8 << 16)
+#define DE_CONTROL_COMMAND_HOST_READ                    (0x9 << 16)
+#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP         (0xa << 16)
+#define DE_CONTROL_COMMAND_ROTATE                       (0xb << 16)
+#define DE_CONTROL_COMMAND_FONT                         (0xc << 16)
+#define DE_CONTROL_COMMAND_TEXTURE_LOAD                 (0xe << 16)
+#define DE_CONTROL_ROP_SELECT                           BIT(15)
+#define DE_CONTROL_ROP2_SOURCE                          BIT(14)
+#define DE_CONTROL_MONO_DATA_SHIFT                      12
+#define DE_CONTROL_MONO_DATA_MASK                       (0x3 << 12)
+#define DE_CONTROL_MONO_DATA_NOT_PACKED                 (0x0 << 12)
+#define DE_CONTROL_MONO_DATA_8_PACKED                   (0x1 << 12)
+#define DE_CONTROL_MONO_DATA_16_PACKED                  (0x2 << 12)
+#define DE_CONTROL_MONO_DATA_32_PACKED                  (0x3 << 12)
+#define DE_CONTROL_REPEAT_ROTATE                        BIT(11)
+#define DE_CONTROL_TRANSPARENCY_MATCH                   BIT(10)
+#define DE_CONTROL_TRANSPARENCY_SELECT                  BIT(9)
+#define DE_CONTROL_TRANSPARENCY                         BIT(8)
+#define DE_CONTROL_ROP_MASK                             0xff
 
 /* Pseudo fields. */
 
-#define DE_CONTROL_SHORT_STROKE_DIR                     27:24
-#define DE_CONTROL_SHORT_STROKE_DIR_225                 0
-#define DE_CONTROL_SHORT_STROKE_DIR_135                 1
-#define DE_CONTROL_SHORT_STROKE_DIR_315                 2
-#define DE_CONTROL_SHORT_STROKE_DIR_45                  3
-#define DE_CONTROL_SHORT_STROKE_DIR_270                 4
-#define DE_CONTROL_SHORT_STROKE_DIR_90                  5
-#define DE_CONTROL_SHORT_STROKE_DIR_180                 8
-#define DE_CONTROL_SHORT_STROKE_DIR_0                   10
-#define DE_CONTROL_ROTATION                             25:24
-#define DE_CONTROL_ROTATION_0                           0
-#define DE_CONTROL_ROTATION_270                         1
-#define DE_CONTROL_ROTATION_90                          2
-#define DE_CONTROL_ROTATION_180                         3
+#define DE_CONTROL_SHORT_STROKE_DIR_MASK                (0xf << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_225                 (0x0 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_135                 (0x1 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_315                 (0x2 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_45                  (0x3 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_270                 (0x4 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_90                  (0x5 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_180                 (0x8 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_0                   (0xa << 24)
+#define DE_CONTROL_ROTATION_MASK                        (0x3 << 24)
+#define DE_CONTROL_ROTATION_0                           (0x0 << 24)
+#define DE_CONTROL_ROTATION_270                         (0x1 << 24)
+#define DE_CONTROL_ROTATION_90                          (0x2 << 24)
+#define DE_CONTROL_ROTATION_180                         (0x3 << 24)
 
 #define DE_PITCH                                        0x000010
-#define DE_PITCH_DESTINATION                            28:16
-#define DE_PITCH_SOURCE                                 12:0
+#define DE_PITCH_DESTINATION_SHIFT                      16
+#define DE_PITCH_DESTINATION_MASK                       (0x1fff << 16)
+#define DE_PITCH_SOURCE_MASK                            0x1fff
 
 #define DE_FOREGROUND                                   0x000014
-#define DE_FOREGROUND_COLOR                             31:0
+#define DE_FOREGROUND_COLOR_MASK                        0xffffffff
 
 #define DE_BACKGROUND                                   0x000018
-#define DE_BACKGROUND_COLOR                             31:0
+#define DE_BACKGROUND_COLOR_MASK                        0xffffffff
 
 #define DE_STRETCH_FORMAT                               0x00001C
-#define DE_STRETCH_FORMAT_PATTERN_XY                    30:30
-#define DE_STRETCH_FORMAT_PATTERN_XY_NORMAL             0
-#define DE_STRETCH_FORMAT_PATTERN_XY_OVERWRITE          1
-#define DE_STRETCH_FORMAT_PATTERN_Y                     29:27
-#define DE_STRETCH_FORMAT_PATTERN_X                     25:23
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT                  21:20
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8                0
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16               1
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32               2
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24               3
-
-#define DE_STRETCH_FORMAT_ADDRESSING                    19:16
-#define DE_STRETCH_FORMAT_ADDRESSING_XY                 0
-#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR             15
-#define DE_STRETCH_FORMAT_SOURCE_HEIGHT                 11:0
+#define DE_STRETCH_FORMAT_PATTERN_XY                    BIT(30)
+#define DE_STRETCH_FORMAT_PATTERN_Y_SHIFT               27
+#define DE_STRETCH_FORMAT_PATTERN_Y_MASK                (0x7 << 27)
+#define DE_STRETCH_FORMAT_PATTERN_X_SHIFT               23
+#define DE_STRETCH_FORMAT_PATTERN_X_MASK                (0x7 << 23)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT            20
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK             (0x3 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8                (0x0 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16               (0x1 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32               (0x2 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24               (0x3 << 20)
+#define DE_STRETCH_FORMAT_ADDRESSING_SHIFT              16
+#define DE_STRETCH_FORMAT_ADDRESSING_MASK               (0xf << 16)
+#define DE_STRETCH_FORMAT_ADDRESSING_XY                 (0x0 << 16)
+#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR             (0xf << 16)
+#define DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK            0xfff
 
 #define DE_COLOR_COMPARE                                0x000020
-#define DE_COLOR_COMPARE_COLOR                          23:0
+#define DE_COLOR_COMPARE_COLOR_MASK                     0xffffff
 
 #define DE_COLOR_COMPARE_MASK                           0x000024
-#define DE_COLOR_COMPARE_MASK_MASKS                     23:0
+#define DE_COLOR_COMPARE_MASK_MASK                      0xffffff
 
 #define DE_MASKS                                        0x000028
-#define DE_MASKS_BYTE_MASK                              31:16
-#define DE_MASKS_BIT_MASK                               15:0
+#define DE_MASKS_BYTE_MASK                              (0xffff << 16)
+#define DE_MASKS_BIT_MASK                               0xffff
 
 #define DE_CLIP_TL                                      0x00002C
-#define DE_CLIP_TL_TOP                                  31:16
-#define DE_CLIP_TL_STATUS                               13:13
-#define DE_CLIP_TL_STATUS_DISABLE                       0
-#define DE_CLIP_TL_STATUS_ENABLE                        1
-#define DE_CLIP_TL_INHIBIT                              12:12
-#define DE_CLIP_TL_INHIBIT_OUTSIDE                      0
-#define DE_CLIP_TL_INHIBIT_INSIDE                       1
-#define DE_CLIP_TL_LEFT                                 11:0
+#define DE_CLIP_TL_TOP_MASK                             (0xffff << 16)
+#define DE_CLIP_TL_STATUS                               BIT(13)
+#define DE_CLIP_TL_INHIBIT                              BIT(12)
+#define DE_CLIP_TL_LEFT_MASK                            0xfff
 
 #define DE_CLIP_BR                                      0x000030
-#define DE_CLIP_BR_BOTTOM                               31:16
-#define DE_CLIP_BR_RIGHT                                12:0
+#define DE_CLIP_BR_BOTTOM_MASK                          (0xffff << 16)
+#define DE_CLIP_BR_RIGHT_MASK                           0x1fff
 
 #define DE_MONO_PATTERN_LOW                             0x000034
-#define DE_MONO_PATTERN_LOW_PATTERN                     31:0
+#define DE_MONO_PATTERN_LOW_PATTERN_MASK                0xffffffff
 
 #define DE_MONO_PATTERN_HIGH                            0x000038
-#define DE_MONO_PATTERN_HIGH_PATTERN                    31:0
+#define DE_MONO_PATTERN_HIGH_PATTERN_MASK               0xffffffff
 
 #define DE_WINDOW_WIDTH                                 0x00003C
-#define DE_WINDOW_WIDTH_DESTINATION                     28:16
-#define DE_WINDOW_WIDTH_SOURCE                          12:0
+#define DE_WINDOW_WIDTH_DST_SHIFT                       16
+#define DE_WINDOW_WIDTH_DST_MASK                        (0x1fff << 16)
+#define DE_WINDOW_WIDTH_SRC_MASK                        0x1fff
 
 #define DE_WINDOW_SOURCE_BASE                           0x000040
-#define DE_WINDOW_SOURCE_BASE_EXT                       27:27
-#define DE_WINDOW_SOURCE_BASE_EXT_LOCAL                 0
-#define DE_WINDOW_SOURCE_BASE_EXT_EXTERNAL              1
-#define DE_WINDOW_SOURCE_BASE_CS                        26:26
-#define DE_WINDOW_SOURCE_BASE_CS_0                      0
-#define DE_WINDOW_SOURCE_BASE_CS_1                      1
-#define DE_WINDOW_SOURCE_BASE_ADDRESS                   25:0
+#define DE_WINDOW_SOURCE_BASE_EXT                       BIT(27)
+#define DE_WINDOW_SOURCE_BASE_CS                        BIT(26)
+#define DE_WINDOW_SOURCE_BASE_ADDRESS_MASK              0x3ffffff
 
 #define DE_WINDOW_DESTINATION_BASE                      0x000044
-#define DE_WINDOW_DESTINATION_BASE_EXT                  27:27
-#define DE_WINDOW_DESTINATION_BASE_EXT_LOCAL            0
-#define DE_WINDOW_DESTINATION_BASE_EXT_EXTERNAL         1
-#define DE_WINDOW_DESTINATION_BASE_CS                   26:26
-#define DE_WINDOW_DESTINATION_BASE_CS_0                 0
-#define DE_WINDOW_DESTINATION_BASE_CS_1                 1
-#define DE_WINDOW_DESTINATION_BASE_ADDRESS              25:0
+#define DE_WINDOW_DESTINATION_BASE_EXT                  BIT(27)
+#define DE_WINDOW_DESTINATION_BASE_CS                   BIT(26)
+#define DE_WINDOW_DESTINATION_BASE_ADDRESS_MASK         0x3ffffff
 
 #define DE_ALPHA                                        0x000048
-#define DE_ALPHA_VALUE                                  7:0
+#define DE_ALPHA_VALUE_MASK                             0xff
 
 #define DE_WRAP                                         0x00004C
-#define DE_WRAP_X                                       31:16
-#define DE_WRAP_Y                                       15:0
+#define DE_WRAP_X_MASK                                  (0xffff << 16)
+#define DE_WRAP_Y_MASK                                  0xffff
 
 #define DE_STATUS                                       0x000050
-#define DE_STATUS_CSC                                   1:1
-#define DE_STATUS_CSC_CLEAR                             0
-#define DE_STATUS_CSC_NOT_ACTIVE                        0
-#define DE_STATUS_CSC_ACTIVE                            1
-#define DE_STATUS_2D                                    0:0
-#define DE_STATUS_2D_CLEAR                              0
-#define DE_STATUS_2D_NOT_ACTIVE                         0
-#define DE_STATUS_2D_ACTIVE                             1
-
-
+#define DE_STATUS_CSC                                   BIT(1)
+#define DE_STATUS_2D                                    BIT(0)
 
 /* blt direction */
 #define TOP_TO_BOTTOM 0
@@ -268,7 +218,7 @@
 		 u32 dx,
 		 u32 dy,       /* Starting coordinate of destination surface */
 		 u32 width,
-		 u32 height,   /* width and height of rectange in pixel value */
+		 u32 height,   /* width and height of rectangle in pixel value */
 		 u32 fColor,   /* Foreground color (corresponding to a 1 in the monochrome data */
 		 u32 bColor,   /* Background color (corresponding to a 0 in the monochrome data */
 		 u32 rop2);
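The header rewrite above drops the driver's private end:start field notation in favour of plain BIT()/shift/mask constants, so register words can be built and decoded with ordinary C bit operations. A minimal sketch of the new style, using only constants defined above (build_de_control() and de_control_command() are illustrative helpers, not part of the patch):

/* Compose a DE_CONTROL word for a ROP2 rectangle fill (illustrative only). */
static u32 build_de_control(u32 rop)
{
	u32 ctrl = 0;

	ctrl |= DE_CONTROL_STATUS;			/* start the 2D engine */
	ctrl |= DE_CONTROL_COMMAND_RECTANGLE_FILL;	/* value is pre-shifted into bits 20:16 */
	ctrl |= DE_CONTROL_ROP_SELECT;			/* select ROP2 rather than ROP3 */
	ctrl |= rop & DE_CONTROL_ROP_MASK;		/* ROP code lives in the low byte */
	return ctrl;
}

/* Reading a multi-bit field back uses the matching mask/shift pair. */
static u32 de_control_command(u32 ctrl)
{
	return (ctrl & DE_CONTROL_COMMAND_MASK) >> DE_CONTROL_COMMAND_SHIFT;
}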
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index 3b7ce92..2d348c6 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -16,7 +16,6 @@
 #include <linux/screen_info.h>
 
 #include "sm750.h"
-#include "sm750_help.h"
 #include "sm750_cursor.h"
 
 
@@ -28,33 +27,25 @@
 
 /* cursor control for voyager and 718/750*/
 #define HWC_ADDRESS                         0x0
-#define HWC_ADDRESS_ENABLE                  31:31
-#define HWC_ADDRESS_ENABLE_DISABLE          0
-#define HWC_ADDRESS_ENABLE_ENABLE           1
-#define HWC_ADDRESS_EXT                     27:27
-#define HWC_ADDRESS_EXT_LOCAL               0
-#define HWC_ADDRESS_EXT_EXTERNAL            1
-#define HWC_ADDRESS_CS                      26:26
-#define HWC_ADDRESS_CS_0                    0
-#define HWC_ADDRESS_CS_1                    1
-#define HWC_ADDRESS_ADDRESS                 25:0
+#define HWC_ADDRESS_ENABLE                  BIT(31)
+#define HWC_ADDRESS_EXT                     BIT(27)
+#define HWC_ADDRESS_CS                      BIT(26)
+#define HWC_ADDRESS_ADDRESS_MASK            0x3ffffff
 
 #define HWC_LOCATION                        0x4
-#define HWC_LOCATION_TOP                    27:27
-#define HWC_LOCATION_TOP_INSIDE             0
-#define HWC_LOCATION_TOP_OUTSIDE            1
-#define HWC_LOCATION_Y                      26:16
-#define HWC_LOCATION_LEFT                   11:11
-#define HWC_LOCATION_LEFT_INSIDE            0
-#define HWC_LOCATION_LEFT_OUTSIDE           1
-#define HWC_LOCATION_X                      10:0
+#define HWC_LOCATION_TOP                    BIT(27)
+#define HWC_LOCATION_Y_SHIFT                16
+#define HWC_LOCATION_Y_MASK                 (0x7ff << 16)
+#define HWC_LOCATION_LEFT                   BIT(11)
+#define HWC_LOCATION_X_MASK                 0x7ff
 
 #define HWC_COLOR_12                        0x8
-#define HWC_COLOR_12_2_RGB565               31:16
-#define HWC_COLOR_12_1_RGB565               15:0
+#define HWC_COLOR_12_2_RGB565_SHIFT         16
+#define HWC_COLOR_12_2_RGB565_MASK          (0xffff << 16)
+#define HWC_COLOR_12_1_RGB565_MASK          0xffff
 
 #define HWC_COLOR_3                         0xC
-#define HWC_COLOR_3_RGB565                  15:0
+#define HWC_COLOR_3_RGB565_MASK             0xffff
 
 
 /* hw_cursor_xxx works for voyager,718 and 750 */
@@ -62,9 +53,7 @@
 {
 	u32 reg;
 
-	reg = FIELD_VALUE(0, HWC_ADDRESS, ADDRESS, cursor->offset)|
-			FIELD_SET(0, HWC_ADDRESS, EXT, LOCAL)|
-			FIELD_SET(0, HWC_ADDRESS, ENABLE, ENABLE);
+	reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
 	POKE32(HWC_ADDRESS, reg);
 }
 void hw_cursor_disable(struct lynx_cursor *cursor)
@@ -83,14 +72,17 @@
 {
 	u32 reg;
 
-	reg = FIELD_VALUE(0, HWC_LOCATION, Y, y)|
-			FIELD_VALUE(0, HWC_LOCATION, X, x);
+	reg = (((y << HWC_LOCATION_Y_SHIFT) & HWC_LOCATION_Y_MASK) |
+		(x & HWC_LOCATION_X_MASK));
 	POKE32(HWC_LOCATION, reg);
 }
 void hw_cursor_setColor(struct lynx_cursor *cursor,
 						u32 fg, u32 bg)
 {
-	POKE32(HWC_COLOR_12, (fg<<16)|(bg&0xffff));
+	u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
+		HWC_COLOR_12_2_RGB565_MASK;
+
+	POKE32(HWC_COLOR_12, reg | (bg & HWC_COLOR_12_1_RGB565_MASK));
 	POKE32(HWC_COLOR_3, 0xffe0);
 }
 
@@ -115,15 +107,6 @@
 	pstart = cursor->vstart;
 	pbuffer = pstart;
 
-/*
-	if(odd &1){
-		hw_cursor_setData2(cursor,rop,pcol,pmsk);
-	}
-	odd++;
-	if(odd > 0xfffffff0)
-		odd=0;
-*/
-
 	for (i = 0; i < count; i++) {
 		color = *pcol++;
 		mask = *pmsk++;
@@ -143,8 +126,7 @@
 		iowrite16(data, pbuffer);
 
 		/* assume pitch is 1,2,4,8,...*/
-		if ((i+1) % pitch == 0)
-		{
+		if ((i + 1) % pitch == 0) {
 			/* need a return */
 			pstart += offset;
 			pbuffer = pstart;
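hw_cursor_setPos() and hw_cursor_setColor() now pack their registers directly with the shift/mask constants above. Reading a value back follows the same pattern in reverse; a hedged sketch (hw_cursor_getPos() is hypothetical and not added by this patch):

static void hw_cursor_getPos(struct lynx_cursor *cursor, u32 *x, u32 *y)
{
	/* assumes a PEEK32() read accessor analogous to the POKE32() above */
	u32 reg = PEEK32(HWC_LOCATION);

	*y = (reg & HWC_LOCATION_Y_MASK) >> HWC_LOCATION_Y_SHIFT;
	*x = reg & HWC_LOCATION_X_MASK;
}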
diff --git a/drivers/staging/sm750fb/sm750_help.h b/drivers/staging/sm750fb/sm750_help.h
deleted file mode 100644
index c070cf25..0000000
--- a/drivers/staging/sm750fb/sm750_help.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef LYNX_HELP_H__
-#define LYNX_HELP_H__
-
-/* Internal macros */
-#define _F_START(f)             (0 ? f)
-#define _F_END(f)               (1 ? f)
-#define _F_SIZE(f)              (1 + _F_END(f) - _F_START(f))
-#define _F_MASK(f)              (((1 << _F_SIZE(f)) - 1) << _F_START(f))
-#define _F_NORMALIZE(v, f)      (((v) & _F_MASK(f)) >> _F_START(f))
-#define _F_DENORMALIZE(v, f)    (((v) << _F_START(f)) & _F_MASK(f))
-
-/* Global macros */
-#define FIELD_GET(x, reg, field) \
-( \
-	_F_NORMALIZE((x), reg ## _ ## field) \
-)
-
-#define FIELD_SET(x, reg, field, value) \
-( \
-	(x & ~_F_MASK(reg ## _ ## field)) \
-	| _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
-)
-
-#define FIELD_VALUE(x, reg, field, value) \
-( \
-	(x & ~_F_MASK(reg ## _ ## field)) \
-	| _F_DENORMALIZE(value, reg ## _ ## field) \
-)
-
-#define FIELD_CLEAR(reg, field) \
-( \
-	~_F_MASK(reg ## _ ## field) \
-)
-
-/* Field Macros */
-#define FIELD_START(field)              (0 ? field)
-#define FIELD_END(field)                (1 ? field)
-#define FIELD_SIZE(field)               (1 + FIELD_END(field) - FIELD_START(field))
-#define FIELD_MASK(field)               (((1 << (FIELD_SIZE(field)-1)) | ((1 << (FIELD_SIZE(field)-1)) - 1)) << FIELD_START(field))
-
-static inline unsigned int absDiff(unsigned int a, unsigned int b)
-{
-	if (a < b)
-		return b-a;
-	else
-		return a-b;
-}
-
-/* n / d + 1 / 2 = (2n + d) / 2d */
-#define roundedDiv(num, denom)	((2 * (num) + (denom)) / (2 * (denom)))
-#define MHz(x) ((x) * 1000000)
-
-
-
-
-#endif
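The deleted helper worked by giving each field the textual form end:start, so that the (0 ? f) and (1 ? f) ternaries in _F_START()/_F_END() picked out the two bit positions. A short worked expansion, using the old DE_SOURCE_X_K1 (29:16) definition, shows why the new explicit masks are equivalent; the roundedDiv() arithmetic is spelled out the same way:

/*
 * Old notation:        #define DE_SOURCE_X_K1   29:16
 *
 * _F_START(DE_SOURCE_X_K1) -> (0 ? 29 : 16)           == 16
 * _F_END(DE_SOURCE_X_K1)   -> (1 ? 29 : 16)           == 29
 * _F_SIZE(...)             -> 1 + 29 - 16             == 14
 * _F_MASK(...)             -> ((1 << 14) - 1) << 16   == 0x3fff0000
 *
 * so FIELD_VALUE(0, DE_SOURCE, X_K1, v) == (v << 16) & 0x3fff0000,
 * which matches the new DE_SOURCE_X_K1_SHIFT/DE_SOURCE_X_K1_MASK pair
 * (0x3fff << 16).
 *
 * Likewise roundedDiv(n, d) rounds to nearest via (2n + d) / (2d),
 * e.g. roundedDiv(7, 2) == (14 + 2) / 4 == 4.
 */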
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 41822c6..2daeedd 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -1,4 +1,3 @@
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -108,65 +107,62 @@
 	/* for sm718,open pci burst */
 	if (sm750_dev->devid == 0x718) {
 		POKE32(SYSTEM_CTRL,
-				FIELD_SET(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, PCI_BURST, ON));
+		       PEEK32(SYSTEM_CTRL) | SYSTEM_CTRL_PCI_BURST);
 	}
 
 	if (getChipType() != SM750LE) {
+		unsigned int val;
 		/* does user need CRT ?*/
 		if (sm750_dev->nocrt) {
 			POKE32(MISC_CTRL,
-					FIELD_SET(PEEK32(MISC_CTRL),
-					MISC_CTRL,
-					DAC_POWER, OFF));
+			       PEEK32(MISC_CTRL) | MISC_CTRL_DAC_POWER_OFF);
 			/* shut off dpms */
-			POKE32(SYSTEM_CTRL,
-					FIELD_SET(PEEK32(SYSTEM_CTRL),
-					SYSTEM_CTRL,
-					DPMS, VNHN));
+			val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK;
+			val |= SYSTEM_CTRL_DPMS_VPHN;
+			POKE32(SYSTEM_CTRL, val);
 		} else {
 			POKE32(MISC_CTRL,
-					FIELD_SET(PEEK32(MISC_CTRL),
-					MISC_CTRL,
-					DAC_POWER, ON));
+			       PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF);
 			/* turn on dpms */
-			POKE32(SYSTEM_CTRL,
-					FIELD_SET(PEEK32(SYSTEM_CTRL),
-					SYSTEM_CTRL,
-					DPMS, VPHP));
+			val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK;
+			val |= SYSTEM_CTRL_DPMS_VPHP;
+			POKE32(SYSTEM_CTRL, val);
 		}
 
+		val = PEEK32(PANEL_DISPLAY_CTRL) &
+			~(PANEL_DISPLAY_CTRL_DUAL_DISPLAY |
+			  PANEL_DISPLAY_CTRL_DOUBLE_PIXEL);
 		switch (sm750_dev->pnltype) {
-		case sm750_doubleTFT:
 		case sm750_24TFT:
+			break;
+		case sm750_doubleTFT:
+			val |= PANEL_DISPLAY_CTRL_DOUBLE_PIXEL;
+			break;
 		case sm750_dualTFT:
-		POKE32(PANEL_DISPLAY_CTRL,
-			FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL),
-						PANEL_DISPLAY_CTRL,
-						TFT_DISP,
-						sm750_dev->pnltype));
-		break;
+			val |= PANEL_DISPLAY_CTRL_DUAL_DISPLAY;
+			break;
 		}
+		POKE32(PANEL_DISPLAY_CTRL, val);
 	} else {
-		/* for 750LE ,no DVI chip initilization makes Monitor no signal */
+		/* for the 750LE, missing DVI chip initialization leaves the monitor with no signal */
 		/* Set up GPIO for software I2C to program DVI chip in the
 		   Xilinx SP605 board, in order to have video signal.
 		 */
-	sm750_sw_i2c_init(0, 1);
+		sm750_sw_i2c_init(0, 1);
 
-
-	/* Customer may NOT use CH7301 DVI chip, which has to be
-	   initialized differently.
-	*/
-	if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
+		/* Customer may NOT use CH7301 DVI chip, which has to be
+		 * initialized differently.
+		 */
+		if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
 		/* The following register values for CH7301 are from
 		   Chrontel app note and our experiment.
 		*/
 			pr_info("yes,CH7301 DVI chip found\n");
-		sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
-		sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
-		sm750_sw_i2c_write_reg(0xec, 0x49, 0xC0);
+			sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
+			sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
+			sm750_sw_i2c_write_reg(0xec, 0x49, 0xC0);
 			pr_info("okay,CH7301 DVI chip setup done\n");
-	}
+		}
 	}
 
 	/* init 2d engine */
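With FIELD_SET() gone, multi-bit fields such as DPMS are updated with the usual read-modify-write idiom: clear the field with its mask, then OR in the desired (already shifted) value. A minimal sketch of the pattern used above (set_crt_dpms() is a hypothetical wrapper, not a function added here):

static void set_crt_dpms(unsigned int dpms)
{
	unsigned int val = PEEK32(SYSTEM_CTRL);

	val &= ~SYSTEM_CTRL_DPMS_MASK;	/* drop the previous DPMS state */
	val |= dpms;			/* e.g. SYSTEM_CTRL_DPMS_VPHP or _VNHN */
	POKE32(SYSTEM_CTRL, val);
}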
@@ -310,53 +306,51 @@
 	if (crtc->channel != sm750_secondary) {
 		/* set pitch, offset ,width,start address ,etc... */
 		POKE32(PANEL_FB_ADDRESS,
-			FIELD_SET(0, PANEL_FB_ADDRESS, STATUS, CURRENT)|
-			FIELD_SET(0, PANEL_FB_ADDRESS, EXT, LOCAL)|
-			FIELD_VALUE(0, PANEL_FB_ADDRESS, ADDRESS, crtc->oScreen));
+		       crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
 
 		reg = var->xres * (var->bits_per_pixel >> 3);
 		/* crtc->channel is not equal to par->index on numeric,be aware of that */
 		reg = ALIGN(reg, crtc->line_pad);
+		reg = (reg << PANEL_FB_WIDTH_WIDTH_SHIFT) &
+		       PANEL_FB_WIDTH_WIDTH_MASK;
+		reg |= (fix->line_length & PANEL_FB_WIDTH_OFFSET_MASK);
+		POKE32(PANEL_FB_WIDTH, reg);
 
-		POKE32(PANEL_FB_WIDTH,
-			FIELD_VALUE(0, PANEL_FB_WIDTH, WIDTH, reg)|
-			FIELD_VALUE(0, PANEL_FB_WIDTH, OFFSET, fix->line_length));
+		reg = ((var->xres - 1) << PANEL_WINDOW_WIDTH_WIDTH_SHIFT) &
+		       PANEL_WINDOW_WIDTH_WIDTH_MASK;
+		reg |= (var->xoffset & PANEL_WINDOW_WIDTH_X_MASK);
+		POKE32(PANEL_WINDOW_WIDTH, reg);
 
-		POKE32(PANEL_WINDOW_WIDTH,
-			FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres - 1)|
-			FIELD_VALUE(0, PANEL_WINDOW_WIDTH, X, var->xoffset));
-
-		POKE32(PANEL_WINDOW_HEIGHT,
-			FIELD_VALUE(0, PANEL_WINDOW_HEIGHT, HEIGHT, var->yres_virtual - 1)|
-			FIELD_VALUE(0, PANEL_WINDOW_HEIGHT, Y, var->yoffset));
+		reg = ((var->yres_virtual - 1) <<
+		       PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT);
+		reg &= PANEL_WINDOW_HEIGHT_HEIGHT_MASK;
+		reg |= (var->yoffset & PANEL_WINDOW_HEIGHT_Y_MASK);
+		POKE32(PANEL_WINDOW_HEIGHT, reg);
 
 		POKE32(PANEL_PLANE_TL, 0);
 
-		POKE32(PANEL_PLANE_BR,
-			FIELD_VALUE(0, PANEL_PLANE_BR, BOTTOM, var->yres - 1)|
-			FIELD_VALUE(0, PANEL_PLANE_BR, RIGHT, var->xres - 1));
+		reg = ((var->yres - 1) << PANEL_PLANE_BR_BOTTOM_SHIFT) &
+		       PANEL_PLANE_BR_BOTTOM_MASK;
+		reg |= ((var->xres - 1) & PANEL_PLANE_BR_RIGHT_MASK);
+		POKE32(PANEL_PLANE_BR, reg);
 
 		/* set pixel format */
 		reg = PEEK32(PANEL_DISPLAY_CTRL);
-		POKE32(PANEL_DISPLAY_CTRL,
-			FIELD_VALUE(reg,
-			PANEL_DISPLAY_CTRL, FORMAT,
-			(var->bits_per_pixel >> 4)
-			));
+		POKE32(PANEL_DISPLAY_CTRL, reg | (var->bits_per_pixel >> 4));
 	} else {
 		/* not implemented now */
 		POKE32(CRT_FB_ADDRESS, crtc->oScreen);
 		reg = var->xres * (var->bits_per_pixel >> 3);
 		/* crtc->channel is not equal to par->index on numeric,be aware of that */
-		reg = ALIGN(reg, crtc->line_pad);
-
-		POKE32(CRT_FB_WIDTH,
-			FIELD_VALUE(0, CRT_FB_WIDTH, WIDTH, reg)|
-			FIELD_VALUE(0, CRT_FB_WIDTH, OFFSET, fix->line_length));
+		reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
+		reg &= CRT_FB_WIDTH_WIDTH_MASK;
+		reg |= (fix->line_length & CRT_FB_WIDTH_OFFSET_MASK);
+		POKE32(CRT_FB_WIDTH, reg);
 
 		/* SET PIXEL FORMAT */
 		reg = PEEK32(CRT_DISPLAY_CTRL);
-		reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, FORMAT, var->bits_per_pixel >> 4);
+		reg |= ((var->bits_per_pixel >> 4) &
+			CRT_DISPLAY_CTRL_FORMAT_MASK);
 		POKE32(CRT_DISPLAY_CTRL, reg);
 
 	}
@@ -382,31 +376,36 @@
 	switch (blank) {
 	case FB_BLANK_UNBLANK:
 		dpms = CRT_DISPLAY_CTRL_DPMS_0;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+		crtdb = 0;
 		break;
 	case FB_BLANK_NORMAL:
 		dpms = CRT_DISPLAY_CTRL_DPMS_0;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
 		dpms = CRT_DISPLAY_CTRL_DPMS_2;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
 		dpms = CRT_DISPLAY_CTRL_DPMS_1;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	case FB_BLANK_POWERDOWN:
 		dpms = CRT_DISPLAY_CTRL_DPMS_3;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	if (output->paths & sm750_crt) {
-		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, DPMS, dpms));
-		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
+		unsigned int val;
+
+		val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK;
+		POKE32(CRT_DISPLAY_CTRL, val | dpms);
+
+		val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_BLANK;
+		POKE32(CRT_DISPLAY_CTRL, val | crtdb);
 	}
 	return 0;
 }
@@ -419,42 +418,45 @@
 
 	switch (blank) {
 	case FB_BLANK_UNBLANK:
-		pr_info("flag = FB_BLANK_UNBLANK\n");
+		pr_debug("flag = FB_BLANK_UNBLANK\n");
 		dpms = SYSTEM_CTRL_DPMS_VPHP;
-		pps = PANEL_DISPLAY_CTRL_DATA_ENABLE;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+		pps = PANEL_DISPLAY_CTRL_DATA;
 		break;
 	case FB_BLANK_NORMAL:
-		pr_info("flag = FB_BLANK_NORMAL\n");
+		pr_debug("flag = FB_BLANK_NORMAL\n");
 		dpms = SYSTEM_CTRL_DPMS_VPHP;
-		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
 		dpms = SYSTEM_CTRL_DPMS_VNHP;
-		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
 		dpms = SYSTEM_CTRL_DPMS_VPHN;
-		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	case FB_BLANK_POWERDOWN:
 		dpms = SYSTEM_CTRL_DPMS_VNHN;
-		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		crtdb = CRT_DISPLAY_CTRL_BLANK;
 		break;
 	}
 
 	if (output->paths & sm750_crt) {
+		unsigned int val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK;
 
-		POKE32(SYSTEM_CTRL, FIELD_VALUE(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, DPMS, dpms));
-		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
+		POKE32(SYSTEM_CTRL, val | dpms);
+
+		val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_BLANK;
+		POKE32(CRT_DISPLAY_CTRL, val | crtdb);
 	}
 
-	if (output->paths & sm750_panel)
-		POKE32(PANEL_DISPLAY_CTRL, FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, DATA, pps));
+	if (output->paths & sm750_panel) {
+		unsigned int val = PEEK32(PANEL_DISPLAY_CTRL);
+
+		val &= ~PANEL_DISPLAY_CTRL_DATA;
+		val |= pps;
+		POKE32(PANEL_DISPLAY_CTRL, val);
+	}
 
 	return 0;
 }
@@ -468,21 +470,21 @@
 
 	if (getChipType() == SM750LE) {
 		reg = PEEK32(DE_STATE1);
-		reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, ON);
+		reg |= DE_STATE1_DE_ABORT;
 		POKE32(DE_STATE1, reg);
 
 		reg = PEEK32(DE_STATE1);
-		reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, OFF);
+		reg &= ~DE_STATE1_DE_ABORT;
 		POKE32(DE_STATE1, reg);
 
 	} else {
 		/* engine reset */
 		reg = PEEK32(SYSTEM_CTRL);
-	    reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, ON);
+		reg |= SYSTEM_CTRL_DE_ABORT;
 		POKE32(SYSTEM_CTRL, reg);
 
 		reg = PEEK32(SYSTEM_CTRL);
-		reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, OFF);
+		reg &= ~SYSTEM_CTRL_DE_ABORT;
 		POKE32(SYSTEM_CTRL, reg);
 	}
 
@@ -493,15 +495,15 @@
 int hw_sm750le_deWait(void)
 {
 	int i = 0x10000000;
+	unsigned int mask = DE_STATE2_DE_STATUS_BUSY | DE_STATE2_DE_FIFO_EMPTY |
+		DE_STATE2_DE_MEM_FIFO_EMPTY;
 
 	while (i--) {
-		unsigned int dwVal = PEEK32(DE_STATE2);
+		unsigned int val = PEEK32(DE_STATE2);
 
-		if ((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) &&
-			(FIELD_GET(dwVal, DE_STATE2, DE_FIFO)  == DE_STATE2_DE_FIFO_EMPTY) &&
-			(FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY)) {
+		if ((val & mask) ==
+		    (DE_STATE2_DE_FIFO_EMPTY | DE_STATE2_DE_MEM_FIFO_EMPTY))
 			return 0;
-		}
 	}
 	/* timeout error */
 	return -1;
@@ -511,15 +513,16 @@
 int hw_sm750_deWait(void)
 {
 	int i = 0x10000000;
+	unsigned int mask = SYSTEM_CTRL_DE_STATUS_BUSY |
+		SYSTEM_CTRL_DE_FIFO_EMPTY |
+		SYSTEM_CTRL_DE_MEM_FIFO_EMPTY;
 
 	while (i--) {
-		unsigned int dwVal = PEEK32(SYSTEM_CTRL);
+		unsigned int val = PEEK32(SYSTEM_CTRL);
 
-		if ((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) &&
-			(FIELD_GET(dwVal, SYSTEM_CTRL, DE_FIFO)  == SYSTEM_CTRL_DE_FIFO_EMPTY) &&
-			(FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY)) {
+		if ((val & mask) ==
+		    (SYSTEM_CTRL_DE_FIFO_EMPTY | SYSTEM_CTRL_DE_MEM_FIFO_EMPTY))
 			return 0;
-		}
 	}
 	/* timeout error */
 	return -1;
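Both wait loops now check the three drawing-engine status bits with a single mask comparison: BUSY must be clear while both FIFO-empty bits are set. Written out as a predicate (a hypothetical helper, shown only to make the condition explicit):

static bool sm750_de_idle(unsigned int status)
{
	unsigned int mask = SYSTEM_CTRL_DE_STATUS_BUSY |
			    SYSTEM_CTRL_DE_FIFO_EMPTY |
			    SYSTEM_CTRL_DE_MEM_FIFO_EMPTY;

	/* idle: not busy, command FIFO empty, memory FIFO empty */
	return (status & mask) ==
	       (SYSTEM_CTRL_DE_FIFO_EMPTY | SYSTEM_CTRL_DE_MEM_FIFO_EMPTY);
}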
@@ -541,12 +544,12 @@
 	total += crtc->oScreen;
 	if (crtc->channel == sm750_primary) {
 		POKE32(PANEL_FB_ADDRESS,
-			FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS),
-				PANEL_FB_ADDRESS, ADDRESS, total));
+		       PEEK32(PANEL_FB_ADDRESS) |
+		       (total & PANEL_FB_ADDRESS_ADDRESS_MASK));
 	} else {
 		POKE32(CRT_FB_ADDRESS,
-			FIELD_VALUE(PEEK32(CRT_FB_ADDRESS),
-				CRT_FB_ADDRESS, ADDRESS, total));
+		       PEEK32(CRT_FB_ADDRESS) |
+		       (total & CRT_FB_ADDRESS_ADDRESS_MASK));
 	}
 	return 0;
 }
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index 8565c23..723d5df 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -27,7 +27,7 @@
 	for (i = 0; i < MAX_NR_CONSOLES; i++) {
 		if (speakup_console[i] && speakup_console[i]->tty_stopped)
 			continue;
-		if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL))
+		if ((vc_cons[i].d) && (vc_cons[i].d->port.tty))
 			start_tty(vc_cons[i].d->port.tty);
 	}
 }
@@ -38,7 +38,7 @@
 	int i;
 
 	for (i = 0; i < MAX_NR_CONSOLES; i++)
-		if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL))
+		if ((vc_cons[i].d) && (vc_cons[i].d->port.tty))
 			stop_tty(vc_cons[i].d->port.tty);
 }
 
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index d1ffdf4..8498971 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -76,9 +76,9 @@
 	if (misc_registered != 0)
 		return;
 /* zero it so if register fails, deregister will not ref invalid ptrs */
-	if (misc_register(&synth_device))
+	if (misc_register(&synth_device)) {
 		pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
-	else {
+	} else {
 		pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
 			MISC_MAJOR, SYNTH_MINOR);
 		misc_registered = 1;
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 5e1f16c..8f058b4 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -12,10 +12,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/types.h>
 #include <linux/slab.h>
@@ -28,7 +24,7 @@
 #define PRESSED 1
 #define RELEASED 0
 
-static DEFINE_PER_CPU(bool, reporting_keystroke);
+static DEFINE_PER_CPU(int, reporting_keystroke);
 
 static struct input_dev *virt_keyboard;
 
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 12f880e..8960079 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -393,10 +393,7 @@
 
 char *spk_msg_get(enum msg_index_t index)
 {
-	char *ch;
-
-	ch = speakup_msgs[index];
-	return ch;
+	return speakup_msgs[index];
 }
 
 /*
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 02d5c70..ce94cb1 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -14,10 +14,6 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/keyboard.h>
@@ -74,7 +70,7 @@
 		for (i = 0; i < nstates; i++, kp++) {
 			if (!*kp)
 				continue;
-			if ((state_tbl[i]&16) != 0 && *kp == SPK_KEY)
+			if ((state_tbl[i] & 16) != 0 && *kp == SPK_KEY)
 				continue;
 			counters[*kp]++;
 		}
@@ -83,7 +79,7 @@
 		if (counters[i] == 0)
 			continue;
 		key_offsets[i] = offset;
-		offset += (counters[i]+1);
+		offset += (counters[i] + 1);
 		if (offset >= MAXKEYS)
 			break;
 	}
@@ -97,7 +93,7 @@
 			ch1 = *kp++;
 			if (!ch1)
 				continue;
-			if ((state_tbl[i]&16) != 0 && ch1 == SPK_KEY)
+			if ((state_tbl[i] & 16) != 0 && ch1 == SPK_KEY)
 				continue;
 			key = (state_tbl[i] << 8) + ch;
 			counters[ch1]--;
@@ -130,14 +126,14 @@
 	int i;
 	int num_funcs = MSG_FUNCNAMES_END - MSG_FUNCNAMES_START + 1;
 
-	state_tbl = spk_our_keys[0]+SHIFT_TBL_SIZE+2;
+	state_tbl = spk_our_keys[0] + SHIFT_TBL_SIZE + 2;
 	for (i = 0; i < num_funcs; i++) {
 		char *cur_funcname = spk_msg_get(MSG_FUNCNAMES_START + i);
 
 		if (start == *cur_funcname)
 			continue;
 		start = *cur_funcname;
-		letter_offsets[(start&31)-1] = i;
+		letter_offsets[(start & 31) - 1] = i;
 	}
 	return 0;
 }
@@ -160,12 +156,12 @@
 		ch |= 32; /* lower case */
 		if (ch < 'a' || ch > 'z')
 			return -1;
-		if (letter_offsets[ch-'a'] == -1) {
+		if (letter_offsets[ch - 'a'] == -1) {
 			synth_printf(spk_msg_get(MSG_NO_COMMAND), ch);
 			synth_printf("\n");
 			return 1;
 		}
-		cur_item = letter_offsets[ch-'a'];
+		cur_item = letter_offsets[ch - 'a'];
 	} else if (type == KT_CUR) {
 		if (ch == 0
 		    && (MSG_FUNCNAMES_START + cur_item + 1) <=
@@ -186,7 +182,7 @@
 		name = NULL;
 		if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) {
 			synth_printf("%s\n",
-				spk_msg_get(MSG_KEYNAMES_START + key-1));
+				spk_msg_get(MSG_KEYNAMES_START + key - 1));
 			return 1;
 		}
 		for (i = 0; funcvals[i] != 0 && !name; i++) {
@@ -195,7 +191,7 @@
 		}
 		if (!name)
 			return -1;
-		kp = spk_our_keys[key]+1;
+		kp = spk_our_keys[key] + 1;
 		for (i = 0; i < nstates; i++) {
 			if (ch == kp[i])
 				break;
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 30cf973..eb8d65a 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -16,10 +16,6 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #include <linux/kernel.h>
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index a5bbb33..c2c435c 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -8,7 +8,8 @@
 
 #include <linux/serial_core.h>
 /* WARNING:  Do not change this to <linux/serial.h> without testing that
- * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+ * SERIAL_PORT_DFNS does get defined to the appropriate value.
+ */
 #include <asm/serial.h>
 
 #ifndef SERIAL_PORT_DFNS
@@ -92,8 +93,6 @@
 static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
 {
 	unsigned long flags;
-/*printk(KERN_ERR "in irq\n"); */
-/*pr_warn("in IRQ\n"); */
 	int c;
 
 	spin_lock_irqsave(&speakup_info.spinlock, flags);
@@ -101,8 +100,6 @@
 
 		c = inb_p(speakup_info.port_tts+UART_RX);
 		synth->read_buff_add((u_char) c);
-/*printk(KERN_ERR "c = %d\n", c); */
-/*pr_warn("C = %d\n", c); */
 	}
 	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 	return IRQ_HANDLED;
@@ -175,9 +172,6 @@
 	while (!((inb_p(speakup_info.port_tts + UART_MSR)) & UART_MSR_CTS)) {
 		/* CTS */
 		if (--tmout == 0) {
-			/* pr_warn("%s: timed out (cts)\n",
-			 * synth->long_name);
-			 */
 			timeouts++;
 			return 0;
 		}
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index f418893..efb791b 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -14,11 +14,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * this code is specificly written as a driver for the speakup screenreview
  * package and is not a general device driver.
  * This driver is for the Aicom Acent PC internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index af2690f..34f45d3 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * this code is specificly written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 51788f7d..3cbc8a7 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * this code is specificly written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index a9a6872..7a12b840 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 80f8358..570f0c2 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * this code is specificly written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index e0b5db9..1a5cf3d 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
@@ -71,30 +67,30 @@
  * These attributes will appear in /sys/accessibility/speakup/decext.
  */
 static struct kobj_attribute caps_start_attribute =
-	__ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute caps_stop_attribute =
-	__ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute pitch_attribute =
-	__ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute punct_attribute =
-	__ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute rate_attribute =
-	__ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute voice_attribute =
-	__ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute vol_attribute =
-	__ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 static struct kobj_attribute delay_time_attribute =
-	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute direct_attribute =
-	__ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute full_time_attribute =
-	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute jiffy_delta_attribute =
-	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute trigger_time_attribute =
-	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 4893fef..d6479bd 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -24,10 +24,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/jiffies.h>
 #include <linux/sched.h>
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 09063b8..7646567 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 345efd33..38aa401 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * package it's not a general device driver.
  * This driver is for the RC Systems DoubleTalk PC internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index f668112..87d2a80 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -17,10 +17,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 6ea0273..5e2170b 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -13,10 +13,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * package it's not a general device driver.
  * This driver is for the Keynote Gold internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index cc4806b..b474e8b 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index b2eb5b1..6b1d0f5 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -14,9 +14,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
  * this code is specificly written as a driver for the speakup screenreview
  * package and is not a general device driver.
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 1007a61..e449f27 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 6c21e71..fd98d4f 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * specificly written as a driver for the speakup screenreview
  * s not a general device driver.
  */
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 9bb281d..98c4b6f 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -16,10 +16,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #ifndef _SPEAKUP_PRIVATE_H
 #define _SPEAKUP_PRIVATE_H
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index 3116ef7..130e9cb 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -16,10 +16,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef _SPEAKUP_KEYINFO_H
diff --git a/drivers/staging/speakup/spkguide.txt b/drivers/staging/speakup/spkguide.txt
index b699de3..c23549c 100644
--- a/drivers/staging/speakup/spkguide.txt
+++ b/drivers/staging/speakup/spkguide.txt
@@ -1179,7 +1179,6 @@
 
 
  Copyright (C) 2000,2001,2002  Free Software Foundation, Inc.
-     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index ab4fe8d..e1393d2 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -176,7 +176,6 @@
 int spk_set_num_var(int input, struct st_var_header *var, int how)
 {
 	int val;
-	short ret = 0;
 	int *p_val = var->p_val;
 	int l;
 	char buf[32];
@@ -186,50 +185,51 @@
 	if (!var_data)
 		return -ENODATA;
 
-	if (how == E_NEW_DEFAULT) {
+	val = var_data->u.n.value;
+	switch (how) {
+	case E_NEW_DEFAULT:
 		if (input < var_data->u.n.low || input > var_data->u.n.high)
 			return -ERANGE;
 		var_data->u.n.default_val = input;
 		return 0;
-	}
-	if (how == E_DEFAULT) {
+	case E_DEFAULT:
 		val = var_data->u.n.default_val;
-		ret = -ERESTART;
-	} else {
-		if (how == E_SET)
-			val = input;
-		else
-			val = var_data->u.n.value;
-		if (how == E_INC)
-			val += input;
-		else if (how == E_DEC)
-			val -= input;
-		if (val < var_data->u.n.low || val > var_data->u.n.high)
-			return -ERANGE;
+		break;
+	case E_SET:
+		val = input;
+		break;
+	case E_INC:
+		val += input;
+		break;
+	case E_DEC:
+		val -= input;
+		break;
 	}
+
+	if (val < var_data->u.n.low || val > var_data->u.n.high)
+		return -ERANGE;
+
 	var_data->u.n.value = val;
 	if (var->var_type == VAR_TIME && p_val != NULL) {
 		*p_val = msecs_to_jiffies(val);
-		return ret;
+		return 0;
 	}
 	if (p_val != NULL)
 		*p_val = val;
 	if (var->var_id == PUNC_LEVEL) {
 		spk_punc_mask = spk_punc_masks[val];
-		return ret;
+		return 0;
 	}
 	if (var_data->u.n.multiplier != 0)
 		val *= var_data->u.n.multiplier;
 	val += var_data->u.n.offset;
 	if (var->var_id < FIRST_SYNTH_VAR || !synth)
-		return ret;
-	if (synth->synth_adjust) {
-		int status = synth->synth_adjust(var);
+		return 0;
+	if (synth->synth_adjust)
+		return synth->synth_adjust(var);
 
-		return (status != 0) ? status : ret;
-	}
 	if (!var_data->u.n.synth_fmt)
-		return ret;
+		return 0;
 	if (var->var_id == PITCH)
 		cp = spk_pitch_buff;
 	else
@@ -240,7 +240,7 @@
 		l = sprintf(cp,
 			var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
 	synth_printf("%s", cp);
-	return ret;
+	return 0;
 }
 
 int spk_set_string_var(const char *page, struct st_var_header *var, int len)
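The spk_set_num_var() rework above folds the old if/else ladder into a switch and hoists the shared bounds check so it runs once for every case. A minimal sketch of the same pattern, with hypothetical names (adjust_value, HOW_*) standing in for the speakup-specific ones:

#include <linux/errno.h>

enum adjust_how { HOW_SET, HOW_INC, HOW_DEC };

/* apply one adjustment, then range-check the result once for all cases */
static int adjust_value(int *cur, int input, enum adjust_how how,
			int low, int high)
{
	int val = *cur;

	switch (how) {
	case HOW_SET:
		val = input;
		break;
	case HOW_INC:
		val += input;
		break;
	case HOW_DEC:
		val -= input;
		break;
	}

	if (val < low || val > high)
		return -ERANGE;

	*cur = val;
	return 0;
}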
diff --git a/drivers/staging/staging.c b/drivers/staging/staging.c
deleted file mode 100644
index 233e589..0000000
--- a/drivers/staging/staging.c
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-static int __init staging_init(void)
-{
-	return 0;
-}
-
-static void __exit staging_exit(void)
-{
-}
-
-module_init(staging_init);
-module_exit(staging_exit);
-
-MODULE_AUTHOR("Greg Kroah-Hartman");
-MODULE_DESCRIPTION("Staging Core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 824d460..54c17b1 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -870,7 +870,7 @@
  * Descriptor structure.
  * Describes the number of i2c devices on the bus that speak RMI.
  */
-static struct synaptics_rmi4_platform_data synaptics_rmi4_platformdata = {
+static const struct synaptics_rmi4_platform_data synaptics_rmi4_platformdata = {
 	.irq_type       = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
 	.x_flip		= false,
 	.y_flip		= true,
@@ -1039,7 +1039,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
 /**
  * synaptics_rmi4_suspend() - suspend the touch screen controller
  * @dev: pointer to device structure
@@ -1047,7 +1046,7 @@
  * This function is used to suspend the
  * touch panel controller and returns integer
  */
-static int synaptics_rmi4_suspend(struct device *dev)
+static int __maybe_unused synaptics_rmi4_suspend(struct device *dev)
 {
 	/* Touch sleep mode */
 	int retval;
@@ -1081,7 +1080,7 @@
  * This function is used to resume the touch panel
  * controller and returns integer.
  */
-static int synaptics_rmi4_resume(struct device *dev)
+static int __maybe_unused synaptics_rmi4_resume(struct device *dev)
 {
 	int retval;
 	unsigned char intr_status;
@@ -1112,8 +1111,6 @@
 	return 0;
 }
 
-#endif
-
 static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend,
 			 synaptics_rmi4_resume);
 
diff --git a/drivers/staging/unisys/include/guestlinuxdebug.h b/drivers/staging/unisys/include/guestlinuxdebug.h
index 82ee565..b81287f 100644
--- a/drivers/staging/unisys/include/guestlinuxdebug.h
+++ b/drivers/staging/unisys/include/guestlinuxdebug.h
@@ -17,9 +17,10 @@
 #define __GUESTLINUXDEBUG_H__
 
 /*
-* This file contains supporting interface for "vmcallinterface.h", particularly
-* regarding adding additional structure and functionality to linux
-* ISSUE_IO_VMCALL_POSTCODE_SEVERITY */
+ * This file contains supporting interface for "vmcallinterface.h", particularly
+ * regarding adding additional structure and functionality to linux
+ * ISSUE_IO_VMCALL_POSTCODE_SEVERITY
+ */
 
 /******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
 enum driver_pc {		/* POSTCODE driver identifier tuples */
@@ -133,9 +134,9 @@
 
 #define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
 #define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
-#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT	/* TODO-> Info currently
-							 * doesn't show, so we
-							 * set info=warning */
+/* TODO-> Info currently doesn't show, so we set info=warning */
+#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
+
 /* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
  * Please also note that the resulting postcode is in hex, so if you are
  * searching for the __LINE__ number, convert it first to decimal.  The line
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 162ca18..880d9f0 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -575,7 +575,7 @@
  * room)
  */
 static inline  u16
-add_physinfo_entries(u32 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
+add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
 		     u16 max_pi_arr_entries, struct phys_info pi_arr[])
 {
 	u32 len;
@@ -589,21 +589,19 @@
 		pi_arr[index].pi_pfn = inp_pfn;
 		pi_arr[index].pi_off = (u16)inp_off;
 		pi_arr[index].pi_len = (u16)inp_len;
-		    return index + 1;
+		return index + 1;
 	}
 
-	    /* this entry spans multiple pages */
-	    for (len = inp_len, i = 0; len;
-		 len -= pi_arr[index + i].pi_len, i++) {
+	/* this entry spans multiple pages */
+	for (len = inp_len, i = 0; len;
+		len -= pi_arr[index + i].pi_len, i++) {
 		if (index + i >= max_pi_arr_entries)
 			return 0;
 		pi_arr[index + i].pi_pfn = inp_pfn + i;
 		if (i == 0) {
 			pi_arr[index].pi_off = inp_off;
 			pi_arr[index].pi_len = firstlen;
-		}
-
-		else {
+		} else {
 			pi_arr[index + i].pi_off = 0;
 			pi_arr[index + i].pi_len =
 			    (u16)MINNUM(len, (u32)PI_PAGE_SIZE);
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index ec25366..03e36fb 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -55,22 +55,25 @@
 #define CONTROLVM_CRASHMSG_MAX		2
 
 struct spar_segment_state  {
-	u16 enabled:1;		/* Bit 0: May enter other states */
-	u16 active:1;		/* Bit 1: Assigned to active partition */
-	u16 alive:1;		/* Bit 2: Configure message sent to
-				 * service/server */
-	u16 revoked:1;		/* Bit 3: similar to partition state
-				 * ShuttingDown */
-	u16 allocated:1;	/* Bit 4: memory (device/port number)
-				 * has been selected by Command */
-	u16 known:1;		/* Bit 5: has been introduced to the
-				 * service/guest partition */
-	u16 ready:1;		/* Bit 6: service/Guest partition has
-				 * responded to introduction */
-	u16 operating:1;	/* Bit 7: resource is configured and
-				 * operating */
-	/* Note: don't use high bit unless we need to switch to ushort
-	 * which is non-compliant */
+	/* Bit 0: May enter other states */
+	u16 enabled:1;
+	/* Bit 1: Assigned to active partition */
+	u16 active:1;
+	/* Bit 2: Configure message sent to service/server */
+	u16 alive:1;
+	/* Bit 3: similar to partition state ShuttingDown */
+	u16 revoked:1;
+	/* Bit 4: memory (device/port number) has been selected by Command */
+	u16 allocated:1;
+	/* Bit 5: has been introduced to the service/guest partition */
+	u16 known:1;
+	/* Bit 6: service/Guest partition has responded to introduction */
+	u16 ready:1;
+	/* Bit 7: resource is configured and operating */
+	u16 operating:1;
+/* Note: don't use high bit unless we need to switch to ushort
+ * which is non-compliant
+ */
 };
 
 static const struct spar_segment_state segment_state_running = {
@@ -177,53 +180,53 @@
 	/* For requests, indicates the message type. */
 	/* For responses, indicates the type of message we are responding to. */
 
-	u32 message_size;	/* Includes size of this struct + size
-				 * of message */
-	u32 segment_index;	/* Index of segment containing Vm
-				 * message/information */
-	u32 completion_status;	/* Error status code or result of
-				 * message completion */
+	/* Includes size of this struct + size of message */
+	u32 message_size;
+	/* Index of segment containing Vm message/information */
+	u32 segment_index;
+	/* Error status code or result of message completion */
+	u32 completion_status;
 	struct  {
-		u32 failed:1;		   /* =1 in a response to * signify
-					    * failure */
-		u32 response_expected:1;   /* =1 in all messages that expect a
-					    * response (Control ignores this
-					    * bit) */
-		u32 server:1;		   /* =1 in all bus & device-related
-					    * messages where the message
-					    * receiver is to act as the bus or
-					    * device server */
-		u32 test_message:1;	   /* =1 for testing use only
-					    * (Control and Command ignore this
-					    * bit) */
-		u32 partial_completion:1;  /* =1 if there are forthcoming
-					    * responses/acks associated
-					    * with this message */
-		u32 preserve:1;		   /* =1 this is to let us know to
-					    * preserve channel contents
-					    * (for running guests)*/
-		u32 writer_in_diag:1;	   /* =1 the DiagWriter is active in the
-					    * Diagnostic Partition*/
+		/* =1 in a response to signify failure */
+		u32 failed:1;
+		/* =1 in all messages that expect a response */
+		u32 response_expected:1;
+		/* =1 in all bus & device-related messages where the message
+		 * receiver is to act as the bus or device server
+		 */
+		u32 server:1;
+		/* =1 for testing use only (Control and Command ignore this bit) */
+		u32 test_message:1;
+		/* =1 if there are forthcoming responses/acks associated
+		 * with this message
+		 */
+		u32 partial_completion:1;
+		/* =1 this is to let us know to preserve channel contents */
+		u32 preserve:1;
+		/* =1 the DiagWriter is active in the Diagnostic Partition */
+		u32 writer_in_diag:1;
 	} flags;
-	u32 reserved;		/* Natural alignment */
-	u64 message_handle;	/* Identifies the particular message instance,
-				 * and is used to match particular */
+	/* Natural alignment */
+	u32 reserved;
+	/* Identifies the particular message instance */
+	u64 message_handle;
 	/* request instances with the corresponding response instance. */
-	u64 payload_vm_offset;	/* Offset of payload area from start of this
-				 * instance of ControlVm segment */
-	u32 payload_max_bytes;	/* Maximum bytes allocated in payload
-				 * area of ControlVm segment */
-	u32 payload_bytes;	/* Actual number of bytes of payload
-				 * area to copy between IO/Command; */
+	/* Offset of payload area from start of this instance */
+	u64 payload_vm_offset;
+	/* Maximum bytes allocated in payload area of ControlVm segment */
+	u32 payload_max_bytes;
+	/* Actual number of bytes of payload area to copy between IO/Command */
+	u32 payload_bytes;
 	/* if non-zero, there is a payload to copy. */
 };
 
 struct controlvm_packet_device_create  {
 	u32 bus_no;		/* bus # (0..n-1) from the msg receiver's end */
 	u32 dev_no;		/* bus-relative (0..n-1) device number */
-	u64 channel_addr;	/* Guest physical address of the channel, which
-				 * can be dereferenced by the receiver of this
-				 * ControlVm command */
+	/* Guest physical address of the channel, which can be dereferenced by
+	 * the receiver of this ControlVm command
+	 */
+	u64 channel_addr;
 	u64 channel_bytes;	/* specifies size of the channel in bytes */
 	uuid_le data_type_uuid;	/* specifies format of data in channel */
 	uuid_le dev_inst_uuid;	/* instance guid for the device */
@@ -231,8 +234,8 @@
 };	/* for CONTROLVM_DEVICE_CREATE */
 
 struct controlvm_packet_device_configure  {
-	u32 bus_no;	      /* bus # (0..n-1) from the msg
-			       * receiver's perspective */
+	/* bus # (0..n-1) from the msg receiver's perspective */
+	u32 bus_no;
 	/* Control uses header SegmentIndex field to access bus number... */
 	u32 dev_no;	      /* bus-relative (0..n-1) device number */
 } ;	/* for CONTROLVM_DEVICE_CONFIGURE */
@@ -251,50 +254,50 @@
 struct controlvm_message_packet  {
 	union  {
 		struct  {
-			u32 bus_no;	/* bus # (0..n-1) from the msg
-					 * receiver's perspective */
-			u32 dev_count;	/* indicates the max number of
-					 * devices on this bus */
-			u64 channel_addr;	/* Guest physical address of
-						 * the channel, which can be
-						 * dereferenced by the receiver
-						 * of this ControlVm command */
+	/* bus # (0..n-1) from the msg receiver's perspective */
+			u32 bus_no;
+	/* indicates the max number of devices on this bus */
+			u32 dev_count;
+	/* Guest physical address of the channel, which can be
+	 * dereferenced by the receiver of this ControlVm command
+	 */
+			u64 channel_addr;
 			u64 channel_bytes;	/* size of the channel */
-			uuid_le bus_data_type_uuid;	/* indicates format of
-							 * data in bus channel*/
+	/* indicates format of data in bus channel */
+			uuid_le bus_data_type_uuid;
 			uuid_le bus_inst_uuid;	/* instance uuid for the bus */
 		} create_bus;	/* for CONTROLVM_BUS_CREATE */
 		struct  {
-			u32 bus_no;	/* bus # (0..n-1) from the msg
-					 * receiver's perspective */
+	/* bus # (0..n-1) from the msg receiver's perspective */
+			u32 bus_no;
 			u32 reserved;	/* Natural alignment purposes */
 		} destroy_bus;	/* for CONTROLVM_BUS_DESTROY */
 		struct  {
-			u32 bus_no;	/* bus # (0..n-1) from the receiver's
-					 * perspective */
+	/* bus # (0..n-1) from the receiver's perspective */
+			u32 bus_no;
 			u32 reserved1;	/* for alignment purposes */
-			u64 guest_handle;	/* This is used to convert
-						 * guest physical address to
-						 * physical address */
+	/* This is used to convert guest physical address to physical address */
+			u64 guest_handle;
 			u64 recv_bus_irq_handle;
 				/* specifies interrupt info. It is used by SP
 				 * to register to receive interrupts from the
 				 * CP. This interrupt is used for bus level
 				 * notifications.  The corresponding
-				 * sendBusInterruptHandle is kept in CP. */
+				 * sendBusInterruptHandle is kept in CP.
+				 */
 		} configure_bus;	/* for CONTROLVM_BUS_CONFIGURE */
 		/* for CONTROLVM_DEVICE_CREATE */
 		struct controlvm_packet_device_create create_device;
 		struct  {
-			u32 bus_no;	/* bus # (0..n-1) from the msg
-					 * receiver's perspective */
+		/* bus # (0..n-1) from the msg receiver's perspective */
+			u32 bus_no;
 			u32 dev_no;	/* bus-relative (0..n-1) device # */
 		} destroy_device;	/* for CONTROLVM_DEVICE_DESTROY */
 		/* for CONTROLVM_DEVICE_CONFIGURE */
 		struct controlvm_packet_device_configure configure_device;
 		struct  {
-			u32 bus_no;	/* bus # (0..n-1) from the msg
-					 * receiver's perspective */
+		/* bus # (0..n-1) from the msg receiver's perspective */
+			u32 bus_no;
 			u32 dev_no;	/* bus-relative (0..n-1) device # */
 		} reconfigure_device;	/* for CONTROLVM_DEVICE_RECONFIGURE */
 		struct  {
@@ -307,8 +310,8 @@
 			u32 dev_no;
 			struct spar_segment_state state;
 			struct  {
-				u32 phys_device:1;	/* =1 if message is for
-							 * a physical device */
+				/* =1 if message is for a physical device */
+				u32 phys_device:1;
 			} flags;
 			u8 reserved[2];	/* Natural alignment purposes */
 		} device_change_state;	/* for CONTROLVM_DEVICE_CHANGESTATE */
@@ -320,9 +323,10 @@
 		} device_change_state_event;
 			/* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
 		struct  {
-			u32 bus_count;	/* indicates the max number of busses */
-			u32 switch_count; /* indicates the max number of
-					   * switches if a service partition */
+			/* indicates the max number of busses */
+			u32 bus_count;
+			/* indicates the max number of switches */
+			u32 switch_count;
 			enum ultra_chipset_feature features;
 			u32 platform_number;	/* Platform Number */
 		} init_chipset;	/* for CONTROLVM_CHIPSET_INIT */
@@ -330,11 +334,12 @@
 			u32 options;	/* reserved */
 			u32 test;	/* bit 0 set to run embedded selftest */
 		} chipset_selftest;	/* for CONTROLVM_CHIPSET_SELFTEST */
-		u64 addr;	/* a physical address of something, that can be
-				 * dereferenced by the receiver of this
-				 * ControlVm command (depends on command id) */
-		u64 handle;	/* a handle of something (depends on command
-				 * id) */
+		/* a physical address of something, that can be dereferenced
+		 * by the receiver of this ControlVm command
+		 */
+		u64 addr;
+		/* a handle of something (depends on command id) */
+		u64 handle;
 	};
 };
 
@@ -357,8 +362,8 @@
 	u64 gp_nvram;	/* guest phys addr of NVRAM channel */
 	u64 request_payload_offset;	/* Offset to request payload area */
 	u64 event_payload_offset;	/* Offset to event payload area */
-	u32 request_payload_bytes;	/* Bytes available in request payload
-					 * area */
+	/* Bytes available in request payload area */
+	u32 request_payload_bytes;
 	u32 event_payload_bytes;/* Bytes available in event payload area */
 	u32 control_channel_bytes;
 	u32 nvram_channel_bytes;	/* Bytes in PartitionNvram segment */
@@ -384,41 +389,37 @@
 	u64 virtual_guest_image_size;
 	u64 prototype_control_channel_offset;
 	u64 virtual_guest_partition_handle;
-
-	u16 restore_action;	/* Restore Action field to restore the guest
-				 * partition */
-	u16 dump_action;	/* For Windows guests it shows if the visordisk
-				 * is running in dump mode */
+	/* Restore Action field to restore the guest partition */
+	u16 restore_action;
+	/* For Windows guests it shows if the visordisk is in dump mode */
+	u16 dump_action;
 	u16 nvram_fail_count;
 	u16 saved_crash_message_count;	/* = CONTROLVM_CRASHMSG_MAX */
-	u32 saved_crash_message_offset;	/* Offset to request payload area needed
-					 * for crash dump */
-	u32 installation_error;	/* Type of error encountered during
-				 * installation */
+	/* Offset to request payload area needed for crash dump */
+	u32 saved_crash_message_offset;
+	/* Type of error encountered during installation */
+	u32 installation_error;
 	u32 installation_text_id;	/* Id of string to display */
-	u16 installation_remaining_steps;/* Number of remaining installation
-					  * steps (for progress bars) */
-	u8 tool_action;		/* ULTRA_TOOL_ACTIONS Installation Action
-				 * field */
+	/* Number of remaining installation steps (for progress bars) */
+	u16 installation_remaining_steps;
+	/* ULTRA_TOOL_ACTIONS Installation Action field */
+	u8 tool_action;
 	u8 reserved;		/* alignment */
 	struct efi_spar_indication efi_spar_ind;
 	struct efi_spar_indication efi_spar_ind_supported;
 	u32 sp_reserved;
-	u8 reserved2[28];	/* Force signals to begin on 128-byte cache
-				 * line */
-	struct signal_queue_header request_queue;/* Service or guest partition
-						  * uses this queue to send
-						  * requests to Control */
-	struct signal_queue_header response_queue;/* Control uses this queue to
-						   * respond to service or guest
-						   * partition requests */
-	struct signal_queue_header event_queue;	/* Control uses this queue to
-						 * send events to service or
-						 * guest partition */
-	struct signal_queue_header event_ack_queue;/* Service or guest partition
-						    * uses this queue to ack
-						    * Control events */
-
+	/* Force signals to begin on 128-byte cache line */
+	u8 reserved2[28];
+	/* guest partition uses this queue to send requests to Control */
+	struct signal_queue_header request_queue;
+	/* Control uses this queue to respond to service or guest
+	 * partition requests
+	 */
+	struct signal_queue_header response_queue;
+	/* Control uses this queue to send events to guest partition */
+	struct signal_queue_header event_queue;
+	/* Service or guest partition uses this queue to ack Control events */
+	struct signal_queue_header event_ack_queue;
 	 /* Request fixed-size message pool - does not include payload */
 	 struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
 
diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
index f59fd8a..abdab4ad 100644
--- a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
+++ b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
@@ -62,7 +62,7 @@
 					p++;
 					remain--;
 					chars++;
-				} else if (p == NULL) {
+				} else if (!p) {
 					chars++;
 				}
 				nonprintable_streak = 0;
@@ -72,7 +72,7 @@
 				p++;
 				remain--;
 				chars++;
-			} else if (p == NULL) {
+			} else if (!p) {
 				chars++;
 			}
 		} else {
@@ -124,7 +124,8 @@
 	}
 	if (remain < digits) {
 		/* not enough room left at <p> to hold number, so fill with
-		 * '?' */
+		 * '?'
+		 */
 		for (i = 0; i < remain; i++, p++)
 			*p = '?';
 		return remain;
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index eac97d2..533bb5b 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -221,7 +221,6 @@
 {
 	struct visor_device *dev = dev_get_drvdata(xdev);
 
-	dev_set_drvdata(xdev, NULL);
 	kfree(dev);
 }
 
@@ -701,12 +700,10 @@
 static int
 register_driver_attributes(struct visor_driver *drv)
 {
-	int rc;
 	struct driver_attribute version =
 	    __ATTR(version, S_IRUGO, DRIVER_ATTR_version, NULL);
 	drv->version_attr = version;
-	rc = driver_create_file(&drv->driver, &drv->version_attr);
-	return rc;
+	return driver_create_file(&drv->driver, &drv->version_attr);
 }
 
 static void
@@ -771,7 +768,7 @@
 	get_device(&dev->device);
 	if (!drv->probe) {
 		up(&dev->visordriver_callback_lock);
-		rc = -1;
+		rc = -ENODEV;
 		goto away;
 	}
 	rc = drv->probe(dev);
@@ -973,7 +970,7 @@
 static int
 create_visor_device(struct visor_device *dev)
 {
-	int rc = -1;
+	int rc;
 	u32 chipset_bus_no = dev->chipset_bus_no;
 	u32 chipset_dev_no = dev->chipset_dev_no;
 
@@ -995,6 +992,7 @@
 	if (!dev->periodic_work) {
 		POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
 				 DIAG_SEVERITY_ERR);
+		rc = -EINVAL;
 		goto away;
 	}
 
@@ -1032,14 +1030,15 @@
 	if (rc < 0) {
 		POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
 				 DIAG_SEVERITY_ERR);
-		goto away_register;
+		goto away_unregister;
 	}
 
 	list_add_tail(&dev->list_all, &list_all_device_instances);
 	return 0;
 
-away_register:
+away_unregister:
 	device_unregister(&dev->device);
+
 away:
 	put_device(&dev->device);
 	return rc;
@@ -1058,23 +1057,21 @@
 get_vbus_header_info(struct visorchannel *chan,
 		     struct spar_vbus_headerinfo *hdr_info)
 {
-	int rc = -1;
-
 	if (!SPAR_VBUS_CHANNEL_OK_CLIENT(visorchannel_get_header(chan)))
-		goto away;
+		return -EINVAL;
+
 	if (visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
 			      sizeof(*hdr_info)) < 0) {
-		goto away;
+		return -EIO;
 	}
 	if (hdr_info->struct_bytes < sizeof(struct spar_vbus_headerinfo))
-		goto away;
+		return -EINVAL;
+
 	if (hdr_info->device_info_struct_bytes <
 	    sizeof(struct ultra_vbus_deviceinfo)) {
-		goto away;
+		return -EINVAL;
 	}
-	rc = 0;
-away:
-	return rc;
+	return 0;
 }
 
 /* Write the contents of <info> to the struct
@@ -1197,17 +1194,14 @@
 static int
 create_bus_instance(struct visor_device *dev)
 {
-	int rc;
 	int id = dev->chipset_bus_no;
 	struct spar_vbus_headerinfo *hdr_info;
 
 	POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
 
 	hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
-	if (!hdr_info) {
-		rc = -1;
-		goto away;
-	}
+	if (!hdr_info)
+		return -ENOMEM;
 
 	dev_set_name(&dev->device, "visorbus%d", id);
 	dev->device.bus = &visorbus_type;
@@ -1217,8 +1211,8 @@
 	if (device_register(&dev->device) < 0) {
 		POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id,
 				 POSTCODE_SEVERITY_ERR);
-		rc = -1;
-		goto away_mem;
+		kfree(hdr_info);
+		return -ENODEV;
 	}
 
 	if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
@@ -1234,11 +1228,6 @@
 	list_add_tail(&dev->list_all, &list_all_bus_instances);
 	dev_set_drvdata(&dev->device, dev);
 	return 0;
-
-away_mem:
-	kfree(hdr_info);
-away:
-	return rc;
 }
 
 /** Remove a device instance for the visor bus itself.
@@ -1328,7 +1317,7 @@
 static void
 chipset_device_create(struct visor_device *dev_info)
 {
-	int rc = -1;
+	int rc;
 	u32 bus_no = dev_info->chipset_bus_no;
 	u32 dev_no = dev_info->chipset_dev_no;
 
@@ -1371,9 +1360,9 @@
 		return;
 
 	/* Notify the chipset driver that the pause is complete, which
-	* will presumably want to send some sort of response to the
-	* initiator.
-	*/
+	 * will presumably want to send some sort of response to the
+	 * initiator.
+	 */
 	(*chipset_responders.device_pause) (dev, status);
 }
 
@@ -1405,7 +1394,7 @@
 static void
 initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
 {
-	int rc = -1, x;
+	int rc;
 	struct visor_driver *drv = NULL;
 	void (*notify_func)(struct visor_device *dev, int response) = NULL;
 
@@ -1414,14 +1403,18 @@
 	else
 		notify_func = chipset_responders.device_resume;
 	if (!notify_func)
-		goto away;
+		return;
 
 	drv = to_visor_driver(dev->device.driver);
-	if (!drv)
-		goto away;
+	if (!drv) {
+		(*notify_func)(dev, -ENODEV);
+		return;
+	}
 
-	if (dev->pausing || dev->resuming)
-		goto away;
+	if (dev->pausing || dev->resuming) {
+		(*notify_func)(dev, -EBUSY);
+		return;
+	}
 
 	/* Note that even though both drv->pause() and drv->resume
 	 * specify a callback function, it is NOT necessary for us to
@@ -1431,11 +1424,13 @@
 	 * visorbus while child function drivers are still running.
 	 */
 	if (is_pause) {
-		if (!drv->pause)
-			goto away;
+		if (!drv->pause) {
+			(*notify_func)(dev, -EINVAL);
+			return;
+		}
 
 		dev->pausing = true;
-		x = drv->pause(dev, pause_state_change_complete);
+		rc = drv->pause(dev, pause_state_change_complete);
 	} else {
 		/* This should be done at BUS resume time, but an
 		 * existing problem prevents us from ever getting a bus
@@ -1444,24 +1439,20 @@
 		 * would never even get here in that case.
 		 */
 		fix_vbus_dev_info(dev);
-		if (!drv->resume)
-			goto away;
+		if (!drv->resume) {
+			(*notify_func)(dev, -EINVAL);
+			return;
+		}
 
 		dev->resuming = true;
-		x = drv->resume(dev, resume_state_change_complete);
+		rc = drv->resume(dev, resume_state_change_complete);
 	}
-	if (x < 0) {
+	if (rc < 0) {
 		if (is_pause)
 			dev->pausing = false;
 		else
 			dev->resuming = false;
-		goto away;
-	}
-	rc = 0;
-away:
-	if (rc < 0) {
-		if (notify_func)
-			(*notify_func)(dev, rc);
+		(*notify_func)(dev, -EINVAL);
 	}
 }
 
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 891b8db..a80d72c 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -461,7 +461,7 @@
 	if (!sig_read_header(channel, queue, &sig_hdr))
 		return false;
 
-	sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
+	sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
 	if (sig_hdr.head == sig_hdr.tail) {
 		sig_hdr.num_overflows++;
 		visorchannel_write(channel,
@@ -521,7 +521,7 @@
 	tail = sig_hdr.tail;
 	if (head < tail)
 		head = head + sig_hdr.max_slots;
-	slots_used = (head - tail);
+	slots_used = head - tail;
 	slots_avail = sig_hdr.max_signals - slots_used;
 	return (int)slots_avail;
 }
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 07594f4..b75b063 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -43,11 +43,10 @@
 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
 
-#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
 
 #define VISORCHIPSET_MMAP_CONTROLCHANOFFSET	0x00000000
 
-
 #define UNISYS_SPAR_LEAF_ID 0x40000000
 
 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
@@ -86,8 +85,8 @@
 */
 #define MIN_IDLE_SECONDS 10
 static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-static unsigned long most_recent_message_jiffies;	/* when we got our last
-						 * controlvm message */
+/* when we got our last controlvm message */
+static unsigned long most_recent_message_jiffies;
 static int visorbusregistered;
 
 #define MAX_CHIPSET_EVENTS 2
@@ -120,7 +119,8 @@
 struct visor_controlvm_payload_info {
 	u8 *ptr;		/* pointer to base address of payload pool */
 	u64 offset;		/* offset from beginning of controlvm
-				 * channel to beginning of payload * pool */
+				 * channel to beginning of payload pool
+				 */
 	u32 bytes;		/* number of bytes in payload pool */
 };
 
@@ -184,7 +184,8 @@
 	 * - this list is added to when controlvm messages come in that supply
 	 * file data
 	 * - this list is removed from via the hotplug program that is actually
-	 * consuming these buffers to write as file data */
+	 * consuming these buffers to write as file data
+	 */
 	struct list_head input_buffer_list;
 	spinlock_t req_list_lock;	/* lock for input_buffer_list */
 
@@ -352,7 +353,6 @@
 		struct controlvm_message_header *msg_hdr, int response,
 		struct spar_segment_state state);
 
-
 static void parser_done(struct parser_context *ctx);
 
 static struct parser_context *
@@ -377,7 +377,7 @@
 		rc = NULL;
 		goto cleanup;
 	}
-	ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
+	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
 	if (!ctx) {
 		if (retry)
 			*retry = true;
@@ -397,7 +397,7 @@
 			rc = NULL;
 			goto cleanup;
 		}
-		p = __va((unsigned long) (addr));
+		p = __va((unsigned long)(addr));
 		memcpy(ctx->data, p, bytes);
 	} else {
 		void *mapping;
@@ -437,7 +437,7 @@
 {
 	struct spar_controlvm_parameters_header *phdr = NULL;
 
-	if (ctx == NULL)
+	if (!ctx)
 		return NULL_UUID_LE;
 	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
 	return phdr->id;
@@ -460,8 +460,9 @@
 {
 	struct spar_controlvm_parameters_header *phdr = NULL;
 
-	if (ctx == NULL)
-		goto Away;
+	if (!ctx)
+		return;
+
 	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
 	switch (which_string) {
 	case PARSERSTRING_INITIATOR:
@@ -483,9 +484,6 @@
 	default:
 		break;
 	}
-
-Away:
-	return;
 }
 
 static void parser_done(struct parser_context *ctx)
@@ -520,16 +518,15 @@
 		}
 	if (value_length < 0)	/* '\0' was not included in the length */
 		value_length = nscan;
-	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
-	if (value == NULL)
+	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
+	if (!value)
 		return NULL;
 	if (value_length > 0)
 		memcpy(value, pscan, value_length);
-	((u8 *) (value))[value_length] = '\0';
+	((u8 *)(value))[value_length] = '\0';
 	return value;
 }
 
-
 static ssize_t toolaction_show(struct device *dev,
 			       struct device_attribute *attr,
 			       char *buf)
@@ -537,8 +534,8 @@
 	u8 tool_action;
 
 	visorchannel_read(controlvm_channel,
-		offsetof(struct spar_controlvm_channel_protocol,
-			 tool_action), &tool_action, sizeof(u8));
+			  offsetof(struct spar_controlvm_channel_protocol,
+				   tool_action), &tool_action, sizeof(u8));
 	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
 }
 
@@ -706,6 +703,7 @@
 
 	return 0;
 }
+
 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
 					       struct visor_device *from)
 {
@@ -788,13 +786,15 @@
 	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
 
 	/* Set features to indicate we support parahotplug (if Command
-	 * also supports it). */
+	 * also supports it).
+	 */
 	features =
 	    inmsg->cmd.init_chipset.
 	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
 
 	/* Set the "reply" bit so Command knows this is a
-	 * features-aware driver. */
+	 * features-aware driver.
+	 */
 	features |= ULTRA_CHIPSET_FEATURE_REPLY;
 
 cleanup:
@@ -813,7 +813,7 @@
 	msg->hdr.payload_max_bytes = 0;
 	if (response < 0) {
 		msg->hdr.flags.failed = 1;
-		msg->hdr.completion_status = (u32) (-response);
+		msg->hdr.completion_status = (u32)(-response);
 	}
 }
 
@@ -872,7 +872,7 @@
 	      struct controlvm_message_header *pending_msg_hdr,
 	      int response)
 {
-	if (pending_msg_hdr == NULL)
+	if (!pending_msg_hdr)
 		return;		/* no controlvm response needed */
 
 	if (pending_msg_hdr->id != (u32)cmd_id)
@@ -890,7 +890,7 @@
 	u32 bus_no = p->chipset_bus_no;
 	u32 dev_no = p->chipset_dev_no;
 
-	if (p->pending_msg_hdr == NULL)
+	if (!p->pending_msg_hdr)
 		return;		/* no controlvm response needed */
 	if (p->pending_msg_hdr->id != cmd_id)
 		return;
@@ -911,7 +911,7 @@
 		 struct controlvm_message_header *pending_msg_hdr,
 		 int response)
 {
-	if (pending_msg_hdr == NULL)
+	if (!pending_msg_hdr)
 		return;		/* no controlvm response needed */
 
 	if (pending_msg_hdr->id != (u32)cmd_id)
@@ -1177,7 +1177,7 @@
 		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
 				 POSTCODE_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
-	} else if (bus_info->pending_msg_hdr != NULL) {
+	} else if (bus_info->pending_msg_hdr) {
 		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
 				 POSTCODE_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
@@ -2310,18 +2310,13 @@
 	}
 	most_recent_message_jiffies = jiffies;
 	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-	rc = queue_delayed_work(periodic_controlvm_workqueue,
-				&periodic_controlvm_work, poll_jiffies);
-	if (rc < 0) {
-		POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
-				 DIAG_SEVERITY_ERR);
-		goto cleanup;
-	}
+	queue_delayed_work(periodic_controlvm_workqueue,
+			   &periodic_controlvm_work, poll_jiffies);
 
 	visorchipset_platform_device.dev.devt = major_dev;
 	if (platform_device_register(&visorchipset_platform_device) < 0) {
 		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
-		rc = -1;
+		rc = -ENODEV;
 		goto cleanup;
 	}
 	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
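The hunk above drops the rc < 0 check after queue_delayed_work() because that function returns a bool (false only when the work item was already pending), never a negative errno, so the error branch was dead code. Sketch with hypothetical names:

#include <linux/printk.h>
#include <linux/workqueue.h>

static void example_schedule_poll(struct workqueue_struct *wq,
				  struct delayed_work *work,
				  unsigned long delay_jiffies)
{
	/* bool result: false means already queued, not an error */
	if (!queue_delayed_work(wq, work, delay_jiffies))
		pr_debug("poll work was already pending\n");
}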
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index d5178b4..e93bb1d 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -167,7 +167,7 @@
 {
 	/* used to stop the thread */
 	init_completion(&thrinfo->has_stopped);
-	thrinfo->task = kthread_run(threadfn, thrcontext, name);
+	thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name);
 	if (IS_ERR(thrinfo->task)) {
 		thrinfo->id = 0;
 		return PTR_ERR(thrinfo->task);
@@ -323,9 +323,9 @@
 		goto err_del_scsipending_ent;
 
 	if (tasktype == TASK_MGMT_ABORT_TASK)
-		scsicmd->result = (DID_ABORT << 16);
+		scsicmd->result = DID_ABORT << 16;
 	else
-		scsicmd->result = (DID_RESET << 16);
+		scsicmd->result = DID_RESET << 16;
 
 	scsicmd->scsi_done(scsicmd);
 
@@ -1062,7 +1062,7 @@
 		return -EINVAL;
 
 	if (devdata->serverdown && !devdata->serverchangingstate)
-		devdata->serverchangingstate = 1;
+		devdata->serverchangingstate = true;
 
 	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
 			   devdata, "vhba_incming");
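The visorhba change above passes the caller-supplied thread name through a "%s" format because kthread_run() treats its third argument as a printf-style format string. A minimal sketch of the safe call, with hypothetical names:

#include <linux/kthread.h>

static struct task_struct *start_worker(int (*fn)(void *), void *data,
					const char *name)
{
	/* "%s" keeps any '%' in name from being interpreted as a format */
	return kthread_run(fn, data, "%s", name);
}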
diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h
index 3e6a52f..1bc3d20 100644
--- a/drivers/staging/unisys/visorinput/ultrainputreport.h
+++ b/drivers/staging/unisys/visorinput/ultrainputreport.h
@@ -29,33 +29,40 @@
 	inputaction_mouse_button_up = 3, /* arg1: 1=left,2=center,3=right */
 	inputaction_mouse_button_click = 4, /* arg1: 1=left,2=center,3=right */
 	inputaction_mouse_button_dclick = 5, /* arg1: 1=left,2=center,
-						3=right */
+					      * 3=right
+					      */
 	inputaction_wheel_rotate_away = 6, /* arg1: wheel rotation away from
-					      user */
+					    * user
+					    */
 	inputaction_wheel_rotate_toward = 7, /* arg1: wheel rotation toward
-						user */
+					      * user
+					      */
 	inputaction_set_max_xy = 8,	/* set screen maxXY; arg1=x, arg2=y */
 	inputaction_key_down = 64,	/* arg1: scancode, as follows:
-					   If arg1 <= 0xff, it's a 1-byte
-					   scancode and arg1 is that scancode.
-					   If arg1 > 0xff, it's a 2-byte
-					   scanecode, with the 1st byte in the
-					   low 8 bits, and the 2nd byte in the
-					   high 8 bits.  E.g., the right ALT key
-					   would appear as x'38e0'. */
+					 * If arg1 <= 0xff, it's a 1-byte
+					 * scancode and arg1 is that scancode.
+					 * If arg1 > 0xff, it's a 2-byte
+					 * scancode, with the 1st byte in the
+					 * low 8 bits, and the 2nd byte in the
+					 * high 8 bits.  E.g., the right ALT key
+					 * would appear as x'38e0'.
+					 */
 	inputaction_key_up = 65,	/* arg1: scancode (in same format as
-					   inputaction_keyDown) */
+					 * inputaction_keyDown)
+					 */
 	inputaction_set_locking_key_state = 66,
 					/* arg1: scancode (in same format
-						 as inputaction_keyDown);
-						 MUST refer to one of the
-						 locking keys, like capslock,
-						 numlock, or scrolllock
-					   arg2: 1 iff locking key should be
-						 in the LOCKED position
-						 (e.g., light is ON) */
+					 *	 as inputaction_keyDown);
+					 *	 MUST refer to one of the
+					 *	 locking keys, like capslock,
+					 *	 numlock, or scrolllock
+					 * arg2: 1 iff locking key should be
+					 *	 in the LOCKED position
+					 *	 (e.g., light is ON)
+					 */
 	inputaction_key_down_up = 67,	/* arg1: scancode (in same format
-						 as inputaction_keyDown) */
+					 *	 as inputaction_keyDown)
+					 */
 	inputaction_last
 };
 
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 051947072..df4f688 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -36,6 +36,7 @@
  *         = 163840 bytes
  */
 #define MAX_BUF 163840
+#define NAPI_WEIGHT 64
 
 static int visornic_probe(struct visor_device *dev);
 static void visornic_remove(struct visor_device *dev);
@@ -376,8 +377,8 @@
 			__func__);
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		return -EINVAL;
-	} else
-		spin_unlock_irqrestore(&devdata->priv_lock, flags);
+	}
+	spin_unlock_irqrestore(&devdata->priv_lock, flags);
 	return 0;
 }
 
@@ -1028,7 +1029,7 @@
 			cmdrsp->net.type = NET_RCV_PROMISC;
 			cmdrsp->net.enbdis.context = netdev;
 			cmdrsp->net.enbdis.enable =
-				(netdev->flags & IFF_PROMISC);
+				netdev->flags & IFF_PROMISC;
 			visorchannel_signalinsert(devdata->dev->visorchannel,
 						  IOCHAN_TO_IOPART,
 						  cmdrsp);
@@ -1218,8 +1219,9 @@
 		/* length rcvd is greater than firstfrag in this skb rcv buf  */
 		skb->tail += RCVPOST_BUF_SIZE;	/* amount in skb->data */
 		skb->data_len = skb->len - RCVPOST_BUF_SIZE;	/* amount that
-								   will be in
-								   frag_list */
+								 *  will be in
+								 * frag_list
+								 */
 	} else {
 		/* data fits in this skb - no chaining - do
 		 * PRECAUTIONARY check
@@ -1315,12 +1317,14 @@
 				}
 				if (found_mc)
 					break;	/* accept packet, dest
-						   matches a multicast
-						   address */
+						 * matches a multicast
+						 * address
+						 */
 			}
 		} else if (skb->pkt_type == PACKET_HOST) {
 			break;	/* accept packet, h_dest must match vnic
-				   mac address */
+				 *  mac address
+				 */
 		} else if (skb->pkt_type == PACKET_OTHERHOST) {
 			/* something is not right */
 			dev_err(&devdata->netdev->dev,
@@ -1363,7 +1367,6 @@
 {
 	if (!devdata)
 		return NULL;
-	memset(devdata, '\0', sizeof(struct visornic_devdata));
 	devdata->dev = dev;
 	devdata->incarnation_id = get_jiffies_64();
 	return devdata;
@@ -1613,14 +1616,15 @@
  */
 static void
 service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
-		   int *rx_work_done)
+		   int *rx_work_done, int budget)
 {
 	unsigned long flags;
 	struct net_device *netdev;
 
+	while (*rx_work_done < budget) {
 	/* TODO: CLIENT ACQUIRE -- Don't really need this at the
-	 * moment */
-	for (;;) {
+	 * moment
+	 */
 		if (!visorchannel_signalremove(devdata->dev->visorchannel,
 					       IOCHAN_FROM_IOPART,
 					       cmdrsp))
@@ -1709,7 +1713,7 @@
 	int rx_count = 0;
 
 	send_rcv_posts_if_needed(devdata);
-	service_resp_queue(devdata->cmdrsp, devdata, &rx_count);
+	service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
 
 	/*
 	 * If there aren't any more packets to receive
@@ -1768,7 +1772,7 @@
 	}
 
 	netdev->netdev_ops = &visornic_dev_ops;
-	netdev->watchdog_timeo = (5 * HZ);
+	netdev->watchdog_timeo = 5 * HZ;
 	SET_NETDEV_DEV(netdev, &dev->device);
 
 	/* Get MAC adddress from channel and read it into the device. */
@@ -1893,6 +1897,16 @@
 		goto cleanup_napi_add;
 	}
 
+	/* Let's start our threads to get responses */
+	netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
+
+	/*
+	 * Note: Interrupts have to be enabled before the while
+	 * loop below because the NAPI routine is responsible for
+	 * setting enab_dis_acked
+	 */
+	visorbus_enable_channel_interrupts(dev);
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(&dev->device,
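The visornic hunks above register the poll routine with a fixed NAPI_WEIGHT and thread the NAPI budget down into service_resp_queue() so the response loop stops once the budget is consumed. A minimal sketch of that poll pattern, with hypothetical names and a stubbed-out dequeue:

#include <linux/netdevice.h>

#define EXAMPLE_NAPI_WEIGHT 64

static bool example_dequeue_response(void)
{
	return false;	/* stand-in: no real channel to drain in this sketch */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* process at most 'budget' responses per poll invocation */
	while (work_done < budget && example_dequeue_response())
		work_done++;

	if (work_done < budget)
		napi_complete(napi);	/* idle: re-enable device interrupts */

	return work_done;
}

/* at probe time: netif_napi_add(netdev, &priv->napi, example_poll,
 *                               EXAMPLE_NAPI_WEIGHT);
 */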
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index b6730a8..6c1b973 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -535,11 +535,9 @@
 	}
 
 	/* set MAC TD pointer */
-	MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv->PortOffset,
-			      (priv->td0_pool_dma));
+	MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
 
-	MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv->PortOffset,
-			      (priv->td1_pool_dma));
+	MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
 
 	/* set MAC Beacon TX pointer */
 	MACvSetCurrBCNTxDescAddr(priv->PortOffset,
@@ -590,11 +588,9 @@
 	MACvRx0PerPktMode(priv->PortOffset);
 	MACvRx1PerPktMode(priv->PortOffset);
 	/* set MAC RD pointer */
-	MACvSetCurrRx0DescAddr(priv->PortOffset,
-			       priv->rd0_pool_dma);
+	MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma);
 
-	MACvSetCurrRx1DescAddr(priv->PortOffset,
-			       priv->rd1_pool_dma);
+	MACvSetCurrRx1DescAddr(priv, priv->rd1_pool_dma);
 }
 
 /*
@@ -839,8 +835,6 @@
  */
 void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
-
 	switch (wLoopbackMode) {
 	case CARD_LB_NONE:
 	case CARD_LB_MAC:
@@ -850,7 +844,7 @@
 		break;
 	}
 	/* set MAC loopback */
-	MACvSetLoopbackMode(dwIoBase, LOBYTE(wLoopbackMode));
+	MACvSetLoopbackMode(priv, LOBYTE(wLoopbackMode));
 	/* set Baseband loopback */
 }
 
@@ -869,7 +863,7 @@
 {
 
 	/* reset MAC */
-	if (!MACbSafeSoftwareReset(priv->PortOffset))
+	if (!MACbSafeSoftwareReset(priv))
 		return false;
 
 	return true;
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 7a71782..9ac1ef9 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -174,64 +174,63 @@
  * Return Value: true if succeeded; false if failed.
  *
  */
-bool set_channel(void *pDeviceHandler, struct ieee80211_channel *ch)
+bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
 {
-	struct vnt_private *pDevice = pDeviceHandler;
-	bool bResult = true;
+	bool ret = true;
 
-	if (pDevice->byCurrentCh == ch->hw_value)
-		return bResult;
+	if (priv->byCurrentCh == ch->hw_value)
+		return ret;
 
 	/* Set VGA to max sensitivity */
-	if (pDevice->bUpdateBBVGA &&
-	    pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
-		pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+	if (priv->bUpdateBBVGA &&
+	    priv->byBBVGACurrent != priv->abyBBVGA[0]) {
+		priv->byBBVGACurrent = priv->abyBBVGA[0];
 
-		BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+		BBvSetVGAGainOffset(priv, priv->byBBVGACurrent);
 	}
 
 	/* clear NAV */
-	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
+	MACvRegBitsOn(priv->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
 
 	/* TX_PE will reserve 3 us for MAX2829 A mode only,
 	   it is for better TX throughput */
 
-	if (pDevice->byRFType == RF_AIROHA7230)
-		RFbAL7230SelectChannelPostProcess(pDevice, pDevice->byCurrentCh,
+	if (priv->byRFType == RF_AIROHA7230)
+		RFbAL7230SelectChannelPostProcess(priv, priv->byCurrentCh,
 						  ch->hw_value);
 
-	pDevice->byCurrentCh = ch->hw_value;
-	bResult &= RFbSelectChannel(pDevice, pDevice->byRFType,
-				    ch->hw_value);
+	priv->byCurrentCh = ch->hw_value;
+	ret &= RFbSelectChannel(priv, priv->byRFType,
+				ch->hw_value);
 
 	/* Init Synthesizer Table */
-	if (pDevice->bEnablePSMode)
-		RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, ch->hw_value);
+	if (priv->bEnablePSMode)
+		RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value);
 
-	BBvSoftwareReset(pDevice);
+	BBvSoftwareReset(priv);
 
-	if (pDevice->byLocalID > REV_ID_VT3253_B1) {
+	if (priv->byLocalID > REV_ID_VT3253_B1) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&pDevice->lock, flags);
+		spin_lock_irqsave(&priv->lock, flags);
 
 		/* set HW default power register */
-		MACvSelectPage1(pDevice->PortOffset);
-		RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
-		VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK,
-			     pDevice->byCurPwr);
-		RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
-		VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM,
-			     pDevice->byCurPwr);
-		MACvSelectPage0(pDevice->PortOffset);
+		MACvSelectPage1(priv->PortOffset);
+		RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
+		VNSvOutPortB(priv->PortOffset + MAC_REG_PWRCCK,
+			     priv->byCurPwr);
+		RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
+		VNSvOutPortB(priv->PortOffset + MAC_REG_PWROFDM,
+			     priv->byCurPwr);
+		MACvSelectPage0(priv->PortOffset);
 
-		spin_unlock_irqrestore(&pDevice->lock, flags);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
-	if (pDevice->byBBType == BB_TYPE_11B)
-		RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
+	if (priv->byBBType == BB_TYPE_11B)
+		RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
 	else
-		RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
+		RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
 
-	return bResult;
+	return ret;
 }
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index e2be6fc..2d613e7 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -27,6 +27,6 @@
 
 void vnt_init_bands(struct vnt_private *);
 
-bool set_channel(void *pDeviceHandler, struct ieee80211_channel *);
+bool set_channel(struct vnt_private *, struct ieee80211_channel *);
 
 #endif /* _CHANNEL_H_ */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index fefbf82..8a1ed62 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -211,11 +211,11 @@
 	unsigned char byCCKPwrdBm = 0;
 	unsigned char byOFDMPwrdBm = 0;
 
-	MACbShutdown(priv->PortOffset);
+	MACbShutdown(priv);
 	BBvSoftwareReset(priv);
 
 	/* Do MACbSoftwareReset in MACvInitialize */
-	MACbSoftwareReset(priv->PortOffset);
+	MACbSoftwareReset(priv);
 
 	priv->bAES = false;
 
@@ -229,7 +229,7 @@
 	priv->byTopCCKBasicRate = RATE_1M;
 
 	/* init MAC */
-	MACvInitialize(priv->PortOffset);
+	MACvInitialize(priv);
 
 	/* Get Local ID */
 	VNSvInPortB(priv->PortOffset + MAC_REG_LOCALID, &priv->byLocalID);
@@ -357,8 +357,8 @@
 			  MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
 
 	/* set performance parameter by registry */
-	MACvSetShortRetryLimit(priv->PortOffset, priv->byShortRetryLimit);
-	MACvSetLongRetryLimit(priv->PortOffset, priv->byLongRetryLimit);
+	MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
+	MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
 
 	/* reset TSF counter */
 	VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
@@ -742,6 +742,11 @@
 		dma_map_single(&priv->pcid->dev,
 			       skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
 			       priv->rx_buf_sz, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
+		dev_kfree_skb(rd_info->skb);
+		rd_info->skb = NULL;
+		return false;
+	}
 
 	*((unsigned int *)&rd->rd0) = 0; /* FIX cast */
 
@@ -884,7 +889,7 @@
 	if (status & ISR_FETALERR) {
 		dev_err(&priv->pcid->dev, "Hardware fatal error\n");
 
-		MACbShutdown(priv->PortOffset);
+		MACbShutdown(priv);
 		return;
 	}
 }
@@ -1012,7 +1017,7 @@
 			if ((priv->op_mode == NL80211_IFTYPE_AP ||
 			    priv->op_mode == NL80211_IFTYPE_ADHOC) &&
 			    priv->vif->bss_conf.enable_beacon) {
-				MACvOneShotTimer1MicroSec(priv->PortOffset,
+				MACvOneShotTimer1MicroSec(priv,
 							  (priv->vif->bss_conf.beacon_int - MAKE_BEACON_RESERVED) << 10);
 			}
 
@@ -1197,8 +1202,8 @@
 
 	cancel_work_sync(&priv->interrupt_work);
 
-	MACbShutdown(priv->PortOffset);
-	MACbSoftwareReset(priv->PortOffset);
+	MACbShutdown(priv);
+	MACbSoftwareReset(priv);
 	CARDbRadioPowerOff(priv);
 
 	device_free_td0_ring(priv);
@@ -1636,13 +1641,13 @@
 	INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
 
 	/* do reset */
-	if (!MACbSoftwareReset(priv->PortOffset)) {
+	if (!MACbSoftwareReset(priv)) {
 		dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
 		device_free_info(priv);
 		return -ENODEV;
 	}
 	/* initial to reload eeprom */
-	MACvInitialize(priv->PortOffset);
+	MACvInitialize(priv);
 	MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
 
 	/* Get RFType */
@@ -1690,7 +1695,7 @@
 
 	pci_save_state(pcid);
 
-	MACbShutdown(priv->PortOffset);
+	MACbShutdown(priv);
 
 	pci_disable_device(pcid);
 	pci_set_power_state(pcid, pci_choose_state(pcid, state));
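The device_main.c hunk above adds the mapping-error check that must follow every dma_map_single() before the returned handle is used. A sketch of the same check, with hypothetical device and buffer names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static bool example_map_rx_skb(struct device *dev, struct sk_buff *skb,
			       size_t len, dma_addr_t *handle)
{
	*handle = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle)) {
		dev_kfree_skb(skb);	/* drop the buffer, report failure */
		return false;
	}
	return true;
}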
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index f2b3fea..ffcaf25 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -36,7 +36,7 @@
 	u32 i;
 
 	for (i = 0; i < MAX_KEY_TABLE; i++)
-		MACvDisableKeyEntry(priv->PortOffset, i);
+		MACvDisableKeyEntry(priv, i);
 
 	return 0;
 }
@@ -104,7 +104,7 @@
 			key->key[15] |= 0x80;
 	}
 
-	MACvSetKeyEntry(priv->PortOffset, key_mode, entry, key_inx,
+	MACvSetKeyEntry(priv, key_mode, entry, key_inx,
 			bssid, (u32 *)key->key, priv->byLocalID);
 
 	return 0;
@@ -126,13 +126,13 @@
 	switch (key->cipher) {
 	case 0:
 		for (u = 0 ; u < MAX_KEY_TABLE; u++)
-			MACvDisableKeyEntry(priv->PortOffset, u);
+			MACvDisableKeyEntry(priv, u);
 		return ret;
 
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
 		for (u = 0; u < MAX_KEY_TABLE; u++)
-			MACvDisableKeyEntry(priv->PortOffset, u);
+			MACvDisableKeyEntry(priv, u);
 
 		vnt_set_keymode(hw, mac_addr,
 				key, VNT_KEY_DEFAULTKEY, KEY_CTL_WEP, true);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 688c3be..79e42d0 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -70,9 +70,10 @@
  * Return Value: true if all test bits On; otherwise false
  *
  */
-bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOn(struct vnt_private *priv, unsigned char byRegOfs,
 		     unsigned char byTestBits)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned char byData;
 
 	VNSvInPortB(dwIoBase + byRegOfs, &byData);
@@ -94,9 +95,10 @@
  * Return Value: true if all test bits Off; otherwise false
  *
  */
-bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
 		      unsigned char byTestBits)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned char byData;
 
 	VNSvInPortB(dwIoBase + byRegOfs, &byData);
@@ -116,8 +118,9 @@
  * Return Value: true if interrupt is disable; otherwise false
  *
  */
-bool MACbIsIntDisable(void __iomem *dwIoBase)
+bool MACbIsIntDisable(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned long dwData;
 
 	VNSvInPortD(dwIoBase + MAC_REG_IMR, &dwData);
@@ -141,8 +144,9 @@
  * Return Value: none
  *
  */
-void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
+void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	/* set SRT */
 	VNSvOutPortB(dwIoBase + MAC_REG_SRT, byRetryLimit);
 }
@@ -162,8 +166,9 @@
  * Return Value: none
  *
  */
-void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
+void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	/* set LRT */
 	VNSvOutPortB(dwIoBase + MAC_REG_LRT, byRetryLimit);
 }
@@ -182,8 +187,9 @@
  * Return Value: none
  *
  */
-void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode)
+void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned char byOrgValue;
 
 	byLoopbackMode <<= 6;
@@ -207,8 +213,9 @@
  * Return Value: none
  *
  */
-void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
+void MACvSaveContext(struct vnt_private *priv, unsigned char *pbyCxtBuf)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	int         ii;
 
 	/* read page0 register */
@@ -239,8 +246,9 @@
  * Return Value: none
  *
  */
-void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
+void MACvRestoreContext(struct vnt_private *priv, unsigned char *pbyCxtBuf)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	int         ii;
 
 	MACvSelectPage1(dwIoBase);
@@ -293,8 +301,9 @@
  * Return Value: true if Reset Success; otherwise false
  *
  */
-bool MACbSoftwareReset(void __iomem *dwIoBase)
+bool MACbSoftwareReset(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned char byData;
 	unsigned short ww;
 
@@ -324,7 +333,7 @@
  * Return Value: true if success; otherwise false
  *
  */
-bool MACbSafeSoftwareReset(void __iomem *dwIoBase)
+bool MACbSafeSoftwareReset(struct vnt_private *priv)
 {
 	unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
 	bool bRetVal;
@@ -334,11 +343,11 @@
 	 * reset, then restore register's value
 	 */
 	/* save MAC context */
-	MACvSaveContext(dwIoBase, abyTmpRegData);
+	MACvSaveContext(priv, abyTmpRegData);
 	/* do reset */
-	bRetVal = MACbSoftwareReset(dwIoBase);
+	bRetVal = MACbSoftwareReset(priv);
 	/* restore MAC context, except CR0 */
-	MACvRestoreContext(dwIoBase, abyTmpRegData);
+	MACvRestoreContext(priv, abyTmpRegData);
 
 	return bRetVal;
 }
@@ -356,8 +365,9 @@
  * Return Value: true if success; otherwise false
  *
  */
-bool MACbSafeRxOff(void __iomem *dwIoBase)
+bool MACbSafeRxOff(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short ww;
 	unsigned long dwData;
 	unsigned char byData;
@@ -414,8 +424,9 @@
  * Return Value: true if success; otherwise false
  *
  */
-bool MACbSafeTxOff(void __iomem *dwIoBase)
+bool MACbSafeTxOff(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short ww;
 	unsigned long dwData;
 	unsigned char byData;
@@ -474,18 +485,20 @@
  * Return Value: true if success; otherwise false
  *
  */
-bool MACbSafeStop(void __iomem *dwIoBase)
+bool MACbSafeStop(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
+
 	MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
 
-	if (!MACbSafeRxOff(dwIoBase)) {
+	if (!MACbSafeRxOff(priv)) {
 		pr_debug(" MACbSafeRxOff == false)\n");
-		MACbSafeSoftwareReset(dwIoBase);
+		MACbSafeSoftwareReset(priv);
 		return false;
 	}
-	if (!MACbSafeTxOff(dwIoBase)) {
+	if (!MACbSafeTxOff(priv)) {
 		pr_debug(" MACbSafeTxOff == false)\n");
-		MACbSafeSoftwareReset(dwIoBase);
+		MACbSafeSoftwareReset(priv);
 		return false;
 	}
 
@@ -507,17 +520,18 @@
  * Return Value: true if success; otherwise false
  *
  */
-bool MACbShutdown(void __iomem *dwIoBase)
+bool MACbShutdown(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	/* disable MAC IMR */
 	MACvIntDisable(dwIoBase);
-	MACvSetLoopbackMode(dwIoBase, MAC_LB_INTERNAL);
+	MACvSetLoopbackMode(priv, MAC_LB_INTERNAL);
 	/* stop the adapter */
-	if (!MACbSafeStop(dwIoBase)) {
-		MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
+	if (!MACbSafeStop(priv)) {
+		MACvSetLoopbackMode(priv, MAC_LB_NONE);
 		return false;
 	}
-	MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
+	MACvSetLoopbackMode(priv, MAC_LB_NONE);
 	return true;
 }
 
@@ -534,8 +548,9 @@
  * Return Value: none
  *
  */
-void MACvInitialize(void __iomem *dwIoBase)
+void MACvInitialize(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	/* clear sticky bits */
 	MACvClearStckDS(dwIoBase);
 	/* disable force PME-enable */
@@ -543,7 +558,7 @@
 	/* only 3253 A */
 
 	/* do reset */
-	MACbSoftwareReset(dwIoBase);
+	MACbSoftwareReset(priv);
 
 	/* reset TSF counter */
 	VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
@@ -565,8 +580,9 @@
  * Return Value: none
  *
  */
-void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr)
+void MACvSetCurrRx0DescAddr(struct vnt_private *priv, unsigned long dwCurrDescAddr)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byData;
 	unsigned char byOrgDMACtl;
@@ -600,8 +616,9 @@
  * Return Value: none
  *
  */
-void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr)
+void MACvSetCurrRx1DescAddr(struct vnt_private *priv, unsigned long dwCurrDescAddr)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byData;
 	unsigned char byOrgDMACtl;
@@ -636,9 +653,10 @@
  * Return Value: none
  *
  */
-void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
+void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
 			      unsigned long dwCurrDescAddr)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byData;
 	unsigned char byOrgDMACtl;
@@ -673,9 +691,10 @@
  *
  */
 /* TxDMA1 = AC0DMA */
-void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase,
+void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
 			      unsigned long dwCurrDescAddr)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byData;
 	unsigned char byOrgDMACtl;
@@ -696,13 +715,13 @@
 		VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN);
 }
 
-void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
+void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
 			   unsigned long dwCurrDescAddr)
 {
 	if (iTxType == TYPE_AC0DMA)
-		MACvSetCurrAC0DescAddrEx(dwIoBase, dwCurrDescAddr);
+		MACvSetCurrAC0DescAddrEx(priv, dwCurrDescAddr);
 	else if (iTxType == TYPE_TXDMA0)
-		MACvSetCurrTx0DescAddrEx(dwIoBase, dwCurrDescAddr);
+		MACvSetCurrTx0DescAddrEx(priv, dwCurrDescAddr);
 }
 
 /*
@@ -719,8 +738,9 @@
  * Return Value: none
  *
  */
-void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay)
+void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned char byValue;
 	unsigned int uu, ii;
 
@@ -754,16 +774,20 @@
  * Return Value: none
  *
  */
-void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime)
+void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
+
 	VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0);
 	VNSvOutPortD(dwIoBase + MAC_REG_TMDATA1, uDelayTime);
 	VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, (TMCTL_TMD | TMCTL_TE));
 }
 
-void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset,
+void MACvSetMISCFifo(struct vnt_private *priv, unsigned short wOffset,
 		     unsigned long dwData)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
+
 	if (wOffset > 273)
 		return;
 	VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
@@ -771,12 +795,13 @@
 	VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
 }
 
-bool MACbPSWakeup(void __iomem *dwIoBase)
+bool MACbPSWakeup(struct vnt_private *priv)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned char byOrgValue;
 	unsigned int ww;
 	/* Read PSCTL */
-	if (MACbIsRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PS))
+	if (MACbIsRegBitsOff(priv, MAC_REG_PSCTL, PSCTL_PS))
 		return true;
 
 	/* Disable PS */
@@ -810,11 +835,12 @@
  *
  */
 
-void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
+void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
 		     unsigned int uEntryIdx, unsigned int uKeyIdx,
 		     unsigned char *pbyAddr, u32 *pdwKey,
 		     unsigned char byLocalID)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short wOffset;
 	u32 dwData;
 	int     ii;
@@ -878,8 +904,9 @@
  * Return Value: none
  *
  */
-void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx)
+void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx)
 {
+	void __iomem *dwIoBase = priv->PortOffset;
 	unsigned short wOffset;
 
 	wOffset = MISCFIFO_KEYETRY0;
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 8e0200a..192f466 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -890,57 +890,57 @@
 #define MACvSetRFLE_LatchBase(dwIoBase)                                 \
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
 
-bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
 		     unsigned char byTestBits);
-bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOff(struct vnt_private *, unsigned char byRegOfs,
 		      unsigned char byTestBits);
 
-bool MACbIsIntDisable(void __iomem *dwIoBase);
+bool MACbIsIntDisable(struct vnt_private *);
 
-void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
+void MACvSetShortRetryLimit(struct vnt_private *, unsigned char byRetryLimit);
 
-void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
-void MACvGetLongRetryLimit(void __iomem *dwIoBase,
+void MACvSetLongRetryLimit(struct vnt_private *, unsigned char byRetryLimit);
+void MACvGetLongRetryLimit(struct vnt_private *,
 			   unsigned char *pbyRetryLimit);
 
-void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode);
+void MACvSetLoopbackMode(struct vnt_private *, unsigned char byLoopbackMode);
 
-void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
-void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
+void MACvSaveContext(struct vnt_private *, unsigned char *pbyCxtBuf);
+void MACvRestoreContext(struct vnt_private *, unsigned char *pbyCxtBuf);
 
-bool MACbSoftwareReset(void __iomem *dwIoBase);
-bool MACbSafeSoftwareReset(void __iomem *dwIoBase);
-bool MACbSafeRxOff(void __iomem *dwIoBase);
-bool MACbSafeTxOff(void __iomem *dwIoBase);
-bool MACbSafeStop(void __iomem *dwIoBase);
-bool MACbShutdown(void __iomem *dwIoBase);
-void MACvInitialize(void __iomem *dwIoBase);
-void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase,
+bool MACbSoftwareReset(struct vnt_private *);
+bool MACbSafeSoftwareReset(struct vnt_private *);
+bool MACbSafeRxOff(struct vnt_private *);
+bool MACbSafeTxOff(struct vnt_private *);
+bool MACbSafeStop(struct vnt_private *);
+bool MACbShutdown(struct vnt_private *);
+void MACvInitialize(struct vnt_private *);
+void MACvSetCurrRx0DescAddr(struct vnt_private *,
 			    unsigned long dwCurrDescAddr);
-void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase,
+void MACvSetCurrRx1DescAddr(struct vnt_private *,
 			    unsigned long dwCurrDescAddr);
-void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
+void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *,
 			   unsigned long dwCurrDescAddr);
-void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
+void MACvSetCurrTx0DescAddrEx(struct vnt_private *,
 			      unsigned long dwCurrDescAddr);
-void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase,
+void MACvSetCurrAC0DescAddrEx(struct vnt_private *,
 			      unsigned long dwCurrDescAddr);
-void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase,
+void MACvSetCurrSyncDescAddrEx(struct vnt_private *,
 			       unsigned long dwCurrDescAddr);
-void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase,
+void MACvSetCurrATIMDescAddrEx(struct vnt_private *,
 			       unsigned long dwCurrDescAddr);
-void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay);
-void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
+void MACvTimer0MicroSDelay(struct vnt_private *, unsigned int uDelay);
+void MACvOneShotTimer1MicroSec(struct vnt_private *, unsigned int uDelayTime);
 
-void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset,
+void MACvSetMISCFifo(struct vnt_private *, unsigned short wOffset,
 		     unsigned long dwData);
 
-bool MACbPSWakeup(void __iomem *dwIoBase);
+bool MACbPSWakeup(struct vnt_private *);
 
-void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
+void MACvSetKeyEntry(struct vnt_private *, unsigned short wKeyCtl,
 		     unsigned int uEntryIdx, unsigned int uKeyIdx,
 		     unsigned char *pbyAddr, u32 *pdwKey,
 		     unsigned char byLocalID);
-void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx);
+void MACvDisableKeyEntry(struct vnt_private *, unsigned int uEntryIdx);
 
 #endif /* __MAC_H__ */
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 06e6b9d..bc8ca98 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -64,44 +64,43 @@
 
 void
 PSvEnablePowerSaving(
-	void *hDeviceContext,
+	struct vnt_private *priv,
 	unsigned short wListenInterval
 )
 {
-	struct vnt_private *pDevice = hDeviceContext;
-	u16 wAID = pDevice->current_aid | BIT(14) | BIT(15);
+	u16 wAID = priv->current_aid | BIT(14) | BIT(15);
 
 	/* set period of power up before TBTT */
-	VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
-	if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
+	VNSvOutPortW(priv->PortOffset + MAC_REG_PWBT, C_PWBT);
+	if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
 		/* set AID */
-		VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID);
+		VNSvOutPortW(priv->PortOffset + MAC_REG_AIDATIM, wAID);
 	} else {
 		/* set ATIM Window */
 #if 0 /* TODO atim window */
-		MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
+		MACvWriteATIMW(priv->PortOffset, pMgmt->wCurrATIMWindow);
 #endif
 	}
 	/* Set AutoSleep */
-	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+	MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
 	/* Set HWUTSF */
-	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+	MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
 
 	if (wListenInterval >= 2) {
 		/* clear always listen beacon */
-		MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+		MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
 		/* first time set listen next beacon */
-		MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
+		MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
 	} else {
 		/* always listen beacon */
-		MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+		MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
 	}
 
 	/* enable power saving hw function */
-	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
-	pDevice->bEnablePSMode = true;
+	MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
+	priv->bEnablePSMode = true;
 
-	pDevice->bPWBitOn = true;
+	priv->bPWBitOn = true;
 	pr_debug("PS:Power Saving Mode Enable...\n");
 }
 
@@ -117,23 +116,21 @@
 
 void
 PSvDisablePowerSaving(
-	void *hDeviceContext
+	struct vnt_private *priv
 )
 {
-	struct vnt_private *pDevice = hDeviceContext;
-
 	/* disable power saving hw function */
-	MACbPSWakeup(pDevice->PortOffset);
+	MACbPSWakeup(priv);
 	/* clear AutoSleep */
-	MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+	MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
 	/* clear HWUTSF */
-	MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+	MACvRegBitsOff(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
 	/* set always listen beacon */
-	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+	MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
 
-	pDevice->bEnablePSMode = false;
+	priv->bEnablePSMode = false;
 
-	pDevice->bPWBitOn = false;
+	priv->bPWBitOn = false;
 }
 
 
@@ -149,27 +146,26 @@
 
 bool
 PSbIsNextTBTTWakeUp(
-	void *hDeviceContext
+	struct vnt_private *priv
 )
 {
-	struct vnt_private *pDevice = hDeviceContext;
-	struct ieee80211_hw *hw = pDevice->hw;
+	struct ieee80211_hw *hw = priv->hw;
 	struct ieee80211_conf *conf = &hw->conf;
-	bool bWakeUp = false;
+	bool wake_up = false;
 
 	if (conf->listen_interval > 1) {
-		if (!pDevice->wake_up_count)
-			pDevice->wake_up_count = conf->listen_interval;
+		if (!priv->wake_up_count)
+			priv->wake_up_count = conf->listen_interval;
 
-		--pDevice->wake_up_count;
+		--priv->wake_up_count;
 
-		if (pDevice->wake_up_count == 1) {
+		if (priv->wake_up_count == 1) {
 			/* Turn on wake up to listen next beacon */
-			MACvRegBitsOn(pDevice->PortOffset,
+			MACvRegBitsOn(priv->PortOffset,
 				      MAC_REG_PSCTL, PSCTL_LNBCN);
-			bWakeUp = true;
+			wake_up = true;
 		}
 	}
 
-	return bWakeUp;
+	return wake_up;
 }
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index 538e685..d82dd8d 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -29,25 +29,27 @@
 #ifndef __POWER_H__
 #define __POWER_H__
 
+#include "device.h"
+
 #define C_PWBT                   1000    /* micro sec. power up before TBTT */
 #define PS_FAST_INTERVAL         1       /* Fast power saving listen interval */
 #define PS_MAX_INTERVAL          4       /* MAX power saving listen interval */
 
 void
 PSvDisablePowerSaving(
-	void *hDeviceContext
+	struct vnt_private *
 );
 
 void
 PSvEnablePowerSaving(
-	void *hDeviceContext,
+	struct vnt_private *,
 	unsigned short wListenInterval
 );
 
 
 bool
 PSbIsNextTBTTWakeUp(
-	void *hDeviceContext
+	struct vnt_private *
 );
 
 #endif /* __POWER_H__ */
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 4c22bb3..ae10da2 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -420,9 +420,9 @@
 {
 	void __iomem *dwIoBase = priv->PortOffset;
 	int     ii;
-	bool bResult;
+	bool ret;
 
-	bResult = true;
+	ret = true;
 
 	/* 3-wire control for normal mode */
 	VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
@@ -432,21 +432,21 @@
 	BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
 
 	for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
 
 	/* PLL On */
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* Calibration */
-	MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
+	MACvTimer0MicroSDelay(priv, 150);/* 150us */
 	/* TXDCOC:active, RCK:disable */
-	bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
+	ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+	MACvTimer0MicroSDelay(priv, 30);/* 30us */
 	/* TXDCOC:disable, RCK:active */
-	bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
+	ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+	MACvTimer0MicroSDelay(priv, 30);/* 30us */
 	/* TXDCOC:disable, RCK:disable */
-	bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
+	ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
 
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
 							 SOFTPWRCTL_SWPE2    |
@@ -459,7 +459,7 @@
 	/* 3-wire control for power saving mode */
 	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
 
-	return bResult;
+	return ret;
 }
 
 /* Need to Pull PLLON low when writing channel registers through
@@ -467,27 +467,27 @@
 static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
 {
 	void __iomem *dwIoBase = priv->PortOffset;
-	bool bResult;
+	bool ret;
 
-	bResult = true;
+	ret = true;
 
 	/* PLLON Off */
 	MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
-	bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
-	bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
-	bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
+	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
+	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
+	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
 
 	/* PLLOn On */
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* Set Channel[7] = 0 to tell H/W channel is changing now. */
 	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
-	MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230);
+	MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230);
 	/* Set Channel[7] = 1 to tell H/W channel change is done. */
 	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
 
-	return bResult;
+	return ret;
 }
 
 /*
@@ -540,9 +540,9 @@
 {
 	void __iomem *dwIoBase = priv->PortOffset;
 	int     ii;
-	bool bResult;
+	bool ret;
 
-	bResult = true;
+	ret = true;
 
 	/* 3-wire control for normal mode */
 	VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
@@ -556,18 +556,18 @@
 	IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
 
 	for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
-		bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
-	MACvTimer0MicroSDelay(dwIoBase, 30); /* delay 30 us */
+		ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
+	MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
 
 	/* PLL On */
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
-	MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
-	bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
-	bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
-	bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+	MACvTimer0MicroSDelay(priv, 150);/* 150us */
+	ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+	MACvTimer0MicroSDelay(priv, 30);/* 30us */
+	ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+	MACvTimer0MicroSDelay(priv, 30);/* 30us */
+	ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
 
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
 							 SOFTPWRCTL_SWPE2    |
@@ -577,26 +577,26 @@
 	/* 3-wire control for power saving mode */
 	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
 
-	return bResult;
+	return ret;
 }
 
 static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
 {
 	void __iomem *dwIoBase = priv->PortOffset;
-	bool bResult;
+	bool ret;
 
-	bResult = true;
+	ret = true;
 
-	bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
-	bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
+	ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
+	ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
 
 	/* Set Channel[7] = 0 to tell H/W channel is changing now. */
 	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
-	MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230);
+	MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
 	/* Set Channel[7] = 1 to tell H/W channel change is done. */
 	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
 
-	return bResult;
+	return ret;
 }
 
 /*
@@ -612,30 +612,28 @@
  * Return Value: true if succeeded; false if failed.
  *
  */
-bool RFbInit(
-	struct vnt_private *priv
-)
+bool RFbInit(struct vnt_private *priv)
 {
-	bool bResult = true;
+	bool ret = true;
 
 	switch (priv->byRFType) {
 	case RF_AIROHA:
 	case RF_AL2230S:
 		priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
-		bResult = RFbAL2230Init(priv);
+		ret = RFbAL2230Init(priv);
 		break;
 	case RF_AIROHA7230:
 		priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
-		bResult = s_bAL7230Init(priv);
+		ret = s_bAL7230Init(priv);
 		break;
 	case RF_NOTHING:
-		bResult = true;
+		ret = true;
 		break;
 	default:
-		bResult = false;
+		ret = false;
 		break;
 	}
-	return bResult;
+	return ret;
 }
 
 /*
@@ -654,26 +652,26 @@
 bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
 		      u16 byChannel)
 {
-	bool bResult = true;
+	bool ret = true;
 
 	switch (byRFType) {
 	case RF_AIROHA:
 	case RF_AL2230S:
-		bResult = RFbAL2230SelectChannel(priv, byChannel);
+		ret = RFbAL2230SelectChannel(priv, byChannel);
 		break;
 		/*{{ RobertYu: 20050104 */
 	case RF_AIROHA7230:
-		bResult = s_bAL7230SelectChannel(priv, byChannel);
+		ret = s_bAL7230SelectChannel(priv, byChannel);
 		break;
 		/*}} RobertYu */
 	case RF_NOTHING:
-		bResult = true;
+		ret = true;
 		break;
 	default:
-		bResult = false;
+		ret = false;
 		break;
 	}
-	return bResult;
+	return ret;
 }
 
 /*
@@ -711,11 +709,11 @@
 			return false;
 
 		for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
-			MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
+			MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
 
-		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
+		MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
 		ii++;
-		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
+		MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
 		break;
 
 		/* Need to check, PLLON need to be low for channel setting */
@@ -728,17 +726,17 @@
 
 		if (uChannel <= CB_MAX_CHANNEL_24G) {
 			for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
-				MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
+				MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
 		} else {
 			for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
-				MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
+				MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
 		}
 
-		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
+		MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
 		ii++;
-		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
+		MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
 		ii++;
-		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
+		MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
 		break;
 
 	case RF_NOTHING:
@@ -748,7 +746,7 @@
 		return false;
 	}
 
-	MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount));
+	MACvSetMISCFifo(priv, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount));
 
 	return true;
 }
@@ -772,7 +770,7 @@
 	u16 uCH
 )
 {
-	bool bResult = true;
+	bool ret = true;
 	unsigned char byPwr = 0;
 	unsigned char byDec = 0;
 
@@ -818,11 +816,11 @@
 	if (priv->byCurPwr == byPwr)
 		return true;
 
-	bResult = RFbRawSetPower(priv, byPwr, rate);
-	if (bResult)
+	ret = RFbRawSetPower(priv, byPwr, rate);
+	if (ret)
 		priv->byCurPwr = byPwr;
 
-	return bResult;
+	return ret;
 }
 
 /*
@@ -845,7 +843,7 @@
 	unsigned int rate
 )
 {
-	bool bResult = true;
+	bool ret = true;
 	unsigned long dwMax7230Pwr = 0;
 
 	if (byPwr >=  priv->byMaxPwrLevel)
@@ -853,22 +851,22 @@
 
 	switch (priv->byRFType) {
 	case RF_AIROHA:
-		bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
 		if (rate <= RATE_11M)
-			bResult &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
 		else
-			bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
 
 		break;
 
 	case RF_AL2230S:
-		bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
 		if (rate <= RATE_11M) {
-			bResult &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
-			bResult &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
 		} else {
-			bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
-			bResult &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
 		}
 
 		break;
@@ -879,13 +877,13 @@
 		dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) |
 			(BY_AL7230_REG_LEN << 3)  | IFREGCTL_REGW;
 
-		bResult &= IFRFbWriteEmbedded(priv, dwMax7230Pwr);
+		ret &= IFRFbWriteEmbedded(priv, dwMax7230Pwr);
 		break;
 
 	default:
 		break;
 	}
-	return bResult;
+	return ret;
 }
 
 /*+
@@ -934,32 +932,32 @@
 				       u16 byOldChannel,
 				       u16 byNewChannel)
 {
-	bool bResult;
+	bool ret;
 
-	bResult = true;
+	ret = true;
 
 	/* if change between 11 b/g and 11a need to update the following
 	 * register
 	 * Channel Index 1~14 */
 	if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) {
 		/* Change from 2.4G to 5G [Reg] */
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
 	} else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) {
 		/* Change from 5G to 2.4G [Reg] */
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
-		bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
+		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
 	}
 
-	return bResult;
+	return ret;
 }
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 76b5f41..4832666 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -259,8 +259,8 @@
 };
 
 /* flags for options */
-#define DEVICE_FLAGS_UNPLUG		BIT(0)
-#define DEVICE_FLAGS_DISCONNECTED	BIT(1)
+#define DEVICE_FLAGS_UNPLUG		0
+#define DEVICE_FLAGS_DISCONNECTED	1
 
 struct vnt_private {
 	/* mac80211 */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ee8d1e1..f9afab7 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -74,10 +74,10 @@
 #define LONG_RETRY_DEF     4
 
 /* BasebandType[] baseband type selected
-   0: indicate 802.11a type
-   1: indicate 802.11b type
-   2: indicate 802.11g type
-*/
+ * 0: indicates 802.11a type
+ * 1: indicates 802.11b type
+ * 2: indicates 802.11g type
+ */
 
 #define BBP_TYPE_DEF     2
 
@@ -284,7 +284,8 @@
 			calib_rx_iq = priv->eeprom[EEP_OFS_CALIB_RX_IQ];
 			if (calib_tx_iq || calib_tx_dc || calib_rx_iq) {
 				/* CR255, enable TX/RX IQ and
-				   DC compensation mode */
+				 * DC compensation mode
+				 */
 				vnt_control_out_u8(priv,
 						   MESSAGE_REQUEST_BBREG,
 						   0xff,
@@ -306,7 +307,8 @@
 						   calib_rx_iq);
 			} else {
 				/* CR255, turn off
-				   BB Calibration compensation */
+				 * BB Calibration compensation
+				 */
 				vnt_control_out_u8(priv,
 						   MESSAGE_REQUEST_BBREG,
 						   0xff,
@@ -429,7 +431,7 @@
 	for (ii = 0; ii < priv->num_tx_context; ii++) {
 		tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
 								GFP_KERNEL);
-		if (tx_context == NULL)
+		if (!tx_context)
 			goto free_tx;
 
 		priv->tx_context[ii] = tx_context;
@@ -437,7 +439,7 @@
 		tx_context->pkt_no = ii;
 
 		/* allocate URBs */
-		tx_context->urb = usb_alloc_urb(0, GFP_ATOMIC);
+		tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (!tx_context->urb) {
 			dev_err(&priv->usb->dev, "alloc tx urb failed\n");
 			goto free_tx;
@@ -459,14 +461,14 @@
 		rcb->priv = priv;
 
 		/* allocate URBs */
-		rcb->urb = usb_alloc_urb(0, GFP_ATOMIC);
-		if (rcb->urb == NULL) {
+		rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!rcb->urb) {
 			dev_err(&priv->usb->dev, "Failed to alloc rx urb\n");
 			goto free_rx_tx;
 		}
 
 		rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
-		if (rcb->skb == NULL)
+		if (!rcb->skb)
 			goto free_rx_tx;
 
 		rcb->in_use = false;
@@ -476,14 +478,14 @@
 			goto free_rx_tx;
 	}
 
-	priv->interrupt_urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (priv->interrupt_urb == NULL) {
+	priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!priv->interrupt_urb) {
 		dev_err(&priv->usb->dev, "Failed to alloc int urb\n");
 		goto free_rx_tx;
 	}
 
 	priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
-	if (priv->int_buf.data_buf == NULL) {
+	if (!priv->int_buf.data_buf) {
 		usb_free_urb(priv->interrupt_urb);
 		goto free_rx_tx;
 	}
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index c025dab..e322b7d 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -103,7 +103,7 @@
 
 	/* disable power saving hw function */
 	vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
-	                0, 0, NULL);
+			0, 0, NULL);
 
 	/* clear AutoSleep */
 	vnt_mac_reg_bits_off(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 351a99f..f546553 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -101,9 +101,9 @@
 static void vnt_start_interrupt_urb_complete(struct urb *urb)
 {
 	struct vnt_private *priv = urb->context;
-	int status;
+	int status = urb->status;
 
-	switch (urb->status) {
+	switch (status) {
 	case 0:
 	case -ETIMEDOUT:
 		break;
@@ -116,9 +116,7 @@
 		break;
 	}
 
-	status = urb->status;
-
-	if (status != STATUS_SUCCESS) {
+	if (status) {
 		priv->int_buf.in_use = false;
 
 		dev_dbg(&priv->usb->dev, "%s status = %d\n", __func__, status);
@@ -207,10 +205,9 @@
 int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb)
 {
 	int status = 0;
-	struct urb *urb;
+	struct urb *urb = rcb->urb;
 
-	urb = rcb->urb;
-	if (rcb->skb == NULL) {
+	if (!rcb->skb) {
 		dev_dbg(&priv->usb->dev, "rcb->skb is null\n");
 		return status;
 	}
@@ -224,7 +221,7 @@
 			  rcb);
 
 	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status != 0) {
+	if (status) {
 		dev_dbg(&priv->usb->dev, "Submit Rx URB failed %d\n", status);
 		return STATUS_FAILURE;
 	}
@@ -269,15 +266,13 @@
 		   struct vnt_usb_send_context *context)
 {
 	int status;
-	struct urb *urb;
+	struct urb *urb = context->urb;
 
 	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) {
 		context->in_use = false;
 		return STATUS_RESOURCES;
 	}
 
-	urb = context->urb;
-
 	usb_fill_bulk_urb(urb,
 			  priv->usb,
 			  usb_sndbulkpipe(priv->usb, 3),
@@ -287,7 +282,7 @@
 			  context);
 
 	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status != 0) {
+	if (status) {
 		dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status);
 
 		context->in_use = false;
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index 20a5cb9..acc3f3e 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,11 +1,9 @@
 obj-$(CONFIG_WILC1000) += wilc1000.o
 
-ccflags-y += -DSTA_FIRMWARE=\"atmel/wilc1000_fw.bin\" \
-		-DAP_FIRMWARE=\"atmel/wilc1000_ap_fw.bin\" \
-		-DP2P_CONCURRENCY_FIRMWARE=\"atmel/wilc1000_p2p_fw.bin\"
+ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
+		-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
 
 ccflags-y += -I$(src)/ -DWILC_ASIC_A0 -DWILC_DEBUGFS
-#ccflags-y += -DTCP_ACK_FILTER
 
 wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
 			wilc_msgqueue.o \
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 2d4d3f1..25dc108 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -1,22 +1,11 @@
-
-/*!
- *  @file	coreconfigurator.c
- *  @brief
- *  @author
- *  @sa		coreconfigurator.h
- *  @date	1 Mar 2012
- *  @version	1.0
- */
-
 #include "coreconfigurator.h"
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
 #include <linux/errno.h>
 #include <linux/slab.h>
 #define TAG_PARAM_OFFSET	(MAC_HDR_LEN + TIME_STAMP_LEN + \
-							BEACON_INTERVAL_LEN + CAP_INFO_LEN)
+				 BEACON_INTERVAL_LEN + CAP_INFO_LEN)
 
-/* Basic Frame Type Codes (2-bit) */
 enum basic_frame_type {
 	FRAME_TYPE_CONTROL     = 0x04,
 	FRAME_TYPE_DATA        = 0x08,
@@ -25,7 +14,6 @@
 	FRAME_TYPE_FORCE_32BIT = 0xFFFFFFFF
 };
 
-/* Frame Type and Subtype Codes (6-bit) */
 enum sub_frame_type {
 	ASSOC_REQ             = 0x00,
 	ASSOC_RSP             = 0x10,
@@ -65,7 +53,6 @@
 	FRAME_SUBTYPE_FORCE_32BIT  = 0xFFFFFFFF
 };
 
-/* Element ID  of various Information Elements */
 enum info_element_id {
 	ISSID               = 0,   /* Service Set Identifier         */
 	ISUPRATES           = 1,   /* Supported Rates                */
@@ -109,8 +96,6 @@
 	INFOELEM_ID_FORCE_32BIT  = 0xFFFFFFFF
 };
 
-/* This function extracts the beacon period field from the beacon or probe   */
-/* response frame.                                                           */
 static inline u16 get_beacon_period(u8 *data)
 {
 	u16 bcn_per;
@@ -147,54 +132,36 @@
 	return time_stamp;
 }
 
-/* This function extracts the 'frame type and sub type' bits from the MAC    */
-/* header of the input frame.                                                */
-/* Returns the value in the LSB of the returned value.                       */
 static inline enum sub_frame_type get_sub_type(u8 *header)
 {
 	return ((enum sub_frame_type)(header[0] & 0xFC));
 }
 
-/* This function extracts the 'to ds' bit from the MAC header of the input   */
-/* frame.                                                                    */
-/* Returns the value in the LSB of the returned value.                       */
 static inline u8 get_to_ds(u8 *header)
 {
 	return (header[1] & 0x01);
 }
 
-/* This function extracts the 'from ds' bit from the MAC header of the input */
-/* frame.                                                                    */
-/* Returns the value in the LSB of the returned value.                       */
 static inline u8 get_from_ds(u8 *header)
 {
 	return ((header[1] & 0x02) >> 1);
 }
 
-/* This function extracts the MAC Address in 'address1' field of the MAC     */
-/* header and updates the MAC Address in the allocated 'addr' variable.      */
 static inline void get_address1(u8 *pu8msa, u8 *addr)
 {
 	memcpy(addr, pu8msa + 4, 6);
 }
 
-/* This function extracts the MAC Address in 'address2' field of the MAC     */
-/* header and updates the MAC Address in the allocated 'addr' variable.      */
 static inline void get_address2(u8 *pu8msa, u8 *addr)
 {
 	memcpy(addr, pu8msa + 10, 6);
 }
 
-/* This function extracts the MAC Address in 'address3' field of the MAC     */
-/* header and updates the MAC Address in the allocated 'addr' variable.      */
 static inline void get_address3(u8 *pu8msa, u8 *addr)
 {
 	memcpy(addr, pu8msa + 16, 6);
 }
 
-/* This function extracts the BSSID from the incoming WLAN packet based on   */
-/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr'    */
-/* variable.                                                                 */
 static inline void get_BSSID(u8 *data, u8 *bssid)
 {
 	if (get_from_ds(data) == 1)
@@ -205,7 +172,6 @@
 		get_address3(data, bssid);
 }
 
-/* This function extracts the SSID from a beacon/probe response frame        */
 static inline void get_ssid(u8 *data, u8 *ssid, u8 *p_ssid_len)
 {
 	u8 len = 0;
@@ -217,8 +183,6 @@
 	j   = MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN +
 		CAP_INFO_LEN + 2;
 
-	/* If the SSID length field is set wrongly to a value greater than the   */
-	/* allowed maximum SSID length limit, reset the length to 0              */
 	if (len >= MAX_SSID_LEN)
 		len = 0;
 
@@ -230,8 +194,6 @@
 	*p_ssid_len = len;
 }
 
-/* This function extracts the capability info field from the beacon or probe */
-/* response frame.                                                           */
 static inline u16 get_cap_info(u8 *data)
 {
 	u16 cap_info = 0;
@@ -240,8 +202,6 @@
 
 	st = get_sub_type(data);
 
-	/* Location of the Capability field is different for Beacon and */
-	/* Association frames.                                          */
 	if ((st == BEACON) || (st == PROBE_RSP))
 		index += TIME_STAMP_LEN + BEACON_INTERVAL_LEN;
 
@@ -251,8 +211,6 @@
 	return cap_info;
 }
 
-/* This function extracts the capability info field from the Association */
-/* response frame.                                                                       */
 static inline u16 get_assoc_resp_cap_info(u8 *data)
 {
 	u16 cap_info;
@@ -263,8 +221,6 @@
 	return cap_info;
 }
 
-/* This function extracts the association status code from the incoming       */
-/* association response frame and returns association status code            */
 static inline u16 get_asoc_status(u8 *data)
 {
 	u16 asoc_status;
@@ -275,8 +231,6 @@
 	return asoc_status;
 }
 
-/* This function extracts association ID from the incoming association       */
-/* response frame							                                     */
 static inline u16 get_asoc_id(u8 *data)
 {
 	u16 asoc_id;
@@ -287,347 +241,149 @@
 	return asoc_id;
 }
 
-static u8 *get_tim_elm(u8 *pu8msa, u16 u16RxLen, u16 u16TagParamOffset)
+static u8 *get_tim_elm(u8 *pu8msa, u16 rx_len, u16 tag_param_offset)
 {
-	u16 u16index;
+	u16 index;
 
-	/*************************************************************************/
-	/*                       Beacon Frame - Frame Body                       */
-	/* --------------------------------------------------------------------- */
-	/* |Timestamp |BeaconInt |CapInfo |SSID |SupRates |DSParSet |TIM elm   | */
-	/* --------------------------------------------------------------------- */
-	/* |8         |2         |2       |2-34 |3-10     |3        |4-256     | */
-	/* --------------------------------------------------------------------- */
-	/*                                                                       */
-	/*************************************************************************/
+	index = tag_param_offset;
 
-	u16index = u16TagParamOffset;
-
-	/* Search for the TIM Element Field and return if the element is found */
-	while (u16index < (u16RxLen - FCS_LEN)) {
-		if (pu8msa[u16index] == ITIM)
-			return &pu8msa[u16index];
-		u16index += (IE_HDR_LEN + pu8msa[u16index + 1]);
+	while (index < (rx_len - FCS_LEN)) {
+		if (pu8msa[index] == ITIM)
+			return &pu8msa[index];
+		index += (IE_HDR_LEN + pu8msa[index + 1]);
 	}
 
 	return NULL;
 }
 
-/* This function gets the current channel information from
- * the 802.11n beacon/probe response frame */
-static u8 get_current_channel_802_11n(u8 *pu8msa, u16 u16RxLen)
+static u8 get_current_channel_802_11n(u8 *pu8msa, u16 rx_len)
 {
 	u16 index;
 
 	index = TAG_PARAM_OFFSET;
-	while (index < (u16RxLen - FCS_LEN)) {
+	while (index < (rx_len - FCS_LEN)) {
 		if (pu8msa[index] == IDSPARMS)
 			return pu8msa[index + 2];
-		/* Increment index by length information and header */
 		index += pu8msa[index + 1] + IE_HDR_LEN;
 	}
 
-	/* Return current channel information from the MIB, if beacon/probe  */
-	/* response frame does not contain the DS parameter set IE           */
-	/* return (mget_CurrentChannel() + 1); */
-	return 0;  /* no MIB here */
-}
-
-/**
- *  @brief                      parses the received 'N' message
- *  @details
- *  @param[in]  pu8MsgBuffer The message to be parsed
- *  @param[out]         ppstrNetworkInfo pointer to pointer to the structure containing the parsed Network Info
- *  @return             Error code indicating success/failure
- *  @note
- *  @author		mabubakr
- *  @date			1 Mar 2012
- *  @version		1.0
- */
-s32 wilc_parse_network_info(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo)
-{
-	tstrNetworkInfo *pstrNetworkInfo = NULL;
-	u8 u8MsgType = 0;
-	u8 u8MsgID = 0;
-	u16 u16MsgLen = 0;
-
-	u16 u16WidID = (u16)WID_NIL;
-	u16 u16WidLen  = 0;
-	u8  *pu8WidVal = NULL;
-
-	u8MsgType = pu8MsgBuffer[0];
-
-	/* Check whether the received message type is 'N' */
-	if ('N' != u8MsgType) {
-		PRINT_ER("Received Message format incorrect.\n");
-		return -EFAULT;
-	}
-
-	/* Extract message ID */
-	u8MsgID = pu8MsgBuffer[1];
-
-	/* Extract message Length */
-	u16MsgLen = MAKE_WORD16(pu8MsgBuffer[2], pu8MsgBuffer[3]);
-
-	/* Extract WID ID */
-	u16WidID = MAKE_WORD16(pu8MsgBuffer[4], pu8MsgBuffer[5]);
-
-	/* Extract WID Length */
-	u16WidLen = MAKE_WORD16(pu8MsgBuffer[6], pu8MsgBuffer[7]);
-
-	/* Assign a pointer to the WID value */
-	pu8WidVal  = &pu8MsgBuffer[8];
-
-	/* parse the WID value of the WID "WID_NEWORK_INFO" */
-	{
-		u8  *pu8msa = NULL;
-		u16 u16RxLen = 0;
-		u8 *pu8TimElm = NULL;
-		u8 *pu8IEs = NULL;
-		u16 u16IEsLen = 0;
-		u8 u8index = 0;
-		u32 u32Tsf_Lo;
-		u32 u32Tsf_Hi;
-
-		pstrNetworkInfo = kzalloc(sizeof(tstrNetworkInfo), GFP_KERNEL);
-		if (!pstrNetworkInfo)
-			return -ENOMEM;
-
-		pstrNetworkInfo->s8rssi = pu8WidVal[0];
-
-		/* Assign a pointer to msa "Mac Header Start Address" */
-		pu8msa = &pu8WidVal[1];
-
-		u16RxLen = u16WidLen - 1;
-
-		/* parse msa*/
-
-		/* Get the cap_info */
-		pstrNetworkInfo->u16CapInfo = get_cap_info(pu8msa);
-		/* Get time-stamp [Low only 32 bit] */
-		pstrNetworkInfo->u32Tsf = get_beacon_timestamp_lo(pu8msa);
-		PRINT_D(CORECONFIG_DBG, "TSF :%x\n", pstrNetworkInfo->u32Tsf);
-
-		/* Get full time-stamp [Low and High 64 bit] */
-		u32Tsf_Lo = get_beacon_timestamp_lo(pu8msa);
-		u32Tsf_Hi = get_beacon_timestamp_hi(pu8msa);
-
-		pstrNetworkInfo->u64Tsf = u32Tsf_Lo | ((u64)u32Tsf_Hi << 32);
-
-		/* Get SSID */
-		get_ssid(pu8msa, pstrNetworkInfo->au8ssid, &pstrNetworkInfo->u8SsidLen);
-
-		/* Get BSSID */
-		get_BSSID(pu8msa, pstrNetworkInfo->au8bssid);
-
-		/*
-		 * Extract current channel information from
-		 * the beacon/probe response frame
-		 */
-		pstrNetworkInfo->u8channel = get_current_channel_802_11n(pu8msa,
-							u16RxLen + FCS_LEN);
-
-		/* Get beacon period */
-		u8index = MAC_HDR_LEN + TIME_STAMP_LEN;
-
-		pstrNetworkInfo->u16BeaconPeriod = get_beacon_period(pu8msa + u8index);
-
-		u8index += BEACON_INTERVAL_LEN + CAP_INFO_LEN;
-
-		/* Get DTIM Period */
-		pu8TimElm = get_tim_elm(pu8msa, u16RxLen + FCS_LEN, u8index);
-		if (pu8TimElm)
-			pstrNetworkInfo->u8DtimPeriod = pu8TimElm[3];
-		pu8IEs = &pu8msa[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN];
-		u16IEsLen = u16RxLen - (MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN);
-
-		if (u16IEsLen > 0) {
-			pstrNetworkInfo->pu8IEs = kmemdup(pu8IEs, u16IEsLen,
-							  GFP_KERNEL);
-			if (!pstrNetworkInfo->pu8IEs)
-				return -ENOMEM;
-		}
-		pstrNetworkInfo->u16IEsLen = u16IEsLen;
-
-	}
-
-	*ppstrNetworkInfo = pstrNetworkInfo;
-
 	return 0;
 }
 
-/**
- *  @brief              Deallocates the parsed Network Info
- *  @details
- *  @param[in]  pstrNetworkInfo Network Info to be deallocated
- *  @return             Error code indicating success/failure
- *  @note
- *  @author		mabubakr
- *  @date		1 Mar 2012
- *  @version		1.0
- */
-s32 wilc_dealloc_network_info(tstrNetworkInfo *pstrNetworkInfo)
+s32 wilc_parse_network_info(u8 *msg_buffer,
+			    struct network_info **ret_network_info)
 {
-	s32 s32Error = 0;
+	struct network_info *network_info = NULL;
+	u8 msg_type = 0;
+	u8 msg_id = 0;
+	u16 msg_len = 0;
 
-	if (pstrNetworkInfo) {
-		if (pstrNetworkInfo->pu8IEs) {
-			kfree(pstrNetworkInfo->pu8IEs);
-			pstrNetworkInfo->pu8IEs = NULL;
-		} else {
-			s32Error = -EFAULT;
-		}
+	u16 wid_id = (u16)WID_NIL;
+	u16 wid_len  = 0;
+	u8 *wid_val = NULL;
 
-		kfree(pstrNetworkInfo);
-		pstrNetworkInfo = NULL;
+	msg_type = msg_buffer[0];
 
-	} else {
-		s32Error = -EFAULT;
-	}
+	if (msg_type != 'N')
+		return -EFAULT;
 
-	return s32Error;
-}
+	msg_id = msg_buffer[1];
+	msg_len = MAKE_WORD16(msg_buffer[2], msg_buffer[3]);
+	wid_id = MAKE_WORD16(msg_buffer[4], msg_buffer[5]);
+	wid_len = MAKE_WORD16(msg_buffer[6], msg_buffer[7]);
+	wid_val = &msg_buffer[8];
 
-/**
- *  @brief                      parses the received Association Response frame
- *  @details
- *  @param[in]  pu8Buffer The Association Response frame to be parsed
- *  @param[out]         ppstrConnectRespInfo pointer to pointer to the structure containing the parsed Association Response Info
- *  @return             Error code indicating success/failure
- *  @note
- *  @author		mabubakr
- *  @date			2 Apr 2012
- *  @version		1.0
- */
-s32 wilc_parse_assoc_resp_info(u8 *pu8Buffer, u32 u32BufferLen,
-			       tstrConnectRespInfo **ppstrConnectRespInfo)
-{
-	s32 s32Error = 0;
-	tstrConnectRespInfo *pstrConnectRespInfo = NULL;
-	u16 u16AssocRespLen = 0;
-	u8 *pu8IEs = NULL;
-	u16 u16IEsLen = 0;
+	{
+		u8 *msa = NULL;
+		u16 rx_len = 0;
+		u8 *tim_elm = NULL;
+		u8 *ies = NULL;
+		u16 ies_len = 0;
+		u8 index = 0;
+		u32 tsf_lo;
+		u32 tsf_hi;
 
-	pstrConnectRespInfo = kzalloc(sizeof(tstrConnectRespInfo), GFP_KERNEL);
-	if (!pstrConnectRespInfo)
-		return -ENOMEM;
-
-	/* u16AssocRespLen = pu8Buffer[0]; */
-	u16AssocRespLen = (u16)u32BufferLen;
-
-	/* get the status code */
-	pstrConnectRespInfo->u16ConnectStatus = get_asoc_status(pu8Buffer);
-	if (pstrConnectRespInfo->u16ConnectStatus == SUCCESSFUL_STATUSCODE) {
-
-		/* get the capability */
-		pstrConnectRespInfo->u16capability = get_assoc_resp_cap_info(pu8Buffer);
-
-		/* get the Association ID */
-		pstrConnectRespInfo->u16AssocID = get_asoc_id(pu8Buffer);
-
-		/* get the Information Elements */
-		pu8IEs = &pu8Buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
-		u16IEsLen = u16AssocRespLen - (CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN);
-
-		pstrConnectRespInfo->pu8RespIEs = kmemdup(pu8IEs, u16IEsLen, GFP_KERNEL);
-		if (!pstrConnectRespInfo->pu8RespIEs)
+		network_info = kzalloc(sizeof(*network_info), GFP_KERNEL);
+		if (!network_info)
 			return -ENOMEM;
 
-		pstrConnectRespInfo->u16RespIEsLen = u16IEsLen;
+		network_info->rssi = wid_val[0];
+
+		msa = &wid_val[1];
+
+		rx_len = wid_len - 1;
+		network_info->cap_info = get_cap_info(msa);
+		network_info->tsf_lo = get_beacon_timestamp_lo(msa);
+
+		tsf_lo = get_beacon_timestamp_lo(msa);
+		tsf_hi = get_beacon_timestamp_hi(msa);
+
+		network_info->tsf_hi = tsf_lo | ((u64)tsf_hi << 32);
+
+		get_ssid(msa, network_info->ssid, &network_info->ssid_len);
+		get_BSSID(msa, network_info->bssid);
+
+		network_info->ch = get_current_channel_802_11n(msa,
+							rx_len + FCS_LEN);
+
+		index = MAC_HDR_LEN + TIME_STAMP_LEN;
+
+		network_info->beacon_period = get_beacon_period(msa + index);
+
+		index += BEACON_INTERVAL_LEN + CAP_INFO_LEN;
+
+		tim_elm = get_tim_elm(msa, rx_len + FCS_LEN, index);
+		if (tim_elm)
+			network_info->dtim_period = tim_elm[3];
+		ies = &msa[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN +
+			   CAP_INFO_LEN];
+		ies_len = rx_len - (MAC_HDR_LEN + TIME_STAMP_LEN +
+				    BEACON_INTERVAL_LEN + CAP_INFO_LEN);
+
+		if (ies_len > 0) {
+			network_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+			if (!network_info->ies)
+				return -ENOMEM;
+		}
+		network_info->ies_len = ies_len;
 	}
 
-	*ppstrConnectRespInfo = pstrConnectRespInfo;
+	*ret_network_info = network_info;
 
-	return s32Error;
+	return 0;
 }
 
-/**
- *  @brief                      Deallocates the parsed Association Response Info
- *  @details
- *  @param[in]  pstrNetworkInfo Network Info to be deallocated
- *  @return             Error code indicating success/failure
- *  @note
- *  @author		mabubakr
- *  @date			2 Apr 2012
- *  @version		1.0
- */
-s32 wilc_dealloc_assoc_resp_info(tstrConnectRespInfo *pstrConnectRespInfo)
+s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
+			       struct connect_resp_info **ret_connect_resp_info)
 {
-	s32 s32Error = 0;
+	struct connect_resp_info *connect_resp_info = NULL;
+	u16 assoc_resp_len = 0;
+	u8 *ies = NULL;
+	u16 ies_len = 0;
 
-	if (pstrConnectRespInfo) {
-		if (pstrConnectRespInfo->pu8RespIEs) {
-			kfree(pstrConnectRespInfo->pu8RespIEs);
-			pstrConnectRespInfo->pu8RespIEs = NULL;
-		} else {
-			s32Error = -EFAULT;
-		}
+	connect_resp_info = kzalloc(sizeof(*connect_resp_info), GFP_KERNEL);
+	if (!connect_resp_info)
+		return -ENOMEM;
 
-		kfree(pstrConnectRespInfo);
-		pstrConnectRespInfo = NULL;
+	assoc_resp_len = (u16)buffer_len;
 
-	} else {
-		s32Error = -EFAULT;
+	connect_resp_info->status = get_asoc_status(buffer);
+	if (connect_resp_info->status == SUCCESSFUL_STATUSCODE) {
+		connect_resp_info->capability = get_assoc_resp_cap_info(buffer);
+		connect_resp_info->assoc_id = get_asoc_id(buffer);
+
+		ies = &buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
+		ies_len = assoc_resp_len - (CAP_INFO_LEN + STATUS_CODE_LEN +
+					    AID_LEN);
+
+		connect_resp_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+		if (!connect_resp_info->ies)
+			return -ENOMEM;
+
+		connect_resp_info->ies_len = ies_len;
 	}
 
-	return s32Error;
-}
+	*ret_connect_resp_info = connect_resp_info;
 
-/**
- *  @brief              sends certain Configuration Packet based on the input WIDs pstrWIDs
- *  using driver config layer
- *
- *  @details
- *  @param[in]  pstrWIDs WIDs to be sent in the configuration packet
- *  @param[in]  u32WIDsCount number of WIDs to be sent in the configuration packet
- *  @param[out]         pu8RxResp The received Packet Response
- *  @param[out]         ps32RxRespLen Length of the received Packet Response
- *  @return     Error code indicating success/failure
- *  @note
- *  @author	mabubakr
- *  @date		1 Mar 2012
- *  @version	1.0
- */
-s32 wilc_send_config_pkt(struct wilc *wilc, u8 mode, struct wid *wids,
-			 u32 count, u32 drv)
-{
-	s32 counter = 0, ret = 0;
-
-	if (mode == GET_CFG) {
-		for (counter = 0; counter < count; counter++) {
-			PRINT_INFO(CORECONFIG_DBG, "Sending CFG packet [%d][%d]\n", !counter,
-				   (counter == count - 1));
-			if (!wilc_wlan_cfg_get(wilc, !counter,
-					       wids[counter].id,
-					       (counter == count - 1),
-					       drv)) {
-				ret = -ETIMEDOUT;
-				printk("[Sendconfigpkt]Get Timed out\n");
-				break;
-			}
-		}
-		counter = 0;
-		for (counter = 0; counter < count; counter++) {
-			wids[counter].size = wilc_wlan_cfg_get_val(
-					wids[counter].id,
-					wids[counter].val,
-					wids[counter].size);
-		}
-	} else if (mode == SET_CFG) {
-		for (counter = 0; counter < count; counter++) {
-			PRINT_D(CORECONFIG_DBG, "Sending config SET PACKET WID:%x\n", wids[counter].id);
-			if (!wilc_wlan_cfg_set(wilc, !counter,
-					       wids[counter].id,
-					       wids[counter].val,
-					       wids[counter].size,
-					       (counter == count - 1),
-					       drv)) {
-				ret = -ETIMEDOUT;
-				printk("[Sendconfigpkt]Set Timed out\n");
-				break;
-			}
-		}
-	}
-
-	return ret;
+	return 0;
 }
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index fc43d04..63eca24 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -70,71 +70,60 @@
 	CONNECT_STS_FORCE_16_BIT = 0xFFFF
 } tenuConnectSts;
 
-struct wid {
-	u16 id;
-	enum wid_type type;
-	s32 size;
-	s8 *val;
-};
-
 typedef struct {
 	u8 u8Full;
 	u8 u8Index;
 	s8 as8RSSI[NUM_RSSI];
 } tstrRSSI;
 
-typedef struct {
-	s8 s8rssi;
-	u16 u16CapInfo;
-	u8 au8ssid[MAX_SSID_LEN];
-	u8 u8SsidLen;
-	u8 au8bssid[6];
-	u16 u16BeaconPeriod;
-	u8 u8DtimPeriod;
-	u8 u8channel;
-	unsigned long u32TimeRcvdInScanCached;
-	unsigned long u32TimeRcvdInScan;
-	bool bNewNetwork;
-	u8 u8Found;
-	u32 u32Tsf;
-	u8 *pu8IEs;
-	u16 u16IEsLen;
-	void *pJoinParams;
-	tstrRSSI strRssi;
-	u64 u64Tsf;
-} tstrNetworkInfo;
+struct network_info {
+	s8 rssi;
+	u16 cap_info;
+	u8 ssid[MAX_SSID_LEN];
+	u8 ssid_len;
+	u8 bssid[6];
+	u16 beacon_period;
+	u8 dtim_period;
+	u8 ch;
+	unsigned long time_scan_cached;
+	unsigned long time_scan;
+	bool new_network;
+	u8 found;
+	u32 tsf_lo;
+	u8 *ies;
+	u16 ies_len;
+	void *join_params;
+	tstrRSSI str_rssi;
+	u64 tsf_hi;
+};
 
-typedef struct {
-	u16 u16capability;
-	u16 u16ConnectStatus;
-	u16 u16AssocID;
-	u8 *pu8RespIEs;
-	u16 u16RespIEsLen;
-} tstrConnectRespInfo;
+struct connect_resp_info {
+	u16 capability;
+	u16 status;
+	u16 assoc_id;
+	u8 *ies;
+	u16 ies_len;
+};
 
-typedef struct {
-	u8 au8bssid[6];
-	u8 *pu8ReqIEs;
-	size_t ReqIEsLen;
-	u8 *pu8RespIEs;
-	u16 u16RespIEsLen;
-	u16 u16ConnectStatus;
-} tstrConnectInfo;
+struct connect_info {
+	u8 bssid[6];
+	u8 *req_ies;
+	size_t req_ies_len;
+	u8 *resp_ies;
+	u16 resp_ies_len;
+	u16 status;
+};
 
-typedef struct {
-	u16 u16reason;
+struct disconnect_info {
+	u16 reason;
 	u8 *ie;
 	size_t ie_len;
-} tstrDisconnectNotifInfo;
+};
 
-s32 wilc_send_config_pkt(struct wilc *wilc, u8 mode, struct wid *wids,
-			 u32 count, u32 drv);
-s32 wilc_parse_network_info(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
-s32 wilc_dealloc_network_info(tstrNetworkInfo *pstrNetworkInfo);
-
-s32 wilc_parse_assoc_resp_info(u8 *pu8Buffer, u32 u32BufferLen,
-		       tstrConnectRespInfo **ppstrConnectRespInfo);
-s32 wilc_dealloc_assoc_resp_info(tstrConnectRespInfo *pstrConnectRespInfo);
+s32 wilc_parse_network_info(u8 *msg_buffer,
+			    struct network_info **ret_network_info);
+s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
+			       struct connect_resp_info **ret_connect_resp_info);
 void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
 				 u32 u32Length);
 void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 8c77520..b2fdc93 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -46,7 +46,8 @@
 #define HOST_IF_MSG_DEL_BA_SESSION              34
 #define HOST_IF_MSG_Q_IDLE                      35
 #define HOST_IF_MSG_DEL_ALL_STA                 36
-#define HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS      34
+#define HOST_IF_MSG_SET_TX_POWER		38
+#define HOST_IF_MSG_GET_TX_POWER		39
 #define HOST_IF_MSG_EXIT                        100
 
 #define HOST_IF_SCAN_TIMEOUT                    4000
@@ -57,9 +58,8 @@
 #define BLOCK_ACK_REQ_SIZE                      0x14
 #define FALSE_FRMWR_CHANNEL			100
 
-struct cfg_param_attr {
-	struct cfg_param_val cfg_attr_info;
-};
+#define TCP_ACK_FILTER_LINK_SPEED_THRESH	54
+#define DEFAULT_LINK_SPEED			72
 
 struct host_if_wpa_attr {
 	u8 *key;
@@ -163,6 +163,10 @@
 	u8 mac[6];
 };
 
+struct tx_power {
+	u8 tx_pwr;
+};
+
 union message_body {
 	struct scan_attr scan_info;
 	struct connect_attr con_info;
@@ -188,6 +192,7 @@
 	struct reg_frame reg_frame;
 	char *data;
 	struct del_all_sta del_all_sta_info;
+	struct tx_power tx_power;
 };
 
 struct host_if_msg {
@@ -201,7 +206,7 @@
 	u8 dtim_period;
 	u16 beacon_period;
 	u16 cap_info;
-	u8 au8bssid[6];
+	u8 bssid[6];
 	char ssid[MAX_SSID_LEN];
 	u8 ssid_len;
 	u8 supp_rates[MAX_RATES_SUPPORTED + 1];
@@ -225,11 +230,11 @@
 	u8 start_time[4];
 };
 
-struct host_if_drv *terminated_handle;
+static struct host_if_drv *terminated_handle;
 bool wilc_optaining_ip;
 static u8 P2P_LISTEN_STATE;
 static struct task_struct *hif_thread_handler;
-static WILC_MsgQueueHandle hif_msg_q;
+static struct message_queue hif_msg_q;
 static struct semaphore hif_sema_thread;
 static struct semaphore hif_sema_driver;
 static struct semaphore hif_sema_wait_response;
@@ -262,7 +267,8 @@
 #define FLUSHED_JOIN_REQ 1
 #define FLUSHED_BYTE_POS 79
 
-static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo);
+static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
+static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
 
 /* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
  * special purpose in wilc device, so we add 1 to the index to starts from 1.
@@ -270,7 +276,7 @@
  */
 int wilc_get_vif_idx(struct wilc_vif *vif)
 {
-	return vif->u8IfIdx + 1;
+	return vif->idx + 1;
 }
 
 /* We need to minus 1 from idx which is from wilc device to get real index
@@ -288,10 +294,10 @@
 	return wilc->vif[index];
 }
 
-static s32 handle_set_channel(struct wilc_vif *vif,
-			      struct channel_attr *hif_set_ch)
+static void handle_set_channel(struct wilc_vif *vif,
+			       struct channel_attr *hif_set_ch)
 {
-	s32 result = 0;
+	int ret = 0;
 	struct wid wid;
 
 	wid.id = (u16)WID_CURRENT_CHANNEL;
@@ -299,17 +305,11 @@
 	wid.val = (char *)&hif_set_ch->set_ch;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Setting channel\n");
+	ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				   wilc_get_vif_idx(vif));
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
-
-	if (result) {
-		PRINT_ER("Failed to set channel\n");
-		return -EINVAL;
-	}
-
-	return result;
+	if (ret)
+		netdev_err(vif->ndev, "Failed to set channel\n");
 }
 
 static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
@@ -319,18 +319,18 @@
 	struct wid wid;
 
 	wid.id = (u16)WID_SET_DRV_HANDLER;
-	wid.type = WID_INT;
-	wid.val = (s8 *)&hif_drv_handler->handler;
-	wid.size = sizeof(u32);
+	wid.type = WID_STR;
+	wid.val = (s8 *)hif_drv_handler;
+	wid.size = sizeof(*hif_drv_handler);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
 				      hif_drv_handler->handler);
 
 	if (!hif_drv_handler->handler)
 		up(&hif_sema_driver);
 
 	if (result) {
-		PRINT_ER("Failed to set driver handler\n");
+		netdev_err(vif->ndev, "Failed to set driver handler\n");
 		return -EINVAL;
 	}
 
@@ -348,37 +348,29 @@
 	wid.val = (s8 *)&hif_op_mode->mode;
 	wid.size = sizeof(u32);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	if ((hif_op_mode->mode) == IDLE_MODE)
 		up(&hif_sema_driver);
 
 	if (result) {
-		PRINT_ER("Failed to set driver handler\n");
+		netdev_err(vif->ndev, "Failed to set driver handler\n");
 		return -EINVAL;
 	}
 
 	return result;
 }
 
-static s32 host_int_get_ipaddress(struct wilc_vif *vif,
-				  struct host_if_drv *hif_drv,
-				  u8 *u16ipadd, u8 idx);
-
 static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
 {
 	s32 result = 0;
 	struct wid wid;
 	char firmware_ip_addr[4] = {0};
-	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (ip_addr[0] < 192)
 		ip_addr[0] = 0;
 
-	PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set  IP = %pI4\n",
-		   idx, ip_addr);
-
 	memcpy(set_ip[idx], ip_addr, IP_ALEN);
 
 	wid.id = (u16)WID_IP_ADDRESS;
@@ -386,18 +378,16 @@
 	wid.val = (u8 *)ip_addr;
 	wid.size = IP_ALEN;
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
-	host_int_get_ipaddress(vif, hif_drv, firmware_ip_addr, idx);
+	host_int_get_ipaddress(vif, firmware_ip_addr, idx);
 
 	if (result) {
-		PRINT_ER("Failed to set IP address\n");
+		netdev_err(vif->ndev, "Failed to set IP address\n");
 		return -EINVAL;
 	}
 
-	PRINT_INFO(HOSTINF_DBG, "IP address set\n");
-
 	return result;
 }
 
@@ -411,10 +401,8 @@
 	wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
 	wid.size = IP_ALEN;
 
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
-
-	PRINT_INFO(HOSTINF_DBG, "%pI4\n", wid.val);
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	memcpy(get_ip[idx], wid.val, IP_ALEN);
 
@@ -424,45 +412,35 @@
 		wilc_setup_ipaddress(vif, set_ip[idx], idx);
 
 	if (result != 0) {
-		PRINT_ER("Failed to get IP address\n");
+		netdev_err(vif->ndev, "Failed to get IP address\n");
 		return -EINVAL;
 	}
 
-	PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d\n", idx);
-	PRINT_INFO(HOSTINF_DBG, "%pI4\n", get_ip[idx]);
-	PRINT_INFO(HOSTINF_DBG, "\n");
-
 	return result;
 }
 
-static s32 handle_set_mac_address(struct wilc_vif *vif,
-				  struct set_mac_addr *set_mac_addr)
+static void handle_set_mac_address(struct wilc_vif *vif,
+				   struct set_mac_addr *set_mac_addr)
 {
-	s32 result = 0;
+	int ret = 0;
 	struct wid wid;
-	u8 *mac_buf = kmalloc(ETH_ALEN, GFP_KERNEL);
+	u8 *mac_buf;
 
-	if (!mac_buf) {
-		PRINT_ER("No buffer to send mac address\n");
-		return -EFAULT;
-	}
-	memcpy(mac_buf, set_mac_addr->mac_addr, ETH_ALEN);
+	mac_buf = kmemdup(set_mac_addr->mac_addr, ETH_ALEN, GFP_KERNEL);
+	if (!mac_buf)
+		return;
 
 	wid.id = (u16)WID_MAC_ADDR;
 	wid.type = WID_STR;
 	wid.val = mac_buf;
 	wid.size = ETH_ALEN;
-	PRINT_D(GENERIC_DBG, "mac addr = :%pM\n", wid.val);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
-	if (result) {
-		PRINT_ER("Failed to set mac address\n");
-		result = -EFAULT;
-	}
+	ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				   wilc_get_vif_idx(vif));
+	if (ret)
+		netdev_err(vif->ndev, "Failed to set mac address\n");
 
 	kfree(mac_buf);
-	return result;
 }
 
 static s32 handle_get_mac_address(struct wilc_vif *vif,
@@ -476,11 +454,11 @@
 	wid.val = get_mac_addr->mac_addr;
 	wid.size = ETH_ALEN;
 
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	if (result) {
-		PRINT_ER("Failed to get mac address\n");
+		netdev_err(vif->ndev, "Failed to get mac address\n");
 		result = -EFAULT;
 	}
 	up(&hif_sema_wait_response);
@@ -494,289 +472,288 @@
 	s32 result = 0;
 	struct wid wid_list[32];
 	struct host_if_drv *hif_drv = vif->hif_drv;
-	u8 wid_cnt = 0;
+	int i = 0;
 
 	down(&hif_drv->sem_cfg_values);
 
-	PRINT_D(HOSTINF_DBG, "Setting CFG params\n");
+	if (cfg_param_attr->flag & BSS_TYPE) {
+		if (cfg_param_attr->bss_type < 6) {
+			wid_list[i].id = WID_BSS_TYPE;
+			wid_list[i].val = (s8 *)&cfg_param_attr->bss_type;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->bss_type;
+		} else {
+			netdev_err(vif->ndev, "check value 6 over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & AUTH_TYPE) {
+		if (cfg_param_attr->auth_type == 1 ||
+		    cfg_param_attr->auth_type == 2 ||
+		    cfg_param_attr->auth_type == 5) {
+			wid_list[i].id = WID_AUTH_TYPE;
+			wid_list[i].val = (s8 *)&cfg_param_attr->auth_type;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->auth_type;
+		} else {
+			netdev_err(vif->ndev, "Impossible value\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & AUTHEN_TIMEOUT) {
+		if (cfg_param_attr->auth_timeout > 0 &&
+		    cfg_param_attr->auth_timeout < 65536) {
+			wid_list[i].id = WID_AUTH_TIMEOUT;
+			wid_list[i].val = (s8 *)&cfg_param_attr->auth_timeout;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.auth_timeout = cfg_param_attr->auth_timeout;
+		} else {
+			netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & POWER_MANAGEMENT) {
+		if (cfg_param_attr->power_mgmt_mode < 5) {
+			wid_list[i].id = WID_POWER_MANAGEMENT;
+			wid_list[i].val = (s8 *)&cfg_param_attr->power_mgmt_mode;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->power_mgmt_mode;
+		} else {
+			netdev_err(vif->ndev, "Invalid power mode\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & RETRY_SHORT) {
+		if (cfg_param_attr->short_retry_limit > 0 &&
+		    cfg_param_attr->short_retry_limit < 256) {
+			wid_list[i].id = WID_SHORT_RETRY_LIMIT;
+			wid_list[i].val = (s8 *)&cfg_param_attr->short_retry_limit;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.short_retry_limit = cfg_param_attr->short_retry_limit;
+		} else {
+			netdev_err(vif->ndev, "Range(1~256) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & RETRY_LONG) {
+		if (cfg_param_attr->long_retry_limit > 0 &&
+		    cfg_param_attr->long_retry_limit < 256) {
+			wid_list[i].id = WID_LONG_RETRY_LIMIT;
+			wid_list[i].val = (s8 *)&cfg_param_attr->long_retry_limit;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.long_retry_limit = cfg_param_attr->long_retry_limit;
+		} else {
+			netdev_err(vif->ndev, "Range(1~256) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & FRAG_THRESHOLD) {
+		if (cfg_param_attr->frag_threshold > 255 &&
+		    cfg_param_attr->frag_threshold < 7937) {
+			wid_list[i].id = WID_FRAG_THRESHOLD;
+			wid_list[i].val = (s8 *)&cfg_param_attr->frag_threshold;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.frag_threshold = cfg_param_attr->frag_threshold;
+		} else {
+			netdev_err(vif->ndev, "Threshold Range fail\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & RTS_THRESHOLD) {
+		if (cfg_param_attr->rts_threshold > 255 &&
+		    cfg_param_attr->rts_threshold < 65536) {
+			wid_list[i].id = WID_RTS_THRESHOLD;
+			wid_list[i].val = (s8 *)&cfg_param_attr->rts_threshold;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.rts_threshold = cfg_param_attr->rts_threshold;
+		} else {
+			netdev_err(vif->ndev, "Threshold Range fail\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & PREAMBLE) {
+		if (cfg_param_attr->preamble_type < 3) {
+			wid_list[i].id = WID_PREAMBLE;
+			wid_list[i].val = (s8 *)&cfg_param_attr->preamble_type;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.preamble_type = cfg_param_attr->preamble_type;
+		} else {
+			netdev_err(vif->ndev, "Preamble Range(0~2) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & SHORT_SLOT_ALLOWED) {
+		if (cfg_param_attr->short_slot_allowed < 2) {
+			wid_list[i].id = WID_SHORT_SLOT_ALLOWED;
+			wid_list[i].val = (s8 *)&cfg_param_attr->short_slot_allowed;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->short_slot_allowed;
+		} else {
+			netdev_err(vif->ndev, "Short slot(2) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & TXOP_PROT_DISABLE) {
+		if (cfg_param_attr->txop_prot_disabled < 2) {
+			wid_list[i].id = WID_11N_TXOP_PROT_DISABLE;
+			wid_list[i].val = (s8 *)&cfg_param_attr->txop_prot_disabled;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->txop_prot_disabled;
+		} else {
+			netdev_err(vif->ndev, "TXOP prot disable\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & BEACON_INTERVAL) {
+		if (cfg_param_attr->beacon_interval > 0 &&
+		    cfg_param_attr->beacon_interval < 65536) {
+			wid_list[i].id = WID_BEACON_INTERVAL;
+			wid_list[i].val = (s8 *)&cfg_param_attr->beacon_interval;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.beacon_interval = cfg_param_attr->beacon_interval;
+		} else {
+			netdev_err(vif->ndev, "Beacon interval(1~65535) fail\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & DTIM_PERIOD) {
+		if (cfg_param_attr->dtim_period > 0 &&
+		    cfg_param_attr->dtim_period < 256) {
+			wid_list[i].id = WID_DTIM_PERIOD;
+			wid_list[i].val = (s8 *)&cfg_param_attr->dtim_period;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.dtim_period = cfg_param_attr->dtim_period;
+		} else {
+			netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & SITE_SURVEY) {
+		if (cfg_param_attr->site_survey_enabled < 3) {
+			wid_list[i].id = WID_SITE_SURVEY;
+			wid_list[i].val = (s8 *)&cfg_param_attr->site_survey_enabled;
+			wid_list[i].type = WID_CHAR;
+			wid_list[i].size = sizeof(char);
+			hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->site_survey_enabled;
+		} else {
+			netdev_err(vif->ndev, "Site survey disable\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & SITE_SURVEY_SCAN_TIME) {
+		if (cfg_param_attr->site_survey_scan_time > 0 &&
+		    cfg_param_attr->site_survey_scan_time < 65536) {
+			wid_list[i].id = WID_SITE_SURVEY_SCAN_TIME;
+			wid_list[i].val = (s8 *)&cfg_param_attr->site_survey_scan_time;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->site_survey_scan_time;
+		} else {
+			netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & ACTIVE_SCANTIME) {
+		if (cfg_param_attr->active_scan_time > 0 &&
+		    cfg_param_attr->active_scan_time < 65536) {
+			wid_list[i].id = WID_ACTIVE_SCAN_TIME;
+			wid_list[i].val = (s8 *)&cfg_param_attr->active_scan_time;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.active_scan_time = cfg_param_attr->active_scan_time;
+		} else {
+			netdev_err(vif->ndev, "Active time(1~65535) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & PASSIVE_SCANTIME) {
+		if (cfg_param_attr->passive_scan_time > 0 &&
+		    cfg_param_attr->passive_scan_time < 65536) {
+			wid_list[i].id = WID_PASSIVE_SCAN_TIME;
+			wid_list[i].val = (s8 *)&cfg_param_attr->passive_scan_time;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
+			hif_drv->cfg_values.passive_scan_time = cfg_param_attr->passive_scan_time;
+		} else {
+			netdev_err(vif->ndev, "Passive time(1~65535) over\n");
+			result = -EINVAL;
+			goto ERRORHANDLER;
+		}
+		i++;
+	}
+	if (cfg_param_attr->flag & CURRENT_TX_RATE) {
+		enum CURRENT_TXRATE curr_tx_rate = cfg_param_attr->curr_tx_rate;
 
-	if (cfg_param_attr->cfg_attr_info.flag & BSS_TYPE) {
-		if (cfg_param_attr->cfg_attr_info.bss_type < 6) {
-			wid_list[wid_cnt].id = WID_BSS_TYPE;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.bss_type;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->cfg_attr_info.bss_type;
-		} else {
-			PRINT_ER("check value 6 over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & AUTH_TYPE) {
-		if (cfg_param_attr->cfg_attr_info.auth_type == 1 ||
-		    cfg_param_attr->cfg_attr_info.auth_type == 2 ||
-		    cfg_param_attr->cfg_attr_info.auth_type == 5) {
-			wid_list[wid_cnt].id = WID_AUTH_TYPE;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.auth_type;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->cfg_attr_info.auth_type;
-		} else {
-			PRINT_ER("Impossible value \n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & AUTHEN_TIMEOUT) {
-		if (cfg_param_attr->cfg_attr_info.auth_timeout > 0 &&
-		    cfg_param_attr->cfg_attr_info.auth_timeout < 65536) {
-			wid_list[wid_cnt].id = WID_AUTH_TIMEOUT;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.auth_timeout;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.auth_timeout = cfg_param_attr->cfg_attr_info.auth_timeout;
-		} else {
-			PRINT_ER("Range(1 ~ 65535) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & POWER_MANAGEMENT) {
-		if (cfg_param_attr->cfg_attr_info.power_mgmt_mode < 5) {
-			wid_list[wid_cnt].id = WID_POWER_MANAGEMENT;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.power_mgmt_mode;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->cfg_attr_info.power_mgmt_mode;
-		} else {
-			PRINT_ER("Invalide power mode\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & RETRY_SHORT) {
-		if (cfg_param_attr->cfg_attr_info.short_retry_limit > 0 &&
-		    cfg_param_attr->cfg_attr_info.short_retry_limit < 256) {
-			wid_list[wid_cnt].id = WID_SHORT_RETRY_LIMIT;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.short_retry_limit;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.short_retry_limit = cfg_param_attr->cfg_attr_info.short_retry_limit;
-		} else {
-			PRINT_ER("Range(1~256) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & RETRY_LONG) {
-		if (cfg_param_attr->cfg_attr_info.long_retry_limit > 0 &&
-		    cfg_param_attr->cfg_attr_info.long_retry_limit < 256) {
-			wid_list[wid_cnt].id = WID_LONG_RETRY_LIMIT;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.long_retry_limit;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.long_retry_limit = cfg_param_attr->cfg_attr_info.long_retry_limit;
-		} else {
-			PRINT_ER("Range(1~256) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & FRAG_THRESHOLD) {
-		if (cfg_param_attr->cfg_attr_info.frag_threshold > 255 &&
-		    cfg_param_attr->cfg_attr_info.frag_threshold < 7937) {
-			wid_list[wid_cnt].id = WID_FRAG_THRESHOLD;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.frag_threshold;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.frag_threshold = cfg_param_attr->cfg_attr_info.frag_threshold;
-		} else {
-			PRINT_ER("Threshold Range fail\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & RTS_THRESHOLD) {
-		if (cfg_param_attr->cfg_attr_info.rts_threshold > 255 &&
-		    cfg_param_attr->cfg_attr_info.rts_threshold < 65536) {
-			wid_list[wid_cnt].id = WID_RTS_THRESHOLD;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.rts_threshold;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.rts_threshold = cfg_param_attr->cfg_attr_info.rts_threshold;
-		} else {
-			PRINT_ER("Threshold Range fail\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & PREAMBLE) {
-		if (cfg_param_attr->cfg_attr_info.preamble_type < 3) {
-			wid_list[wid_cnt].id = WID_PREAMBLE;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.preamble_type;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.preamble_type = cfg_param_attr->cfg_attr_info.preamble_type;
-		} else {
-			PRINT_ER("Preamle Range(0~2) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & SHORT_SLOT_ALLOWED) {
-		if (cfg_param_attr->cfg_attr_info.short_slot_allowed < 2) {
-			wid_list[wid_cnt].id = WID_SHORT_SLOT_ALLOWED;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.short_slot_allowed;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->cfg_attr_info.short_slot_allowed;
-		} else {
-			PRINT_ER("Short slot(2) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & TXOP_PROT_DISABLE) {
-		if (cfg_param_attr->cfg_attr_info.txop_prot_disabled < 2) {
-			wid_list[wid_cnt].id = WID_11N_TXOP_PROT_DISABLE;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.txop_prot_disabled;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->cfg_attr_info.txop_prot_disabled;
-		} else {
-			PRINT_ER("TXOP prot disable\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & BEACON_INTERVAL) {
-		if (cfg_param_attr->cfg_attr_info.beacon_interval > 0 &&
-		    cfg_param_attr->cfg_attr_info.beacon_interval < 65536) {
-			wid_list[wid_cnt].id = WID_BEACON_INTERVAL;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.beacon_interval;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.beacon_interval = cfg_param_attr->cfg_attr_info.beacon_interval;
-		} else {
-			PRINT_ER("Beacon interval(1~65535) fail\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & DTIM_PERIOD) {
-		if (cfg_param_attr->cfg_attr_info.dtim_period > 0 &&
-		    cfg_param_attr->cfg_attr_info.dtim_period < 256) {
-			wid_list[wid_cnt].id = WID_DTIM_PERIOD;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.dtim_period;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.dtim_period = cfg_param_attr->cfg_attr_info.dtim_period;
-		} else {
-			PRINT_ER("DTIM range(1~255) fail\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & SITE_SURVEY) {
-		if (cfg_param_attr->cfg_attr_info.site_survey_enabled < 3) {
-			wid_list[wid_cnt].id = WID_SITE_SURVEY;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.site_survey_enabled;
-			wid_list[wid_cnt].type = WID_CHAR;
-			wid_list[wid_cnt].size = sizeof(char);
-			hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->cfg_attr_info.site_survey_enabled;
-		} else {
-			PRINT_ER("Site survey disable\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & SITE_SURVEY_SCAN_TIME) {
-		if (cfg_param_attr->cfg_attr_info.site_survey_scan_time > 0 &&
-		    cfg_param_attr->cfg_attr_info.site_survey_scan_time < 65536) {
-			wid_list[wid_cnt].id = WID_SITE_SURVEY_SCAN_TIME;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.site_survey_scan_time;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->cfg_attr_info.site_survey_scan_time;
-		} else {
-			PRINT_ER("Site survey scan time(1~65535) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & ACTIVE_SCANTIME) {
-		if (cfg_param_attr->cfg_attr_info.active_scan_time > 0 &&
-		    cfg_param_attr->cfg_attr_info.active_scan_time < 65536) {
-			wid_list[wid_cnt].id = WID_ACTIVE_SCAN_TIME;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.active_scan_time;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.active_scan_time = cfg_param_attr->cfg_attr_info.active_scan_time;
-		} else {
-			PRINT_ER("Active scan time(1~65535) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & PASSIVE_SCANTIME) {
-		if (cfg_param_attr->cfg_attr_info.passive_scan_time > 0 &&
-		    cfg_param_attr->cfg_attr_info.passive_scan_time < 65536) {
-			wid_list[wid_cnt].id = WID_PASSIVE_SCAN_TIME;
-			wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.passive_scan_time;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
-			hif_drv->cfg_values.passive_scan_time = cfg_param_attr->cfg_attr_info.passive_scan_time;
-		} else {
-			PRINT_ER("Passive scan time(1~65535) over\n");
-			result = -EINVAL;
-			goto ERRORHANDLER;
-		}
-		wid_cnt++;
-	}
-	if (cfg_param_attr->cfg_attr_info.flag & CURRENT_TX_RATE) {
-		enum CURRENT_TXRATE curr_tx_rate = cfg_param_attr->cfg_attr_info.curr_tx_rate;
-
-		if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1
-		    || curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5
-		    || curr_tx_rate == MBPS_11 || curr_tx_rate == MBPS_6
-		    || curr_tx_rate == MBPS_9 || curr_tx_rate == MBPS_12
-		    || curr_tx_rate == MBPS_18 || curr_tx_rate == MBPS_24
-		    || curr_tx_rate == MBPS_36 || curr_tx_rate == MBPS_48 || curr_tx_rate == MBPS_54) {
-			wid_list[wid_cnt].id = WID_CURRENT_TX_RATE;
-			wid_list[wid_cnt].val = (s8 *)&curr_tx_rate;
-			wid_list[wid_cnt].type = WID_SHORT;
-			wid_list[wid_cnt].size = sizeof(u16);
+		if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1 ||
+		    curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5 ||
+		    curr_tx_rate == MBPS_11 || curr_tx_rate == MBPS_6 ||
+		    curr_tx_rate == MBPS_9 || curr_tx_rate == MBPS_12 ||
+		    curr_tx_rate == MBPS_18 || curr_tx_rate == MBPS_24 ||
+		    curr_tx_rate == MBPS_36 || curr_tx_rate == MBPS_48 ||
+		    curr_tx_rate == MBPS_54) {
+			wid_list[i].id = WID_CURRENT_TX_RATE;
+			wid_list[i].val = (s8 *)&curr_tx_rate;
+			wid_list[i].type = WID_SHORT;
+			wid_list[i].size = sizeof(u16);
 			hif_drv->cfg_values.curr_tx_rate = (u8)curr_tx_rate;
 		} else {
-			PRINT_ER("out of TX rate\n");
+			netdev_err(vif->ndev, "out of TX rate\n");
 			result = -EINVAL;
 			goto ERRORHANDLER;
 		}
-		wid_cnt++;
+		i++;
 	}
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, wid_list,
-				      wid_cnt, wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+				      i, wilc_get_vif_idx(vif));
 
 	if (result)
-		PRINT_ER("Error in setting CFG params\n");
+		netdev_err(vif->ndev, "Error in setting CFG params\n");
 
 ERRORHANDLER:
 	up(&hif_drv->sem_cfg_values);
@@ -804,50 +781,40 @@
 	u8 *pu8HdnNtwrksWidVal = NULL;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	PRINT_D(HOSTINF_DBG, "Setting SCAN params\n");
-	PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state\n", hif_drv->hif_state);
-
 	hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result;
 	hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg;
 
 	if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
 	    (hif_drv->hif_state < HOST_IF_CONNECTED)) {
-		PRINT_D(GENERIC_DBG, "Don't scan already in [%d] state\n",
-			hif_drv->hif_state);
-		PRINT_ER("Already scan\n");
+		netdev_err(vif->ndev, "Already scan\n");
 		result = -EBUSY;
 		goto ERRORHANDLER;
 	}
 
 	if (wilc_optaining_ip || wilc_connecting) {
-		PRINT_D(GENERIC_DBG, "[handle_scan]: Don't do obss scan until IP adresss is obtained\n");
-		PRINT_ER("Don't do obss scan\n");
+		netdev_err(vif->ndev, "Don't do obss scan\n");
 		result = -EBUSY;
 		goto ERRORHANDLER;
 	}
 
-	PRINT_D(HOSTINF_DBG, "Setting SCAN params\n");
-
 	hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
 
 	strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ;
 	strWIDList[u32WidsCount].type = WID_STR;
 
-	for (i = 0; i < pstrHostIFscanAttr->hidden_network.u8ssidnum; i++)
-		valuesize += ((pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen) + 1);
+	for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++)
+		valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1);
 	pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL);
 	strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal;
 	if (strWIDList[u32WidsCount].val) {
 		pu8Buffer = strWIDList[u32WidsCount].val;
 
-		*pu8Buffer++ = pstrHostIFscanAttr->hidden_network.u8ssidnum;
+		*pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids;
 
-		PRINT_D(HOSTINF_DBG, "In Handle_ProbeRequest number of ssid %d\n", pstrHostIFscanAttr->hidden_network.u8ssidnum);
-
-		for (i = 0; i < pstrHostIFscanAttr->hidden_network.u8ssidnum; i++) {
-			*pu8Buffer++ = pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen;
-			memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].pu8ssid, pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen);
-			pu8Buffer += pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen;
+		for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) {
+			*pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
+			memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len);
+			pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
 		}
 
 		strWIDList[u32WidsCount].size = (s32)(valuesize + 1);
@@ -896,14 +863,12 @@
 	else if (hif_drv->hif_state == HOST_IF_IDLE)
 		scan_while_connected = false;
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList,
+	result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
 				      u32WidsCount,
 				      wilc_get_vif_idx(vif));
 
 	if (result)
-		PRINT_ER("Failed to send scan paramters config packet\n");
-	else
-		PRINT_D(HOSTINF_DBG, "Successfully sent SCAN params config packet\n");
+		netdev_err(vif->ndev, "Failed to send scan parameters\n");
 
 ERRORHANDLER:
 	if (result) {
@@ -916,8 +881,8 @@
 
 	kfree(pstrHostIFscanAttr->ies);
 	pstrHostIFscanAttr->ies = NULL;
-	kfree(pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo);
-	pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo = NULL;
+	kfree(pstrHostIFscanAttr->hidden_network.net_info);
+	pstrHostIFscanAttr->hidden_network.net_info = NULL;
 
 	kfree(pu8HdnNtwrksWidVal);
 
@@ -932,27 +897,24 @@
 	struct wid wid;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	PRINT_D(HOSTINF_DBG, "in Handle_ScanDone()\n");
-
 	if (enuEvent == SCAN_EVENT_ABORTED) {
-		PRINT_D(GENERIC_DBG, "Abort running scan\n");
 		u8abort_running_scan = 1;
 		wid.id = (u16)WID_ABORT_RUNNING_SCAN;
 		wid.type = WID_CHAR;
 		wid.val = (s8 *)&u8abort_running_scan;
 		wid.size = sizeof(char);
 
-		result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-					 wilc_get_vif_idx(vif));
+		result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+					      wilc_get_vif_idx(vif));
 
 		if (result) {
-			PRINT_ER("Failed to set abort running scan\n");
+			netdev_err(vif->ndev, "Failed to set abort running\n");
 			result = -EFAULT;
 		}
 	}
 
 	if (!hif_drv) {
-		PRINT_ER("Driver handler is NULL\n");
+		netdev_err(vif->ndev, "Driver handler is NULL\n");
 		return result;
 	}
 
@@ -976,35 +938,31 @@
 	struct join_bss_param *ptstrJoinBssParam;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	PRINT_D(GENERIC_DBG, "Handling connect request\n");
-
 	if (memcmp(pstrHostIFconnectAttr->bssid, wilc_connected_ssid, ETH_ALEN) == 0) {
 		result = 0;
-		PRINT_ER("Trying to connect to an already connected AP, Discard connect request\n");
+		netdev_err(vif->ndev, "Discard connect request\n");
 		return result;
 	}
 
-	PRINT_INFO(HOSTINF_DBG, "Saving connection parameters in global structure\n");
-
-	ptstrJoinBssParam = (struct join_bss_param *)pstrHostIFconnectAttr->params;
+	ptstrJoinBssParam = pstrHostIFconnectAttr->params;
 	if (!ptstrJoinBssParam) {
-		PRINT_ER("Required BSSID not found\n");
+		netdev_err(vif->ndev, "Required BSSID not found\n");
 		result = -ENOENT;
 		goto ERRORHANDLER;
 	}
 
 	if (pstrHostIFconnectAttr->bssid) {
-		hif_drv->usr_conn_req.pu8bssid = kmalloc(6, GFP_KERNEL);
-		memcpy(hif_drv->usr_conn_req.pu8bssid, pstrHostIFconnectAttr->bssid, 6);
+		hif_drv->usr_conn_req.bssid = kmalloc(6, GFP_KERNEL);
+		memcpy(hif_drv->usr_conn_req.bssid, pstrHostIFconnectAttr->bssid, 6);
 	}
 
 	hif_drv->usr_conn_req.ssid_len = pstrHostIFconnectAttr->ssid_len;
 	if (pstrHostIFconnectAttr->ssid) {
-		hif_drv->usr_conn_req.pu8ssid = kmalloc(pstrHostIFconnectAttr->ssid_len + 1, GFP_KERNEL);
-		memcpy(hif_drv->usr_conn_req.pu8ssid,
+		hif_drv->usr_conn_req.ssid = kmalloc(pstrHostIFconnectAttr->ssid_len + 1, GFP_KERNEL);
+		memcpy(hif_drv->usr_conn_req.ssid,
 		       pstrHostIFconnectAttr->ssid,
 		       pstrHostIFconnectAttr->ssid_len);
-		hif_drv->usr_conn_req.pu8ssid[pstrHostIFconnectAttr->ssid_len] = '\0';
+		hif_drv->usr_conn_req.ssid[pstrHostIFconnectAttr->ssid_len] = '\0';
 	}
 
 	hif_drv->usr_conn_req.ies_len = pstrHostIFconnectAttr->ies_len;
@@ -1015,7 +973,7 @@
 		       pstrHostIFconnectAttr->ies_len);
 	}
 
-	hif_drv->usr_conn_req.u8security = pstrHostIFconnectAttr->security;
+	hif_drv->usr_conn_req.security = pstrHostIFconnectAttr->security;
 	hif_drv->usr_conn_req.auth_type = pstrHostIFconnectAttr->auth_type;
 	hif_drv->usr_conn_req.conn_result = pstrHostIFconnectAttr->result;
 	hif_drv->usr_conn_req.arg = pstrHostIFconnectAttr->arg;
@@ -1055,13 +1013,11 @@
 	strWIDList[u32WidsCount].id = (u16)WID_11I_MODE;
 	strWIDList[u32WidsCount].type = WID_CHAR;
 	strWIDList[u32WidsCount].size = sizeof(char);
-	strWIDList[u32WidsCount].val = (s8 *)&hif_drv->usr_conn_req.u8security;
+	strWIDList[u32WidsCount].val = (s8 *)&hif_drv->usr_conn_req.security;
 	u32WidsCount++;
 
 	if (memcmp("DIRECT-", pstrHostIFconnectAttr->ssid, 7))
-		mode_11i = hif_drv->usr_conn_req.u8security;
-
-	PRINT_INFO(HOSTINF_DBG, "Encrypt Mode = %x\n", hif_drv->usr_conn_req.u8security);
+		mode_11i = hif_drv->usr_conn_req.security;
 
 	strWIDList[u32WidsCount].id = (u16)WID_AUTH_TYPE;
 	strWIDList[u32WidsCount].type = WID_CHAR;
@@ -1072,11 +1028,6 @@
 	if (memcmp("DIRECT-", pstrHostIFconnectAttr->ssid, 7))
 		auth_type = (u8)hif_drv->usr_conn_req.auth_type;
 
-	PRINT_INFO(HOSTINF_DBG, "Authentication Type = %x\n",
-		   hif_drv->usr_conn_req.auth_type);
-	PRINT_D(HOSTINF_DBG, "Connecting to network of SSID %s on channel %d\n",
-		hif_drv->usr_conn_req.pu8ssid, pstrHostIFconnectAttr->ch);
-
 	strWIDList[u32WidsCount].id = (u16)WID_JOIN_REQ_EXTENDED;
 	strWIDList[u32WidsCount].type = WID_STR;
 	strWIDList[u32WidsCount].size = 112;
@@ -1103,12 +1054,11 @@
 	if ((pstrHostIFconnectAttr->ch >= 1) && (pstrHostIFconnectAttr->ch <= 14)) {
 		*(pu8CurrByte++) = pstrHostIFconnectAttr->ch;
 	} else {
-		PRINT_ER("Channel out of range\n");
+		netdev_err(vif->ndev, "Channel out of range\n");
 		*(pu8CurrByte++) = 0xFF;
 	}
 	*(pu8CurrByte++)  = (ptstrJoinBssParam->cap_info) & 0xFF;
 	*(pu8CurrByte++)  = ((ptstrJoinBssParam->cap_info) >> 8) & 0xFF;
-	PRINT_D(HOSTINF_DBG, "* Cap Info %0x*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8)));
 
 	if (pstrHostIFconnectAttr->bssid)
 		memcpy(pu8CurrByte, pstrHostIFconnectAttr->bssid, 6);
@@ -1120,26 +1070,20 @@
 
 	*(pu8CurrByte++)  = (ptstrJoinBssParam->beacon_period) & 0xFF;
 	*(pu8CurrByte++)  = ((ptstrJoinBssParam->beacon_period) >> 8) & 0xFF;
-	PRINT_D(HOSTINF_DBG, "* Beacon Period %d*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8)));
 	*(pu8CurrByte++)  =  ptstrJoinBssParam->dtim_period;
-	PRINT_D(HOSTINF_DBG, "* DTIM Period %d*\n", (*(pu8CurrByte - 1)));
 
 	memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1);
 	pu8CurrByte += (MAX_RATES_SUPPORTED + 1);
 
 	*(pu8CurrByte++)  =  ptstrJoinBssParam->wmm_cap;
-	PRINT_D(HOSTINF_DBG, "* wmm cap%d*\n", (*(pu8CurrByte - 1)));
 	*(pu8CurrByte++)  = ptstrJoinBssParam->uapsd_cap;
 
 	*(pu8CurrByte++)  = ptstrJoinBssParam->ht_capable;
 	hif_drv->usr_conn_req.ht_capable = ptstrJoinBssParam->ht_capable;
 
 	*(pu8CurrByte++)  =  ptstrJoinBssParam->rsn_found;
-	PRINT_D(HOSTINF_DBG, "* rsn found %d*\n", *(pu8CurrByte - 1));
 	*(pu8CurrByte++)  =  ptstrJoinBssParam->rsn_grp_policy;
-	PRINT_D(HOSTINF_DBG, "* rsn group policy %0x*\n", (*(pu8CurrByte - 1)));
 	*(pu8CurrByte++) =  ptstrJoinBssParam->mode_802_11i;
-	PRINT_D(HOSTINF_DBG, "* mode_802_11i %d*\n", (*(pu8CurrByte - 1)));
 
 	memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy));
 	pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_pcip_policy);
@@ -1154,8 +1098,6 @@
 	*(pu8CurrByte++) = ptstrJoinBssParam->noa_enabled;
 
 	if (ptstrJoinBssParam->noa_enabled) {
-		PRINT_D(HOSTINF_DBG, "NOA present\n");
-
 		*(pu8CurrByte++) = (ptstrJoinBssParam->tsf) & 0xFF;
 		*(pu8CurrByte++) = ((ptstrJoinBssParam->tsf) >> 8) & 0xFF;
 		*(pu8CurrByte++) = ((ptstrJoinBssParam->tsf) >> 16) & 0xFF;
@@ -1177,8 +1119,7 @@
 
 		memcpy(pu8CurrByte, ptstrJoinBssParam->start_time, sizeof(ptstrJoinBssParam->start_time));
 		pu8CurrByte += sizeof(ptstrJoinBssParam->start_time);
-	} else
-		PRINT_D(HOSTINF_DBG, "NOA not present\n");
+	}
 
 	pu8CurrByte = strWIDList[u32WidsCount].val;
 	u32WidsCount++;
@@ -1188,46 +1129,37 @@
 		join_req_vif = vif;
 	}
 
-	PRINT_D(GENERIC_DBG, "send HOST_IF_WAITING_CONN_RESP\n");
-
-	if (pstrHostIFconnectAttr->bssid) {
+	if (pstrHostIFconnectAttr->bssid)
 		memcpy(wilc_connected_ssid,
 		       pstrHostIFconnectAttr->bssid, ETH_ALEN);
-		PRINT_D(GENERIC_DBG, "save Bssid = %pM\n",
-			pstrHostIFconnectAttr->bssid);
-		PRINT_D(GENERIC_DBG, "save bssid = %pM\n", wilc_connected_ssid);
-	}
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList,
+	result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
 				      u32WidsCount,
 				      wilc_get_vif_idx(vif));
 	if (result) {
-		PRINT_ER("failed to send config packet\n");
+		netdev_err(vif->ndev, "failed to send config packet\n");
 		result = -EFAULT;
 		goto ERRORHANDLER;
 	} else {
-		PRINT_D(GENERIC_DBG, "set HOST_IF_WAITING_CONN_RESP\n");
 		hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
 	}
 
 ERRORHANDLER:
 	if (result) {
-		tstrConnectInfo strConnectInfo;
+		struct connect_info strConnectInfo;
 
 		del_timer(&hif_drv->connect_timer);
 
-		PRINT_D(HOSTINF_DBG, "could not start wilc_connecting to the required network\n");
-
-		memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+		memset(&strConnectInfo, 0, sizeof(struct connect_info));
 
 		if (pstrHostIFconnectAttr->result) {
 			if (pstrHostIFconnectAttr->bssid)
-				memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->bssid, 6);
+				memcpy(strConnectInfo.bssid, pstrHostIFconnectAttr->bssid, 6);
 
 			if (pstrHostIFconnectAttr->ies) {
-				strConnectInfo.ReqIEsLen = pstrHostIFconnectAttr->ies_len;
-				strConnectInfo.pu8ReqIEs = kmalloc(pstrHostIFconnectAttr->ies_len, GFP_KERNEL);
-				memcpy(strConnectInfo.pu8ReqIEs,
+				strConnectInfo.req_ies_len = pstrHostIFconnectAttr->ies_len;
+				strConnectInfo.req_ies = kmalloc(pstrHostIFconnectAttr->ies_len, GFP_KERNEL);
+				memcpy(strConnectInfo.req_ies,
 				       pstrHostIFconnectAttr->ies,
 				       pstrHostIFconnectAttr->ies_len);
 			}
@@ -1238,15 +1170,14 @@
 							       NULL,
 							       pstrHostIFconnectAttr->arg);
 			hif_drv->hif_state = HOST_IF_IDLE;
-			kfree(strConnectInfo.pu8ReqIEs);
-			strConnectInfo.pu8ReqIEs = NULL;
+			kfree(strConnectInfo.req_ies);
+			strConnectInfo.req_ies = NULL;
 
 		} else {
-			PRINT_ER("Connect callback function pointer is NULL\n");
+			netdev_err(vif->ndev, "Connect callback is NULL\n");
 		}
 	}
 
-	PRINT_D(HOSTINF_DBG, "Deallocating connection parameters\n");
 	kfree(pstrHostIFconnectAttr->bssid);
 	pstrHostIFconnectAttr->bssid = NULL;
 
@@ -1296,11 +1227,11 @@
 
 	u32WidsCount++;
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList,
+	result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
 				      u32WidsCount,
 				      wilc_get_vif_idx(join_req_vif));
 	if (result) {
-		PRINT_ER("failed to send config packet\n");
+		netdev_err(vif->ndev, "failed to send config packet\n");
 		result = -EINVAL;
 	}
 
@@ -1310,13 +1241,13 @@
 static s32 Handle_ConnectTimeout(struct wilc_vif *vif)
 {
 	s32 result = 0;
-	tstrConnectInfo strConnectInfo;
+	struct connect_info strConnectInfo;
 	struct wid wid;
 	u16 u16DummyReasonCode = 0;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("Driver handler is NULL\n");
+		netdev_err(vif->ndev, "Driver handler is NULL\n");
 		return result;
 	}
 
@@ -1324,18 +1255,18 @@
 
 	scan_while_connected = false;
 
-	memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+	memset(&strConnectInfo, 0, sizeof(struct connect_info));
 
 	if (hif_drv->usr_conn_req.conn_result) {
-		if (hif_drv->usr_conn_req.pu8bssid) {
-			memcpy(strConnectInfo.au8bssid,
-			       hif_drv->usr_conn_req.pu8bssid, 6);
+		if (hif_drv->usr_conn_req.bssid) {
+			memcpy(strConnectInfo.bssid,
+			       hif_drv->usr_conn_req.bssid, 6);
 		}
 
 		if (hif_drv->usr_conn_req.ies) {
-			strConnectInfo.ReqIEsLen = hif_drv->usr_conn_req.ies_len;
-			strConnectInfo.pu8ReqIEs = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
-			memcpy(strConnectInfo.pu8ReqIEs,
+			strConnectInfo.req_ies_len = hif_drv->usr_conn_req.ies_len;
+			strConnectInfo.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
+			memcpy(strConnectInfo.req_ies,
 			       hif_drv->usr_conn_req.ies,
 			       hif_drv->usr_conn_req.ies_len);
 		}
@@ -1346,10 +1277,10 @@
 						  NULL,
 						  hif_drv->usr_conn_req.arg);
 
-		kfree(strConnectInfo.pu8ReqIEs);
-		strConnectInfo.pu8ReqIEs = NULL;
+		kfree(strConnectInfo.req_ies);
+		strConnectInfo.req_ies = NULL;
 	} else {
-		PRINT_ER("Connect callback function pointer is NULL\n");
+		netdev_err(vif->ndev, "Connect callback is NULL\n");
 	}
 
 	wid.id = (u16)WID_DISCONNECT;
@@ -1357,18 +1288,16 @@
 	wid.val = (s8 *)&u16DummyReasonCode;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Sending disconnect request\n");
-
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send dissconect config packet\n");
+		netdev_err(vif->ndev, "Failed to send disconnect\n");
 
 	hif_drv->usr_conn_req.ssid_len = 0;
-	kfree(hif_drv->usr_conn_req.pu8ssid);
-	hif_drv->usr_conn_req.pu8ssid = NULL;
-	kfree(hif_drv->usr_conn_req.pu8bssid);
-	hif_drv->usr_conn_req.pu8bssid = NULL;
+	kfree(hif_drv->usr_conn_req.ssid);
+	hif_drv->usr_conn_req.ssid = NULL;
+	kfree(hif_drv->usr_conn_req.bssid);
+	hif_drv->usr_conn_req.bssid = NULL;
 	hif_drv->usr_conn_req.ies_len = 0;
 	kfree(hif_drv->usr_conn_req.ies);
 	hif_drv->usr_conn_req.ies = NULL;
@@ -1394,33 +1323,30 @@
 	u32 i;
 	bool bNewNtwrkFound;
 	s32 result = 0;
-	tstrNetworkInfo *pstrNetworkInfo = NULL;
+	struct network_info *pstrNetworkInfo = NULL;
 	void *pJoinParams = NULL;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	bNewNtwrkFound = true;
-	PRINT_INFO(HOSTINF_DBG, "Handling received network info\n");
 
 	if (hif_drv->usr_scan_req.scan_result) {
-		PRINT_D(HOSTINF_DBG, "State: Scanning, parsing network information received\n");
 		wilc_parse_network_info(pstrRcvdNetworkInfo->buffer, &pstrNetworkInfo);
 		if ((!pstrNetworkInfo) ||
 		    (!hif_drv->usr_scan_req.scan_result)) {
-			PRINT_ER("driver is null\n");
+			netdev_err(vif->ndev, "driver is null\n");
 			result = -EINVAL;
 			goto done;
 		}
 
 		for (i = 0; i < hif_drv->usr_scan_req.rcvd_ch_cnt; i++) {
-			if ((hif_drv->usr_scan_req.net_info[i].au8bssid) &&
-			    (pstrNetworkInfo->au8bssid)) {
-				if (memcmp(hif_drv->usr_scan_req.net_info[i].au8bssid,
-					   pstrNetworkInfo->au8bssid, 6) == 0) {
-					if (pstrNetworkInfo->s8rssi <= hif_drv->usr_scan_req.net_info[i].s8rssi) {
-						PRINT_D(HOSTINF_DBG, "Network previously discovered\n");
+			if ((hif_drv->usr_scan_req.net_info[i].bssid) &&
+			    (pstrNetworkInfo->bssid)) {
+				if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
+					   pstrNetworkInfo->bssid, 6) == 0) {
+					if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
 						goto done;
 					} else {
-						hif_drv->usr_scan_req.net_info[i].s8rssi = pstrNetworkInfo->s8rssi;
+						hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
 						bNewNtwrkFound = false;
 						break;
 					}
@@ -1429,30 +1355,26 @@
 		}
 
 		if (bNewNtwrkFound) {
-			PRINT_D(HOSTINF_DBG, "New network found\n");
-
 			if (hif_drv->usr_scan_req.rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
-				hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].s8rssi = pstrNetworkInfo->s8rssi;
+				hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].rssi = pstrNetworkInfo->rssi;
 
-				if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].au8bssid &&
-				    pstrNetworkInfo->au8bssid) {
-					memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].au8bssid,
-					       pstrNetworkInfo->au8bssid, 6);
+				if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid &&
+				    pstrNetworkInfo->bssid) {
+					memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
+					       pstrNetworkInfo->bssid, 6);
 
 					hif_drv->usr_scan_req.rcvd_ch_cnt++;
 
-					pstrNetworkInfo->bNewNetwork = true;
+					pstrNetworkInfo->new_network = true;
 					pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
 
 					hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
 									  hif_drv->usr_scan_req.arg,
 									  pJoinParams);
 				}
-			} else {
-				PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. limit\n");
 			}
 		} else {
-			pstrNetworkInfo->bNewNetwork = false;
+			pstrNetworkInfo->new_network = false;
 			hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
 							  hif_drv->usr_scan_req.arg, NULL);
 		}
@@ -1463,8 +1385,8 @@
 	pstrRcvdNetworkInfo->buffer = NULL;
 
 	if (pstrNetworkInfo) {
-		wilc_dealloc_network_info(pstrNetworkInfo);
-		pstrNetworkInfo = NULL;
+		kfree(pstrNetworkInfo->ies);
+		kfree(pstrNetworkInfo);
 	}
 
 	return result;
@@ -1487,31 +1409,29 @@
 	u8 u8MacStatus;
 	u8 u8MacStatusReasonCode;
 	u8 u8MacStatusAdditionalInfo;
-	tstrConnectInfo strConnectInfo;
-	tstrDisconnectNotifInfo strDisconnectNotifInfo;
+	struct connect_info strConnectInfo;
+	struct disconnect_info strDisconnectNotifInfo;
 	s32 s32Err = 0;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("Driver handler is NULL\n");
+		netdev_err(vif->ndev, "Driver handler is NULL\n");
 		return -ENODEV;
 	}
-	PRINT_D(GENERIC_DBG, "Current State = %d,Received state = %d\n",
-		hif_drv->hif_state, pstrRcvdGnrlAsyncInfo->buffer[7]);
 
 	if ((hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) ||
 	    (hif_drv->hif_state == HOST_IF_CONNECTED) ||
 	    hif_drv->usr_scan_req.scan_result) {
 		if (!pstrRcvdGnrlAsyncInfo->buffer ||
 		    !hif_drv->usr_conn_req.conn_result) {
-			PRINT_ER("driver is null\n");
+			netdev_err(vif->ndev, "driver is null\n");
 			return -EINVAL;
 		}
 
 		u8MsgType = pstrRcvdGnrlAsyncInfo->buffer[0];
 
 		if ('I' != u8MsgType) {
-			PRINT_ER("Received Message format incorrect.\n");
+			netdev_err(vif->ndev, "Received Message incorrect.\n");
 			return -EFAULT;
 		}
 
@@ -1522,14 +1442,11 @@
 		u8MacStatus  = pstrRcvdGnrlAsyncInfo->buffer[7];
 		u8MacStatusReasonCode = pstrRcvdGnrlAsyncInfo->buffer[8];
 		u8MacStatusAdditionalInfo = pstrRcvdGnrlAsyncInfo->buffer[9];
-		PRINT_INFO(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Info = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo);
 		if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
 			u32 u32RcvdAssocRespInfoLen = 0;
-			tstrConnectRespInfo *pstrConnectRespInfo = NULL;
+			struct connect_resp_info *pstrConnectRespInfo = NULL;
 
-			PRINT_D(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Code = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo);
-
-			memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+			memset(&strConnectInfo, 0, sizeof(struct connect_info));
 
 			if (u8MacStatus == MAC_CONNECTED) {
 				memset(rcv_assoc_resp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
@@ -1539,59 +1456,54 @@
 							    MAX_ASSOC_RESP_FRAME_SIZE,
 							    &u32RcvdAssocRespInfoLen);
 
-				PRINT_INFO(HOSTINF_DBG, "Received association response with length = %d\n", u32RcvdAssocRespInfoLen);
-
 				if (u32RcvdAssocRespInfoLen != 0) {
-					PRINT_D(HOSTINF_DBG, "Parsing association response\n");
 					s32Err = wilc_parse_assoc_resp_info(rcv_assoc_resp, u32RcvdAssocRespInfoLen,
 								    &pstrConnectRespInfo);
 					if (s32Err) {
-						PRINT_ER("wilc_parse_assoc_resp_info() returned error %d\n", s32Err);
+						netdev_err(vif->ndev, "wilc_parse_assoc_resp_info() returned error %d\n", s32Err);
 					} else {
-						strConnectInfo.u16ConnectStatus = pstrConnectRespInfo->u16ConnectStatus;
+						strConnectInfo.status = pstrConnectRespInfo->status;
 
-						if (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE) {
-							PRINT_INFO(HOSTINF_DBG, "Association response received : Successful connection status\n");
-							if (pstrConnectRespInfo->pu8RespIEs) {
-								strConnectInfo.u16RespIEsLen = pstrConnectRespInfo->u16RespIEsLen;
-								strConnectInfo.pu8RespIEs = kmalloc(pstrConnectRespInfo->u16RespIEsLen, GFP_KERNEL);
-								memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs,
-									    pstrConnectRespInfo->u16RespIEsLen);
+						if (strConnectInfo.status == SUCCESSFUL_STATUSCODE) {
+							if (pstrConnectRespInfo->ies) {
+								strConnectInfo.resp_ies_len = pstrConnectRespInfo->ies_len;
+								strConnectInfo.resp_ies = kmalloc(pstrConnectRespInfo->ies_len, GFP_KERNEL);
+								memcpy(strConnectInfo.resp_ies, pstrConnectRespInfo->ies,
+								       pstrConnectRespInfo->ies_len);
 							}
 						}
 
 						if (pstrConnectRespInfo) {
-							wilc_dealloc_assoc_resp_info(pstrConnectRespInfo);
-							pstrConnectRespInfo = NULL;
+							kfree(pstrConnectRespInfo->ies);
+							kfree(pstrConnectRespInfo);
 						}
 					}
 				}
 			}
 
 			if ((u8MacStatus == MAC_CONNECTED) &&
-			    (strConnectInfo.u16ConnectStatus != SUCCESSFUL_STATUSCODE))	{
-				PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
+			    (strConnectInfo.status != SUCCESSFUL_STATUSCODE))	{
+				netdev_err(vif->ndev, "Received MAC status is MAC_CONNECTED while the received status code in Assoc Resp is not SUCCESSFUL_STATUSCODE\n");
 				eth_zero_addr(wilc_connected_ssid);
 			} else if (u8MacStatus == MAC_DISCONNECTED)    {
-				PRINT_ER("Received MAC status is MAC_DISCONNECTED\n");
+				netdev_err(vif->ndev, "Received MAC status is MAC_DISCONNECTED\n");
 				eth_zero_addr(wilc_connected_ssid);
 			}
 
-			if (hif_drv->usr_conn_req.pu8bssid) {
-				PRINT_D(HOSTINF_DBG, "Retrieving actual BSSID from AP\n");
-				memcpy(strConnectInfo.au8bssid, hif_drv->usr_conn_req.pu8bssid, 6);
+			if (hif_drv->usr_conn_req.bssid) {
+				memcpy(strConnectInfo.bssid, hif_drv->usr_conn_req.bssid, 6);
 
 				if ((u8MacStatus == MAC_CONNECTED) &&
-				    (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE))	{
+				    (strConnectInfo.status == SUCCESSFUL_STATUSCODE))	{
 					memcpy(hif_drv->assoc_bssid,
-					       hif_drv->usr_conn_req.pu8bssid, ETH_ALEN);
+					       hif_drv->usr_conn_req.bssid, ETH_ALEN);
 				}
 			}
 
 			if (hif_drv->usr_conn_req.ies) {
-				strConnectInfo.ReqIEsLen = hif_drv->usr_conn_req.ies_len;
-				strConnectInfo.pu8ReqIEs = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
-				memcpy(strConnectInfo.pu8ReqIEs,
+				strConnectInfo.req_ies_len = hif_drv->usr_conn_req.ies_len;
+				strConnectInfo.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
+				memcpy(strConnectInfo.req_ies,
 				       hif_drv->usr_conn_req.ies,
 				       hif_drv->usr_conn_req.ies_len);
 			}
@@ -1604,48 +1516,42 @@
 							  hif_drv->usr_conn_req.arg);
 
 			if ((u8MacStatus == MAC_CONNECTED) &&
-			    (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE))	{
+			    (strConnectInfo.status == SUCCESSFUL_STATUSCODE))	{
 				wilc_set_power_mgmt(vif, 0, 0);
 
-				PRINT_D(HOSTINF_DBG, "MAC status : CONNECTED and Connect Status : Successful\n");
 				hif_drv->hif_state = HOST_IF_CONNECTED;
 
-				PRINT_D(GENERIC_DBG, "Obtaining an IP, Disable Scan\n");
 				wilc_optaining_ip = true;
 				mod_timer(&wilc_during_ip_timer,
 					  jiffies + msecs_to_jiffies(10000));
 			} else {
-				PRINT_D(HOSTINF_DBG, "MAC status : %d and Connect Status : %d\n", u8MacStatus, strConnectInfo.u16ConnectStatus);
 				hif_drv->hif_state = HOST_IF_IDLE;
 				scan_while_connected = false;
 			}
 
-			kfree(strConnectInfo.pu8RespIEs);
-			strConnectInfo.pu8RespIEs = NULL;
+			kfree(strConnectInfo.resp_ies);
+			strConnectInfo.resp_ies = NULL;
 
-			kfree(strConnectInfo.pu8ReqIEs);
-			strConnectInfo.pu8ReqIEs = NULL;
+			kfree(strConnectInfo.req_ies);
+			strConnectInfo.req_ies = NULL;
 			hif_drv->usr_conn_req.ssid_len = 0;
-			kfree(hif_drv->usr_conn_req.pu8ssid);
-			hif_drv->usr_conn_req.pu8ssid = NULL;
-			kfree(hif_drv->usr_conn_req.pu8bssid);
-			hif_drv->usr_conn_req.pu8bssid = NULL;
+			kfree(hif_drv->usr_conn_req.ssid);
+			hif_drv->usr_conn_req.ssid = NULL;
+			kfree(hif_drv->usr_conn_req.bssid);
+			hif_drv->usr_conn_req.bssid = NULL;
 			hif_drv->usr_conn_req.ies_len = 0;
 			kfree(hif_drv->usr_conn_req.ies);
 			hif_drv->usr_conn_req.ies = NULL;
 		} else if ((u8MacStatus == MAC_DISCONNECTED) &&
 			   (hif_drv->hif_state == HOST_IF_CONNECTED)) {
-			PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW\n");
-
-			memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+			memset(&strDisconnectNotifInfo, 0, sizeof(struct disconnect_info));
 
 			if (hif_drv->usr_scan_req.scan_result) {
-				PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >>\n\n");
 				del_timer(&hif_drv->scan_timer);
 				Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
 			}
 
-			strDisconnectNotifInfo.u16reason = 0;
+			strDisconnectNotifInfo.reason = 0;
 			strDisconnectNotifInfo.ie = NULL;
 			strDisconnectNotifInfo.ie_len = 0;
 
@@ -1659,16 +1565,16 @@
 								  &strDisconnectNotifInfo,
 								  hif_drv->usr_conn_req.arg);
 			} else {
-				PRINT_ER("Connect result callback function is NULL\n");
+				netdev_err(vif->ndev, "Connect result NULL\n");
 			}
 
 			eth_zero_addr(hif_drv->assoc_bssid);
 
 			hif_drv->usr_conn_req.ssid_len = 0;
-			kfree(hif_drv->usr_conn_req.pu8ssid);
-			hif_drv->usr_conn_req.pu8ssid = NULL;
-			kfree(hif_drv->usr_conn_req.pu8bssid);
-			hif_drv->usr_conn_req.pu8bssid = NULL;
+			kfree(hif_drv->usr_conn_req.ssid);
+			hif_drv->usr_conn_req.ssid = NULL;
+			kfree(hif_drv->usr_conn_req.bssid);
+			hif_drv->usr_conn_req.bssid = NULL;
 			hif_drv->usr_conn_req.ies_len = 0;
 			kfree(hif_drv->usr_conn_req.ies);
 			hif_drv->usr_conn_req.ies = NULL;
@@ -1688,9 +1594,6 @@
 
 		} else if ((u8MacStatus == MAC_DISCONNECTED) &&
 			   (hif_drv->usr_scan_req.scan_result)) {
-			PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW while scanning\n");
-			PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >>\n\n");
-
 			del_timer(&hif_drv->scan_timer);
 			if (hif_drv->usr_scan_req.scan_result)
 				Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
@@ -1719,8 +1622,6 @@
 	case WEP:
 
 		if (pstrHostIFkeyAttr->action & ADDKEY_AP) {
-			PRINT_D(HOSTINF_DBG, "Handling WEP key\n");
-			PRINT_D(GENERIC_DBG, "ID Hostint is %d\n", pstrHostIFkeyAttr->attr.wep.index);
 			strWIDList[0].id = (u16)WID_11I_MODE;
 			strWIDList[0].type = WID_CHAR;
 			strWIDList[0].size = sizeof(char);
@@ -1731,39 +1632,32 @@
 			strWIDList[1].size = sizeof(char);
 			strWIDList[1].val = (s8 *)&pstrHostIFkeyAttr->attr.wep.auth_type;
 
-			strWIDList[2].id = (u16)WID_KEY_ID;
-			strWIDList[2].type = WID_CHAR;
-
-			strWIDList[2].val = (s8 *)&pstrHostIFkeyAttr->attr.wep.index;
-			strWIDList[2].size = sizeof(char);
-
-			pu8keybuf = kmemdup(pstrHostIFkeyAttr->attr.wep.key,
-					    pstrHostIFkeyAttr->attr.wep.key_len,
+			pu8keybuf = kmalloc(pstrHostIFkeyAttr->attr.wep.key_len + 2,
 					    GFP_KERNEL);
-
-			if (pu8keybuf == NULL) {
-				PRINT_ER("No buffer to send Key\n");
+			if (!pu8keybuf)
 				return -ENOMEM;
-			}
+
+			pu8keybuf[0] = pstrHostIFkeyAttr->attr.wep.index;
+			pu8keybuf[1] = pstrHostIFkeyAttr->attr.wep.key_len;
+
+			memcpy(&pu8keybuf[2], pstrHostIFkeyAttr->attr.wep.key,
+			       pstrHostIFkeyAttr->attr.wep.key_len);
 
 			kfree(pstrHostIFkeyAttr->attr.wep.key);
 
-			strWIDList[3].id = (u16)WID_WEP_KEY_VALUE;
-			strWIDList[3].type = WID_STR;
-			strWIDList[3].size = pstrHostIFkeyAttr->attr.wep.key_len;
-			strWIDList[3].val = (s8 *)pu8keybuf;
+			strWIDList[2].id = (u16)WID_WEP_KEY_VALUE;
+			strWIDList[2].type = WID_STR;
+			strWIDList[2].size = pstrHostIFkeyAttr->attr.wep.key_len + 2;
+			strWIDList[2].val = (s8 *)pu8keybuf;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						strWIDList, 4,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      strWIDList, 3,
+						      wilc_get_vif_idx(vif));
 			kfree(pu8keybuf);
 		} else if (pstrHostIFkeyAttr->action & ADDKEY) {
-			PRINT_D(HOSTINF_DBG, "Handling WEP key\n");
 			pu8keybuf = kmalloc(pstrHostIFkeyAttr->attr.wep.key_len + 2, GFP_KERNEL);
-			if (!pu8keybuf) {
-				PRINT_ER("No buffer to send Key\n");
+			if (!pu8keybuf)
 				return -ENOMEM;
-			}
 			pu8keybuf[0] = pstrHostIFkeyAttr->attr.wep.index;
 			memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->attr.wep.key_len, 1);
 			memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->attr.wep.key,
@@ -1775,12 +1669,11 @@
 			wid.val = (s8 *)pu8keybuf;
 			wid.size = pstrHostIFkeyAttr->attr.wep.key_len + 2;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						&wid, 1,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      &wid, 1,
+						      wilc_get_vif_idx(vif));
 			kfree(pu8keybuf);
 		} else if (pstrHostIFkeyAttr->action & REMOVEKEY) {
-			PRINT_D(HOSTINF_DBG, "Removing key\n");
 			wid.id = (u16)WID_REMOVE_WEP_KEY;
 			wid.type = WID_STR;
 
@@ -1788,20 +1681,18 @@
 			wid.val = s8idxarray;
 			wid.size = 1;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						&wid, 1,
-						wilc_get_vif_idx(vif));
-		} else {
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      &wid, 1,
+						      wilc_get_vif_idx(vif));
+		} else if (pstrHostIFkeyAttr->action & DEFAULTKEY) {
 			wid.id = (u16)WID_KEY_ID;
 			wid.type = WID_CHAR;
 			wid.val = (s8 *)&pstrHostIFkeyAttr->attr.wep.index;
 			wid.size = sizeof(char);
 
-			PRINT_D(HOSTINF_DBG, "Setting default key index\n");
-
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						&wid, 1,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      &wid, 1,
+						      wilc_get_vif_idx(vif));
 		}
 		up(&hif_drv->sem_test_key_block);
 		break;
@@ -1810,7 +1701,6 @@
 		if (pstrHostIFkeyAttr->action & ADDKEY_AP) {
 			pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
 			if (!pu8keybuf) {
-				PRINT_ER("No buffer to send RxGTK Key\n");
 				ret = -ENOMEM;
 				goto _WPARxGtk_end_case_;
 			}
@@ -1833,18 +1723,15 @@
 			strWIDList[1].val = (s8 *)pu8keybuf;
 			strWIDList[1].size = RX_MIC_KEY_MSG_LEN;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						strWIDList, 2,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      strWIDList, 2,
+						      wilc_get_vif_idx(vif));
 
 			kfree(pu8keybuf);
 			up(&hif_drv->sem_test_key_block);
 		} else if (pstrHostIFkeyAttr->action & ADDKEY) {
-			PRINT_D(HOSTINF_DBG, "Handling group key(Rx) function\n");
-
 			pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
 			if (pu8keybuf == NULL) {
-				PRINT_ER("No buffer to send RxGTK Key\n");
 				ret = -ENOMEM;
 				goto _WPARxGtk_end_case_;
 			}
@@ -1852,7 +1739,7 @@
 			if (hif_drv->hif_state == HOST_IF_CONNECTED)
 				memcpy(pu8keybuf, hif_drv->assoc_bssid, ETH_ALEN);
 			else
-				PRINT_ER("Couldn't handle WPARxGtk while state is not HOST_IF_CONNECTED\n");
+				netdev_err(vif->ndev, "Couldn't handle\n");
 
 			memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->attr.wpa.seq, 8);
 			memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->attr.wpa.index, 1);
@@ -1865,9 +1752,9 @@
 			wid.val = (s8 *)pu8keybuf;
 			wid.size = RX_MIC_KEY_MSG_LEN;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						&wid, 1,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      &wid, 1,
+						      wilc_get_vif_idx(vif));
 
 			kfree(pu8keybuf);
 			up(&hif_drv->sem_test_key_block);
@@ -1884,7 +1771,6 @@
 		if (pstrHostIFkeyAttr->action & ADDKEY_AP) {
 			pu8keybuf = kmalloc(PTK_KEY_MSG_LEN + 1, GFP_KERNEL);
 			if (!pu8keybuf) {
-				PRINT_ER("No buffer to send PTK Key\n");
 				ret = -ENOMEM;
 				goto _WPAPtk_end_case_;
 			}
@@ -1905,15 +1791,15 @@
 			strWIDList[1].val = (s8 *)pu8keybuf;
 			strWIDList[1].size = PTK_KEY_MSG_LEN + 1;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						strWIDList, 2,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      strWIDList, 2,
+						      wilc_get_vif_idx(vif));
 			kfree(pu8keybuf);
 			up(&hif_drv->sem_test_key_block);
 		} else if (pstrHostIFkeyAttr->action & ADDKEY) {
 			pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
 			if (!pu8keybuf) {
-				PRINT_ER("No buffer to send PTK Key\n");
+				netdev_err(vif->ndev, "No buffer send PTK\n");
 				ret = -ENOMEM;
 				goto _WPAPtk_end_case_;
 			}
@@ -1928,9 +1814,9 @@
 			wid.val = (s8 *)pu8keybuf;
 			wid.size = PTK_KEY_MSG_LEN;
 
-			result = wilc_send_config_pkt(vif->wilc, SET_CFG,
-						&wid, 1,
-						wilc_get_vif_idx(vif));
+			result = wilc_send_config_pkt(vif, SET_CFG,
+						      &wid, 1,
+						      wilc_get_vif_idx(vif));
 			kfree(pu8keybuf);
 			up(&hif_drv->sem_test_key_block);
 		}
@@ -1943,12 +1829,9 @@
 		break;
 
 	case PMKSA:
-
-		PRINT_D(HOSTINF_DBG, "Handling PMKSA key\n");
-
 		pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
 		if (!pu8keybuf) {
-			PRINT_ER("No buffer to send PMKSA Key\n");
+			netdev_err(vif->ndev, "No buffer to send PMKSA Key\n");
 			return -ENOMEM;
 		}
 
@@ -1964,15 +1847,15 @@
 		wid.val = (s8 *)pu8keybuf;
 		wid.size = (pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1;
 
-		result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-					 wilc_get_vif_idx(vif));
+		result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+					      wilc_get_vif_idx(vif));
 
 		kfree(pu8keybuf);
 		break;
 	}
 
 	if (result)
-		PRINT_ER("Failed to send key config packet\n");
+		netdev_err(vif->ndev, "Failed to send key config packet\n");
 
 	return result;
 }
@@ -1990,24 +1873,22 @@
 	wid.val = (s8 *)&u16DummyReasonCode;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Sending disconnect request\n");
-
 	wilc_optaining_ip = false;
 	wilc_set_power_mgmt(vif, 0, 0);
 
 	eth_zero_addr(wilc_connected_ssid);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	if (result) {
-		PRINT_ER("Failed to send dissconect config packet\n");
+		netdev_err(vif->ndev, "Failed to send dissconect\n");
 	} else {
-		tstrDisconnectNotifInfo strDisconnectNotifInfo;
+		struct disconnect_info strDisconnectNotifInfo;
 
-		memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+		memset(&strDisconnectNotifInfo, 0, sizeof(struct disconnect_info));
 
-		strDisconnectNotifInfo.u16reason = 0;
+		strDisconnectNotifInfo.reason = 0;
 		strDisconnectNotifInfo.ie = NULL;
 		strDisconnectNotifInfo.ie_len = 0;
 
@@ -2021,10 +1902,8 @@
 		}
 
 		if (hif_drv->usr_conn_req.conn_result) {
-			if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
-				PRINT_D(HOSTINF_DBG, "Upper layer requested termination of connection\n");
+			if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
 				del_timer(&hif_drv->connect_timer);
-			}
 
 			hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
 							  NULL,
@@ -2032,7 +1911,7 @@
 							  &strDisconnectNotifInfo,
 							  hif_drv->usr_conn_req.arg);
 		} else {
-			PRINT_ER("usr_conn_req.conn_result = NULL\n");
+			netdev_err(vif->ndev, "conn_result = NULL\n");
 		}
 
 		scan_while_connected = false;
@@ -2042,10 +1921,10 @@
 		eth_zero_addr(hif_drv->assoc_bssid);
 
 		hif_drv->usr_conn_req.ssid_len = 0;
-		kfree(hif_drv->usr_conn_req.pu8ssid);
-		hif_drv->usr_conn_req.pu8ssid = NULL;
-		kfree(hif_drv->usr_conn_req.pu8bssid);
-		hif_drv->usr_conn_req.pu8bssid = NULL;
+		kfree(hif_drv->usr_conn_req.ssid);
+		hif_drv->usr_conn_req.ssid = NULL;
+		kfree(hif_drv->usr_conn_req.bssid);
+		hif_drv->usr_conn_req.bssid = NULL;
 		hif_drv->usr_conn_req.ies_len = 0;
 		kfree(hif_drv->usr_conn_req.ies);
 		hif_drv->usr_conn_req.ies = NULL;
@@ -2069,35 +1948,28 @@
 	if (!vif->hif_drv)
 		return;
 	if ((vif->hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) ||
-	    (vif->hif_drv->hif_state == HOST_IF_CONNECTING)) {
-		PRINT_D(HOSTINF_DBG, "\n\n<< correcting Supplicant state machine >>\n\n");
+	    (vif->hif_drv->hif_state == HOST_IF_CONNECTING))
 		wilc_disconnect(vif, 1);
-	}
 }
 
 static s32 Handle_GetChnl(struct wilc_vif *vif)
 {
 	s32 result = 0;
 	struct wid wid;
-	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	wid.id = (u16)WID_CURRENT_CHANNEL;
 	wid.type = WID_CHAR;
 	wid.val = (s8 *)&ch_no;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Getting channel value\n");
-
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	if (result) {
-		PRINT_ER("Failed to get channel number\n");
+		netdev_err(vif->ndev, "Failed to get channel number\n");
 		result = -EFAULT;
 	}
 
-	up(&hif_drv->sem_get_chnl);
-
 	return result;
 }
 
@@ -2111,12 +1983,10 @@
 	wid.val = &rssi;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Getting RSSI value\n");
-
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result) {
-		PRINT_ER("Failed to get RSSI value\n");
+		netdev_err(vif->ndev, "Failed to get RSSI value\n");
 		result = -EFAULT;
 	}
 
@@ -2127,7 +1997,6 @@
 {
 	s32 result = 0;
 	struct wid wid;
-	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	link_speed = 0;
 
@@ -2136,16 +2005,13 @@
 	wid.val = &link_speed;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Getting LINKSPEED value\n");
-
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result) {
-		PRINT_ER("Failed to get LINKSPEED value\n");
+		netdev_err(vif->ndev, "Failed to get LINKSPEED value\n");
 		result = -EFAULT;
 	}
 
-	up(&hif_drv->sem_get_link_speed);
 }
 
 static s32 Handle_GetStatistics(struct wilc_vif *vif,
@@ -2184,14 +2050,21 @@
 	strWIDList[u32WidsCount].val = (s8 *)&pstrStatistics->tx_fail_cnt;
 	u32WidsCount++;
 
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, strWIDList,
-				u32WidsCount,
-				wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, strWIDList,
+				      u32WidsCount,
+				      wilc_get_vif_idx(vif));
 
 	if (result)
-		PRINT_ER("Failed to send scan paramters config packet\n");
+		netdev_err(vif->ndev, "Failed to send scan parameters\n");
 
-	up(&hif_sema_wait_response);
+	if (pstrStatistics->link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH &&
+	    pstrStatistics->link_speed != DEFAULT_LINK_SPEED)
+		wilc_enable_tcp_ack_filter(true);
+	else if (pstrStatistics->link_speed != DEFAULT_LINK_SPEED)
+		wilc_enable_tcp_ack_filter(false);
+
+	if (pstrStatistics != &vif->wilc->dummy_statistics)
+		up(&hif_sema_wait_response);
 	return 0;
 }
 
@@ -2211,13 +2084,11 @@
 	stamac = wid.val;
 	memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
 
-	PRINT_D(CFG80211_DBG, "SETING STA inactive time\n");
-
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	if (result) {
-		PRINT_ER("Failed to SET incative time\n");
+		netdev_err(vif->ndev, "Failed to SET incative time\n");
 		return -EFAULT;
 	}
 
@@ -2226,16 +2097,14 @@
 	wid.val = (s8 *)&inactive_time;
 	wid.size = sizeof(u32);
 
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 
 	if (result) {
-		PRINT_ER("Failed to get incative time\n");
+		netdev_err(vif->ndev, "Failed to get incative time\n");
 		return -EFAULT;
 	}
 
-	PRINT_D(CFG80211_DBG, "Getting inactive time : %d\n", inactive_time);
-
 	up(&hif_drv->sem_inactive_time);
 
 	return result;
@@ -2248,8 +2117,6 @@
 	struct wid wid;
 	u8 *pu8CurrByte;
 
-	PRINT_D(HOSTINF_DBG, "Adding BEACON\n");
-
 	wid.id = (u16)WID_ADD_BEACON;
 	wid.type = WID_BIN;
 	wid.size = pstrSetBeaconParam->head_len + pstrSetBeaconParam->tail_len + 16;
@@ -2285,10 +2152,10 @@
 		memcpy(pu8CurrByte, pstrSetBeaconParam->tail, pstrSetBeaconParam->tail_len);
 	pu8CurrByte += pstrSetBeaconParam->tail_len;
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send add beacon config packet\n");
+		netdev_err(vif->ndev, "Failed to send add beacon\n");
 
 ERRORHANDLER:
 	kfree(wid.val);
@@ -2312,12 +2179,10 @@
 
 	pu8CurrByte = wid.val;
 
-	PRINT_D(HOSTINF_DBG, "Deleting BEACON\n");
-
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send delete beacon config packet\n");
+		netdev_err(vif->ndev, "Failed to send delete beacon\n");
 }
 
 static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer,
@@ -2327,7 +2192,6 @@
 
 	pu8CurrByte = pu8Buffer;
 
-	PRINT_D(HOSTINF_DBG, "Packing STA params\n");
 	memcpy(pu8CurrByte, pstrStationParam->bssid, ETH_ALEN);
 	pu8CurrByte +=  ETH_ALEN;
 
@@ -2375,7 +2239,6 @@
 	struct wid wid;
 	u8 *pu8CurrByte;
 
-	PRINT_D(HOSTINF_DBG, "Handling add station\n");
 	wid.id = (u16)WID_ADD_STA;
 	wid.type = WID_BIN;
 	wid.size = WILC_ADD_STA_LENGTH + pstrStationParam->rates_len;
@@ -2387,10 +2250,10 @@
 	pu8CurrByte = wid.val;
 	pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result != 0)
-		PRINT_ER("Failed to send add station config packet\n");
+		netdev_err(vif->ndev, "Failed to send add station\n");
 
 ERRORHANDLER:
 	kfree(pstrStationParam->rates);
@@ -2410,8 +2273,6 @@
 	wid.type = WID_STR;
 	wid.size = (pstrDelAllStaParam->assoc_sta * ETH_ALEN) + 1;
 
-	PRINT_D(HOSTINF_DBG, "Handling delete station\n");
-
 	wid.val = kmalloc((pstrDelAllStaParam->assoc_sta * ETH_ALEN) + 1, GFP_KERNEL);
 	if (!wid.val)
 		goto ERRORHANDLER;
@@ -2429,10 +2290,10 @@
 		pu8CurrByte += ETH_ALEN;
 	}
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send add station config packet\n");
+		netdev_err(vif->ndev, "Failed to send add station\n");
 
 ERRORHANDLER:
 	kfree(wid.val);
@@ -2451,8 +2312,6 @@
 	wid.type = WID_BIN;
 	wid.size = ETH_ALEN;
 
-	PRINT_D(HOSTINF_DBG, "Handling delete station\n");
-
 	wid.val = kmalloc(wid.size, GFP_KERNEL);
 	if (!wid.val)
 		goto ERRORHANDLER;
@@ -2461,10 +2320,10 @@
 
 	memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send add station config packet\n");
+		netdev_err(vif->ndev, "Failed to send add station\n");
 
 ERRORHANDLER:
 	kfree(wid.val);
@@ -2481,7 +2340,6 @@
 	wid.type = WID_BIN;
 	wid.size = WILC_ADD_STA_LENGTH + pstrStationParam->rates_len;
 
-	PRINT_D(HOSTINF_DBG, "Handling edit station\n");
 	wid.val = kmalloc(wid.size, GFP_KERNEL);
 	if (!wid.val)
 		goto ERRORHANDLER;
@@ -2489,10 +2347,10 @@
 	pu8CurrByte = wid.val;
 	pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send edit station config packet\n");
+		netdev_err(vif->ndev, "Failed to send edit station\n");
 
 ERRORHANDLER:
 	kfree(pstrStationParam->rates);
@@ -2518,26 +2376,20 @@
 	}
 
 	if (hif_drv->usr_scan_req.scan_result) {
-		PRINT_INFO(GENERIC_DBG, "Required to remain on chan while scanning return\n");
 		hif_drv->remain_on_ch_pending = 1;
 		result = -EBUSY;
 		goto ERRORHANDLER;
 	}
 	if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
-		PRINT_INFO(GENERIC_DBG, "Required to remain on chan while connecting return\n");
 		result = -EBUSY;
 		goto ERRORHANDLER;
 	}
 
 	if (wilc_optaining_ip || wilc_connecting) {
-		PRINT_D(GENERIC_DBG, "[handle_scan]: Don't do obss scan until IP adresss is obtained\n");
 		result = -EBUSY;
 		goto ERRORHANDLER;
 	}
 
-	PRINT_D(HOSTINF_DBG, "Setting channel :%d\n",
-		pstrHostIfRemainOnChan->ch);
-
 	u8remain_on_chan_flag = true;
 	wid.id = (u16)WID_REMAIN_ON_CHAN;
 	wid.type = WID_STR;
@@ -2551,10 +2403,10 @@
 	wid.val[0] = u8remain_on_chan_flag;
 	wid.val[1] = (s8)pstrHostIfRemainOnChan->ch;
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result != 0)
-		PRINT_ER("Failed to set remain on channel\n");
+		netdev_err(vif->ndev, "Failed to set remain on channel\n");
 
 ERRORHANDLER:
 	{
@@ -2562,7 +2414,7 @@
 		hif_drv->remain_on_ch_timer.data = (unsigned long)vif;
 		mod_timer(&hif_drv->remain_on_ch_timer,
 			  jiffies +
-			  msecs_to_jiffies(pstrHostIfRemainOnChan->u32duration));
+			  msecs_to_jiffies(pstrHostIfRemainOnChan->duration));
 
 		if (hif_drv->remain_on_ch.ready)
 			hif_drv->remain_on_ch.ready(hif_drv->remain_on_ch.arg);
@@ -2581,10 +2433,6 @@
 	struct wid wid;
 	u8 *pu8CurrByte;
 
-	PRINT_D(HOSTINF_DBG, "Handling frame register : %d FrameType: %d\n",
-		pstrHostIfRegisterFrame->reg,
-		pstrHostIfRegisterFrame->frame_type);
-
 	wid.id = (u16)WID_REGISTER_FRAME;
 	wid.type = WID_STR;
 	wid.val = kmalloc(sizeof(u16) + 2, GFP_KERNEL);
@@ -2599,10 +2447,10 @@
 
 	wid.size = sizeof(u16) + 2;
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result) {
-		PRINT_ER("Failed to frame register config packet\n");
+		netdev_err(vif->ndev, "Failed to frame register\n");
 		result = -EINVAL;
 	}
 
@@ -2617,8 +2465,6 @@
 	s32 result = 0;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	PRINT_D(HOSTINF_DBG, "CANCEL REMAIN ON CHAN\n");
-
 	if (P2P_LISTEN_STATE) {
 		u8remain_on_chan_flag = false;
 		wid.id = (u16)WID_REMAIN_ON_CHAN;
@@ -2627,17 +2473,17 @@
 		wid.val = kmalloc(wid.size, GFP_KERNEL);
 
 		if (!wid.val) {
-			PRINT_ER("Failed to allocate memory\n");
+			netdev_err(vif->ndev, "Failed to allocate memory\n");
 			return -ENOMEM;
 		}
 
 		wid.val[0] = u8remain_on_chan_flag;
 		wid.val[1] = FALSE_FRMWR_CHANNEL;
 
-		result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-					 wilc_get_vif_idx(vif));
+		result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+					      wilc_get_vif_idx(vif));
 		if (result != 0) {
-			PRINT_ER("Failed to set remain on channel\n");
+			netdev_err(vif->ndev, "Failed to set remain channel\n");
 			goto _done_;
 		}
 
@@ -2647,7 +2493,7 @@
 		}
 		P2P_LISTEN_STATE = 0;
 	} else {
-		PRINT_D(GENERIC_DBG, "Not in listen state\n");
+		netdev_dbg(vif->ndev, "Not in listen state\n");
 		result = -EFAULT;
 	}
 
@@ -2670,7 +2516,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 }
 
 static void Handle_PowerManagement(struct wilc_vif *vif,
@@ -2686,16 +2532,14 @@
 		s8PowerMode = MIN_FAST_PS;
 	else
 		s8PowerMode = NO_POWERSAVE;
-	PRINT_D(HOSTINF_DBG, "Handling power mgmt to %d\n", s8PowerMode);
+
 	wid.val = &s8PowerMode;
 	wid.size = sizeof(char);
 
-	PRINT_D(HOSTINF_DBG, "Handling Power Management\n");
-
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send power management config packet\n");
+		netdev_err(vif->ndev, "Failed to send power management\n");
 }
 
 static void Handle_SetMulticastFilter(struct wilc_vif *vif,
@@ -2705,8 +2549,6 @@
 	struct wid wid;
 	u8 *pu8CurrByte;
 
-	PRINT_D(HOSTINF_DBG, "Setup Multicast Filter\n");
-
 	wid.id = (u16)WID_SETUP_MULTICAST_FILTER;
 	wid.type = WID_BIN;
 	wid.size = sizeof(struct set_multicast) + ((strHostIfSetMulti->cnt) * ETH_ALEN);
@@ -2729,59 +2571,54 @@
 		memcpy(pu8CurrByte, wilc_multicast_mac_addr_list,
 		       ((strHostIfSetMulti->cnt) * ETH_ALEN));
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result)
-		PRINT_ER("Failed to send setup multicast config packet\n");
+		netdev_err(vif->ndev, "Failed to send setup multicast\n");
 
 ERRORHANDLER:
 	kfree(wid.val);
 }
 
-static s32 Handle_DelAllRxBASessions(struct wilc_vif *vif,
-				     struct ba_session_info *strHostIfBASessionInfo)
+static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
 {
-	s32 result = 0;
+	int ret;
 	struct wid wid;
-	char *ptr = NULL;
 
-	PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n",
-		strHostIfBASessionInfo->bssid[0],
-		strHostIfBASessionInfo->bssid[1],
-		strHostIfBASessionInfo->bssid[2],
-		strHostIfBASessionInfo->tid);
+	wid.id = (u16)WID_TX_POWER;
+	wid.type = WID_CHAR;
+	wid.val = &tx_pwr;
+	wid.size = sizeof(char);
 
-	wid.id = (u16)WID_DEL_ALL_RX_BA;
-	wid.type = WID_STR;
-	wid.val = kmalloc(BLOCK_ACK_REQ_SIZE, GFP_KERNEL);
-	wid.size = BLOCK_ACK_REQ_SIZE;
-	ptr = wid.val;
-	*ptr++ = 0x14;
-	*ptr++ = 0x3;
-	*ptr++ = 0x2;
-	memcpy(ptr, strHostIfBASessionInfo->bssid, ETH_ALEN);
-	ptr += ETH_ALEN;
-	*ptr++ = strHostIfBASessionInfo->tid;
-	*ptr++ = 0;
-	*ptr++ = 32;
+	ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+				   wilc_get_vif_idx(vif));
+	if (ret)
+		netdev_err(vif->ndev, "Failed to set TX PWR\n");
+}
 
-	result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
-	if (result)
-		PRINT_D(HOSTINF_DBG, "Couldn't delete BA Session\n");
+static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
+{
+	s32 ret = 0;
+	struct wid wid;
 
-	kfree(wid.val);
+	wid.id = (u16)WID_TX_POWER;
+	wid.type = WID_CHAR;
+	wid.val = (s8 *)tx_pwr;
+	wid.size = sizeof(char);
+
+	ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				   wilc_get_vif_idx(vif));
+	if (ret)
+		netdev_err(vif->ndev, "Failed to get TX PWR\n");
 
 	up(&hif_sema_wait_response);
-
-	return result;
 }
 
 static int hostIFthread(void *pvArg)
 {
 	u32 u32Ret;
 	struct host_if_msg msg;
-	struct wilc *wilc = (struct wilc*)pvArg;
+	struct wilc *wilc = pvArg;
 	struct wilc_vif *vif;
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
@@ -2789,13 +2626,10 @@
 	while (1) {
 		wilc_mq_recv(&hif_msg_q, &msg, sizeof(struct host_if_msg), &u32Ret);
 		vif = msg.vif;
-		if (msg.id == HOST_IF_MSG_EXIT) {
-			PRINT_D(GENERIC_DBG, "THREAD: Exiting HostIfThread\n");
+		if (msg.id == HOST_IF_MSG_EXIT)
 			break;
-		}
 
 		if ((!wilc_initialized)) {
-			PRINT_D(GENERIC_DBG, "--WAIT--");
 			usleep_range(200 * 1000, 200 * 1000);
 			wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 			continue;
@@ -2803,7 +2637,6 @@
 
 		if (msg.id == HOST_IF_MSG_CONNECT &&
 		    vif->hif_drv->usr_scan_req.scan_result) {
-			PRINT_D(HOSTINF_DBG, "Requeue connect request till scan done received\n");
 			wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 			usleep_range(2 * 1000, 2 * 1000);
 			continue;
@@ -2853,7 +2686,6 @@
 
 		case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
 			del_timer(&vif->hif_drv->scan_timer);
-			PRINT_D(HOSTINF_DBG, "scan completed successfully\n");
 
 			if (!wilc_wlan_get_num_conn_ifcs(wilc))
 				wilc_chip_sleep_manually(wilc);
@@ -2908,13 +2740,11 @@
 			break;
 
 		case HOST_IF_MSG_SCAN_TIMER_FIRED:
-			PRINT_D(HOSTINF_DBG, "Scan Timeout\n");
 
 			Handle_ScanDone(msg.vif, SCAN_EVENT_ABORTED);
 			break;
 
 		case HOST_IF_MSG_CONNECT_TIMER_FIRED:
-			PRINT_D(HOSTINF_DBG, "Connect Timeout\n");
 			Handle_ConnectTimeout(msg.vif);
 			break;
 
@@ -2932,14 +2762,12 @@
 			break;
 
 		case HOST_IF_MSG_SET_IPADDRESS:
-			PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_IPADDRESS\n");
 			handle_set_ip_address(vif,
 					      msg.body.ip_info.ip_addr,
 					      msg.body.ip_info.idx);
 			break;
 
 		case HOST_IF_MSG_GET_IPADDRESS:
-			PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_IPADDRESS\n");
 			handle_get_ip_address(vif, msg.body.ip_info.idx);
 			break;
 
@@ -2954,12 +2782,10 @@
 			break;
 
 		case HOST_IF_MSG_REMAIN_ON_CHAN:
-			PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_REMAIN_ON_CHAN\n");
 			Handle_RemainOnChan(msg.vif, &msg.body.remain_on_ch);
 			break;
 
 		case HOST_IF_MSG_REGISTER_FRAME:
-			PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_REGISTER_FRAME\n");
 			Handle_RegisterFrame(msg.vif, &msg.body.reg_frame);
 			break;
 
@@ -2968,25 +2794,26 @@
 			break;
 
 		case HOST_IF_MSG_SET_MULTICAST_FILTER:
-			PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_MULTICAST_FILTER\n");
 			Handle_SetMulticastFilter(msg.vif, &msg.body.multicast_info);
 			break;
 
-		case HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS:
-			Handle_DelAllRxBASessions(msg.vif, &msg.body.session_info);
-			break;
-
 		case HOST_IF_MSG_DEL_ALL_STA:
 			Handle_DelAllSta(msg.vif, &msg.body.del_all_sta_info);
 			break;
 
+		case HOST_IF_MSG_SET_TX_POWER:
+			handle_set_tx_pwr(msg.vif, msg.body.tx_power.tx_pwr);
+			break;
+
+		case HOST_IF_MSG_GET_TX_POWER:
+			handle_get_tx_pwr(msg.vif, &msg.body.tx_power.tx_pwr);
+			break;
 		default:
-			PRINT_ER("[Host Interface] undefined Received Msg ID\n");
+			netdev_err(vif->ndev, "[Host Interface] undefined\n");
 			break;
 		}
 	}
 
-	PRINT_D(HOSTINF_DBG, "Releasing thread exit semaphore\n");
 	up(&hif_sema_thread);
 	return 0;
 }
@@ -3035,7 +2862,7 @@
 
 	if (!hif_drv) {
 		result = -EFAULT;
-		PRINT_ER("Failed to send setup multicast config packet\n");
+		netdev_err(vif->ndev, "Failed to send setup multicast\n");
 		return result;
 	}
 
@@ -3049,7 +2876,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Error in sending message queue : Request to remove WEP key\n");
+		netdev_err(vif->ndev, "Request to remove WEP key\n");
 	down(&hif_drv->sem_test_key_block);
 
 	return result;
@@ -3063,7 +2890,7 @@
 
 	if (!hif_drv) {
 		result = -EFAULT;
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return result;
 	}
 
@@ -3077,7 +2904,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Error in sending message queue : Default key index\n");
+		netdev_err(vif->ndev, "Default key index\n");
 	down(&hif_drv->sem_test_key_block);
 
 	return result;
@@ -3091,7 +2918,7 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3110,7 +2937,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Error in sending message queue :WEP Key\n");
+		netdev_err(vif->ndev, "STA - WEP Key\n");
 	down(&hif_drv->sem_test_key_block);
 
 	return result;
@@ -3122,19 +2949,14 @@
 	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
-	int i;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
-	if (INFO) {
-		for (i = 0; i < len; i++)
-			PRINT_INFO(HOSTAPD_DBG, "KEY is %x\n", key[i]);
-	}
 	msg.id = HOST_IF_MSG_KEY;
 	msg.body.key_info.type = WEP;
 	msg.body.key_info.action = ADDKEY_AP;
@@ -3151,7 +2973,7 @@
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 
 	if (result)
-		PRINT_ER("Error in sending message queue :WEP Key\n");
+		netdev_err(vif->ndev, "AP - WEP Key\n");
 	down(&hif_drv->sem_test_key_block);
 
 	return result;
@@ -3165,10 +2987,9 @@
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 	u8 key_len = ptk_key_len;
-	int i;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3193,20 +3014,11 @@
 	if (!msg.body.key_info.attr.wpa.key)
 		return -ENOMEM;
 
-	if (rx_mic) {
+	if (rx_mic)
 		memcpy(msg.body.key_info.attr.wpa.key + 16, rx_mic, RX_MIC_KEY_LEN);
-		if (INFO) {
-			for (i = 0; i < RX_MIC_KEY_LEN; i++)
-				PRINT_INFO(CFG80211_DBG, "PairwiseRx[%d] = %x\n", i, rx_mic[i]);
-		}
-	}
-	if (tx_mic) {
+
+	if (tx_mic)
 		memcpy(msg.body.key_info.attr.wpa.key + 24, tx_mic, TX_MIC_KEY_LEN);
-		if (INFO) {
-			for (i = 0; i < TX_MIC_KEY_LEN; i++)
-				PRINT_INFO(CFG80211_DBG, "PairwiseTx[%d] = %x\n", i, tx_mic[i]);
-		}
-	}
 
 	msg.body.key_info.attr.wpa.key_len = key_len;
 	msg.body.key_info.attr.wpa.mac_addr = mac_addr;
@@ -3216,7 +3028,7 @@
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 
 	if (result)
-		PRINT_ER("Error in sending message queue:  PTK Key\n");
+		netdev_err(vif->ndev, "PTK Key\n");
 
 	down(&hif_drv->sem_test_key_block);
 
@@ -3234,7 +3046,7 @@
 	u8 key_len = gtk_key_len;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 	memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3284,23 +3096,23 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Error in sending message queue:  RX GTK\n");
+		netdev_err(vif->ndev, "RX GTK\n");
 
 	down(&hif_drv->sem_test_key_block);
 
 	return result;
 }
 
-s32 wilc_set_pmkid_info(struct wilc_vif *vif,
-			struct host_if_pmkid_attr *pu8PmkidInfoArray)
+int wilc_set_pmkid_info(struct wilc_vif *vif,
+			struct host_if_pmkid_attr *pmkid)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
-	u32 i;
+	int i;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3311,34 +3123,34 @@
 	msg.body.key_info.action = ADDKEY;
 	msg.vif = vif;
 
-	for (i = 0; i < pu8PmkidInfoArray->numpmkid; i++) {
+	for (i = 0; i < pmkid->numpmkid; i++) {
 		memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].bssid,
-		       &pu8PmkidInfoArray->pmkidlist[i].bssid, ETH_ALEN);
+		       &pmkid->pmkidlist[i].bssid, ETH_ALEN);
 		memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].pmkid,
-		       &pu8PmkidInfoArray->pmkidlist[i].pmkid, PMKID_LEN);
+		       &pmkid->pmkidlist[i].pmkid, PMKID_LEN);
 	}
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER(" Error in sending messagequeue: PMKID Info\n");
+		netdev_err(vif->ndev, "PMKID Info\n");
 
 	return result;
 }
 
-s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress)
+int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
 	msg.id = HOST_IF_MSG_GET_MAC_ADDRESS;
-	msg.body.get_mac_info.mac_addr = pu8MacAddress;
+	msg.body.get_mac_info.mac_addr = mac_addr;
 	msg.vif = vif;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("Failed to send get mac address\n");
+		netdev_err(vif->ndev, "Failed to send get mac address\n");
 		return -EFAULT;
 	}
 
@@ -3346,42 +3158,23 @@
 	return result;
 }
 
-s32 wilc_set_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress)
+int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
+		      size_t ssid_len, const u8 *ies, size_t ies_len,
+		      wilc_connect_result connect_result, void *user_arg,
+		      u8 security, enum AUTHTYPE auth_type,
+		      u8 channel, void *join_params)
 {
-	s32 result = 0;
-	struct host_if_msg msg;
-
-	PRINT_D(GENERIC_DBG, "mac addr = %x:%x:%x\n", pu8MacAddress[0], pu8MacAddress[1], pu8MacAddress[2]);
-
-	memset(&msg, 0, sizeof(struct host_if_msg));
-	msg.id = HOST_IF_MSG_SET_MAC_ADDRESS;
-	memcpy(msg.body.set_mac_info.mac_addr, pu8MacAddress, ETH_ALEN);
-	msg.vif = vif;
-
-	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-	if (result)
-		PRINT_ER("Failed to send message queue: Set mac address\n");
-
-	return result;
-}
-
-s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid,
-		      size_t ssidLen, const u8 *pu8IEs, size_t IEsLen,
-		      wilc_connect_result pfConnectResult, void *pvUserArg,
-		      u8 u8security, enum AUTHTYPE tenuAuth_type,
-		      u8 u8channel, void *pJoinParams)
-{
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	if (!hif_drv || !pfConnectResult) {
-		PRINT_ER("Driver is null\n");
+	if (!hif_drv || !connect_result) {
+		netdev_err(vif->ndev, "Driver is null\n");
 		return -EFAULT;
 	}
 
-	if (!pJoinParams) {
-		PRINT_ER("Unable to Join - JoinParams is NULL\n");
+	if (!join_params) {
+		netdev_err(vif->ndev, "Unable to Join - JoinParams is NULL\n");
 		return -EFAULT;
 	}
 
@@ -3389,39 +3182,39 @@
 
 	msg.id = HOST_IF_MSG_CONNECT;
 
-	msg.body.con_info.security = u8security;
-	msg.body.con_info.auth_type = tenuAuth_type;
-	msg.body.con_info.ch = u8channel;
-	msg.body.con_info.result = pfConnectResult;
-	msg.body.con_info.arg = pvUserArg;
-	msg.body.con_info.params = pJoinParams;
+	msg.body.con_info.security = security;
+	msg.body.con_info.auth_type = auth_type;
+	msg.body.con_info.ch = channel;
+	msg.body.con_info.result = connect_result;
+	msg.body.con_info.arg = user_arg;
+	msg.body.con_info.params = join_params;
 	msg.vif = vif;
 
-	if (pu8bssid) {
-		msg.body.con_info.bssid = kmalloc(6, GFP_KERNEL);
-		memcpy(msg.body.con_info.bssid, pu8bssid, 6);
+	if (bssid) {
+		msg.body.con_info.bssid = kmemdup(bssid, 6, GFP_KERNEL);
+		if (!msg.body.con_info.bssid)
+			return -ENOMEM;
 	}
 
-	if (pu8ssid) {
-		msg.body.con_info.ssid_len = ssidLen;
-		msg.body.con_info.ssid = kmalloc(ssidLen, GFP_KERNEL);
-		memcpy(msg.body.con_info.ssid, pu8ssid, ssidLen);
+	if (ssid) {
+		msg.body.con_info.ssid_len = ssid_len;
+		msg.body.con_info.ssid = kmemdup(ssid, ssid_len, GFP_KERNEL);
+		if (!msg.body.con_info.ssid)
+			return -ENOMEM;
 	}
 
-	if (pu8IEs) {
-		msg.body.con_info.ies_len = IEsLen;
-		msg.body.con_info.ies = kmalloc(IEsLen, GFP_KERNEL);
-		memcpy(msg.body.con_info.ies, pu8IEs, IEsLen);
+	if (ies) {
+		msg.body.con_info.ies_len = ies_len;
+		msg.body.con_info.ies = kmemdup(ies, ies_len, GFP_KERNEL);
+		if (!msg.body.con_info.ies)
+			return -ENOMEM;
 	}
 	if (hif_drv->hif_state < HOST_IF_CONNECTING)
 		hif_drv->hif_state = HOST_IF_CONNECTING;
-	else
-		PRINT_D(GENERIC_DBG, "Don't set state to 'connecting' : %d\n",
-			hif_drv->hif_state);
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("Failed to send message queue: Set join request\n");
+		netdev_err(vif->ndev, "send message: Set join request\n");
 		return -EFAULT;
 	}
 
@@ -3432,40 +3225,14 @@
 	return result;
 }
 
-s32 wilc_flush_join_req(struct wilc_vif *vif)
+int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
 {
-	s32 result = 0;
-	struct host_if_msg msg;
-	struct host_if_drv *hif_drv = vif->hif_drv;
-
-	if (!join_req)
-		return -EFAULT;
-
-	if (!hif_drv) {
-		PRINT_ER("Driver is null\n");
-		return -EFAULT;
-	}
-
-	msg.id = HOST_IF_MSG_FLUSH_CONNECT;
-	msg.vif = vif;
-
-	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-	if (result) {
-		PRINT_ER("Failed to send message queue: Flush join request\n");
-		return -EFAULT;
-	}
-
-	return result;
-}
-
-s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode)
-{
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("Driver is null\n");
+		netdev_err(vif->ndev, "Driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3476,7 +3243,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Failed to send message queue: disconnect\n");
+		netdev_err(vif->ndev, "Failed to send message: disconnect\n");
 
 	down(&hif_drv->sem_test_disconn_block);
 
@@ -3493,7 +3260,7 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("Driver is null\n");
+		netdev_err(vif->ndev, "Driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3502,16 +3269,15 @@
 	wid.val = pu8AssocRespInfo;
 	wid.size = u32MaxAssocRespInfoLen;
 
-	result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
-				 wilc_get_vif_idx(vif));
+	result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+				      wilc_get_vif_idx(vif));
 	if (result) {
 		*pu32RcvdAssocRespInfoLen = 0;
-		PRINT_ER("Failed to send association response config packet\n");
+		netdev_err(vif->ndev, "Failed to send association response\n");
 		return -EINVAL;
-	} else {
-		*pu32RcvdAssocRespInfoLen = wid.size;
 	}
 
+	*pu32RcvdAssocRespInfoLen = wid.size;
 	return result;
 }
 
@@ -3522,7 +3288,7 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3533,32 +3299,14 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
-int wilc_wait_msg_queue_idle(void)
-{
-	int result = 0;
-	struct host_if_msg msg;
-
-	memset(&msg, 0, sizeof(struct host_if_msg));
-	msg.id = HOST_IF_MSG_Q_IDLE;
-	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-	if (result) {
-		PRINT_ER("wilc mq send fail\n");
-		result = -EINVAL;
-	}
-
-	down(&hif_sema_wait_response);
-
-	return result;
-}
-
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index)
+int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
 {
 	int result = 0;
 	struct host_if_msg msg;
@@ -3566,11 +3314,12 @@
 	memset(&msg, 0, sizeof(struct host_if_msg));
 	msg.id = HOST_IF_MSG_SET_WFIDRV_HANDLER;
 	msg.body.drv.handler = index;
+	msg.body.drv.mac_idx = mac_idx;
 	msg.vif = vif;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 		result = -EINVAL;
 	}
 
@@ -3589,7 +3338,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 		result = -EINVAL;
 	}
 
@@ -3604,7 +3353,7 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -3616,7 +3365,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Failed to send get host channel param's message queue ");
+		netdev_err(vif->ndev, "Failed to send get host ch param\n");
 
 	down(&hif_drv->sem_inactive_time);
 
@@ -3625,9 +3374,9 @@
 	return result;
 }
 
-s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi)
+int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
@@ -3637,53 +3386,55 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("Failed to send get host channel param's message queue ");
+		netdev_err(vif->ndev, "Failed to send get host ch param\n");
 		return -EFAULT;
 	}
 
 	down(&hif_drv->sem_get_rssi);
 
-	if (!ps8Rssi) {
-		PRINT_ER("RSS pointer value is null");
+	if (!rssi_level) {
+		netdev_err(vif->ndev, "RSS pointer value is null\n");
 		return -EFAULT;
 	}
 
-	*ps8Rssi = rssi;
+	*rssi_level = rssi;
 
 	return result;
 }
 
-s32 wilc_get_statistics(struct wilc_vif *vif, struct rf_info *pstrStatistics)
+int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 	msg.id = HOST_IF_MSG_GET_STATISTICS;
-	msg.body.data = (char *)pstrStatistics;
+	msg.body.data = (char *)stats;
 	msg.vif = vif;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("Failed to send get host channel param's message queue ");
+		netdev_err(vif->ndev, "Failed to send get host channel\n");
 		return -EFAULT;
 	}
 
-	down(&hif_sema_wait_response);
+	if (stats != &vif->wilc->dummy_statistics)
+		down(&hif_sema_wait_response);
 	return result;
 }
 
-s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType,
-	      u8 *pu8ChnlFreqList, u8 u8ChnlListLen, const u8 *pu8IEs,
-	      size_t IEsLen, wilc_scan_result ScanResult, void *pvUserArg,
-	      struct hidden_network *pstrHiddenNetwork)
+int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
+	      u8 *ch_freq_list, u8 ch_list_len, const u8 *ies,
+	      size_t ies_len, wilc_scan_result scan_result, void *user_arg,
+	      struct hidden_network *hidden_network)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
+	struct scan_attr *scan_info = &msg.body.scan_info;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	if (!hif_drv || !ScanResult) {
-		PRINT_ER("hif_drv or ScanResult = NULL\n");
+	if (!hif_drv || !scan_result) {
+		netdev_err(vif->ndev, "hif_drv or scan_result = NULL\n");
 		return -EFAULT;
 	}
 
@@ -3691,34 +3442,35 @@
 
 	msg.id = HOST_IF_MSG_SCAN;
 
-	if (pstrHiddenNetwork) {
-		msg.body.scan_info.hidden_network.pstrHiddenNetworkInfo = pstrHiddenNetwork->pstrHiddenNetworkInfo;
-		msg.body.scan_info.hidden_network.u8ssidnum = pstrHiddenNetwork->u8ssidnum;
-
-	} else
-		PRINT_D(HOSTINF_DBG, "pstrHiddenNetwork IS EQUAL TO NULL\n");
+	if (hidden_network) {
+		scan_info->hidden_network.net_info = hidden_network->net_info;
+		scan_info->hidden_network.n_ssids = hidden_network->n_ssids;
+	}
 
 	msg.vif = vif;
-	msg.body.scan_info.src = u8ScanSource;
-	msg.body.scan_info.type = u8ScanType;
-	msg.body.scan_info.result = ScanResult;
-	msg.body.scan_info.arg = pvUserArg;
+	scan_info->src = scan_source;
+	scan_info->type = scan_type;
+	scan_info->result = scan_result;
+	scan_info->arg = user_arg;
 
-	msg.body.scan_info.ch_list_len = u8ChnlListLen;
-	msg.body.scan_info.ch_freq_list = kmalloc(u8ChnlListLen, GFP_KERNEL);
-	memcpy(msg.body.scan_info.ch_freq_list, pu8ChnlFreqList, u8ChnlListLen);
+	scan_info->ch_list_len = ch_list_len;
+	scan_info->ch_freq_list = kmemdup(ch_freq_list,
+					  ch_list_len,
+					  GFP_KERNEL);
+	if (!scan_info->ch_freq_list)
+		return -ENOMEM;
 
-	msg.body.scan_info.ies_len = IEsLen;
-	msg.body.scan_info.ies = kmalloc(IEsLen, GFP_KERNEL);
-	memcpy(msg.body.scan_info.ies, pu8IEs, IEsLen);
+	scan_info->ies_len = ies_len;
+	scan_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+	if (!scan_info->ies)
+		return -ENOMEM;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result) {
-		PRINT_ER("Error in sending message queue\n");
+		netdev_err(vif->ndev, "Error in sending message queue\n");
 		return -EINVAL;
 	}
 
-	PRINT_D(HOSTINF_DBG, ">> Starting the SCAN timer\n");
 	hif_drv->scan_timer.data = (unsigned long)vif;
 	mod_timer(&hif_drv->scan_timer,
 		  jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
@@ -3726,21 +3478,21 @@
 	return result;
 }
 
-s32 wilc_hif_set_cfg(struct wilc_vif *vif,
-		     struct cfg_param_val *pstrCfgParamVal)
+int wilc_hif_set_cfg(struct wilc_vif *vif,
+		     struct cfg_param_attr *cfg_param)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("hif_drv NULL\n");
+		netdev_err(vif->ndev, "hif_drv NULL\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 	msg.id = HOST_IF_MSG_CFG_PARAMS;
-	msg.body.cfg_info.cfg_attr_info = *pstrCfgParamVal;
+	msg.body.cfg_info = *cfg_param;
 	msg.vif = vif;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
@@ -3753,32 +3505,20 @@
 	struct wilc_vif *vif = (struct wilc_vif *)arg;
 
 	if (!vif->hif_drv) {
-		PRINT_ER("Driver handler is NULL\n");
+		netdev_err(vif->ndev, "Driver handler is NULL\n");
 		return;
 	}
 
-	if (vif->hif_drv->hif_state == HOST_IF_CONNECTED) {
-		s32 result = 0;
-		struct host_if_msg msg;
+	if (vif->hif_drv->hif_state == HOST_IF_CONNECTED)
+		wilc_get_statistics(vif, &vif->wilc->dummy_statistics);
 
-		memset(&msg, 0, sizeof(struct host_if_msg));
-
-		msg.id = HOST_IF_MSG_GET_RSSI;
-		msg.vif = vif;
-
-		result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-		if (result) {
-			PRINT_ER("Failed to send get host channel param's message queue ");
-			return;
-		}
-	}
 	periodic_rssi.data = (unsigned long)vif;
 	mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
 }
 
-s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
+int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_drv *hif_drv;
 	struct wilc_vif *vif;
 	struct wilc *wilc;
@@ -3787,8 +3527,6 @@
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
 
-	PRINT_D(HOSTINF_DBG, "Initializing host interface for client %d\n", clients_count + 1);
-
 	scan_while_connected = false;
 
 	sema_init(&hif_sema_wait_response, 0);
@@ -3807,7 +3545,6 @@
 
 	wilc_optaining_ip = false;
 
-	PRINT_D(HOSTINF_DBG, "Global handle pointer value=%p\n", hif_drv);
 	if (clients_count == 0)	{
 		sema_init(&hif_sema_thread, 0);
 		sema_init(&hif_sema_driver, 0);
@@ -3817,17 +3554,13 @@
 	sema_init(&hif_drv->sem_test_key_block, 0);
 	sema_init(&hif_drv->sem_test_disconn_block, 0);
 	sema_init(&hif_drv->sem_get_rssi, 0);
-	sema_init(&hif_drv->sem_get_link_speed, 0);
-	sema_init(&hif_drv->sem_get_chnl, 0);
 	sema_init(&hif_drv->sem_inactive_time, 0);
 
-	PRINT_D(HOSTINF_DBG, "INIT: CLIENT COUNT %d\n", clients_count);
-
 	if (clients_count == 0)	{
 		result = wilc_mq_create(&hif_msg_q);
 
 		if (result < 0) {
-			PRINT_ER("Failed to creat MQ\n");
+			netdev_err(vif->ndev, "Failed to creat MQ\n");
 			goto _fail_;
 		}
 
@@ -3835,7 +3568,7 @@
 						 "WILC_kthread");
 
 		if (IS_ERR(hif_thread_handler)) {
-			PRINT_ER("Failed to creat Thread\n");
+			netdev_err(vif->ndev, "Failed to creat Thread\n");
 			result = -EFAULT;
 			goto _fail_mq_;
 		}
@@ -3860,13 +3593,6 @@
 
 	hif_drv->p2p_timeout = 0;
 
-	PRINT_INFO(HOSTINF_DBG, "Initialization values, Site survey value: %d\n Scan source: %d\n Active scan time: %d\n Passive scan time: %d\nCurrent tx Rate = %d\n",
-		   hif_drv->cfg_values.site_survey_enabled,
-		   hif_drv->cfg_values.scan_source,
-		   hif_drv->cfg_values.active_scan_time,
-		   hif_drv->cfg_values.passive_scan_time,
-		   hif_drv->cfg_values.curr_tx_rate);
-
 	up(&hif_drv->sem_cfg_values);
 
 	clients_count++;
@@ -3879,34 +3605,27 @@
 	return result;
 }
 
-s32 wilc_deinit(struct wilc_vif *vif)
+int wilc_deinit(struct wilc_vif *vif)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv)	{
-		PRINT_ER("hif_drv = NULL\n");
-		return 0;
+		netdev_err(vif->ndev, "hif_drv = NULL\n");
+		return -EFAULT;
 	}
 
 	down(&hif_sema_deinit);
 
 	terminated_handle = hif_drv;
-	PRINT_D(HOSTINF_DBG, "De-initializing host interface for client %d\n", clients_count);
 
-	if (del_timer_sync(&hif_drv->scan_timer))
-		PRINT_D(HOSTINF_DBG, ">> Scan timer is active\n");
-
-	if (del_timer_sync(&hif_drv->connect_timer))
-		PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
-
-	if (del_timer_sync(&periodic_rssi))
-		PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
-
+	del_timer_sync(&hif_drv->scan_timer);
+	del_timer_sync(&hif_drv->connect_timer);
+	del_timer_sync(&periodic_rssi);
 	del_timer_sync(&hif_drv->remain_on_ch_timer);
 
-	wilc_set_wfi_drv_handler(vif, 0);
+	wilc_set_wfi_drv_handler(vif, 0, 0);
 	down(&hif_sema_driver);
 
 	if (hif_drv->usr_scan_req.scan_result) {
@@ -3922,15 +3641,13 @@
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
 	if (clients_count == 1)	{
-		if (del_timer_sync(&periodic_rssi))
-			PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
-
+		del_timer_sync(&periodic_rssi);
 		msg.id = HOST_IF_MSG_EXIT;
 		msg.vif = vif;
 
 		result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 		if (result != 0)
-			PRINT_ER("Error in sending deinit's message queue message function: Error(%d)\n", result);
+			netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
 
 		down(&hif_sema_thread);
 
@@ -3961,7 +3678,7 @@
 	hif_drv = vif->hif_drv;
 
 	if (!hif_drv || hif_drv == terminated_handle)	{
-		PRINT_ER("NetworkInfo received but driver not init[%p]\n", hif_drv);
+		netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
 		return;
 	}
 
@@ -3976,7 +3693,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Error in sending network info message queue message parameters: Error(%d)\n", result);
+		netdev_err(vif->ndev, "message parameters (%d)\n", result);
 }
 
 void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
@@ -3998,16 +3715,14 @@
 	}
 
 	hif_drv = vif->hif_drv;
-	PRINT_D(HOSTINF_DBG, "General asynchronous info packet received\n");
 
 	if (!hif_drv || hif_drv == terminated_handle) {
-		PRINT_D(HOSTINF_DBG, "Wifi driver handler is equal to NULL\n");
 		up(&hif_sema_deinit);
 		return;
 	}
 
 	if (!hif_drv->usr_conn_req.conn_result) {
-		PRINT_ER("Received mac status is not needed when there is no current Connect Reques\n");
+		netdev_err(vif->ndev, "there is no current Connect Request\n");
 		up(&hif_sema_deinit);
 		return;
 	}
@@ -4023,7 +3738,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("Error in sending message queue asynchronous message info: Error(%d)\n", result);
+		netdev_err(vif->ndev, "synchronous info (%d)\n", result);
 
 	up(&hif_sema_deinit);
 }
@@ -4043,8 +3758,6 @@
 		return;
 	hif_drv = vif->hif_drv;
 
-	PRINT_D(GENERIC_DBG, "Scan notification received %p\n", hif_drv);
-
 	if (!hif_drv || hif_drv == terminated_handle)
 		return;
 
@@ -4056,24 +3769,22 @@
 
 		result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 		if (result)
-			PRINT_ER("Error in sending message queue scan complete parameters: Error(%d)\n", result);
+			netdev_err(vif->ndev, "complete param (%d)\n", result);
 	}
-
-	return;
 }
 
-s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID,
-			   u32 u32duration, u16 chan,
-			   wilc_remain_on_chan_expired RemainOnChanExpired,
-			   wilc_remain_on_chan_ready RemainOnChanReady,
-			   void *pvUserArg)
+int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
+			   u32 duration, u16 chan,
+			   wilc_remain_on_chan_expired expired,
+			   wilc_remain_on_chan_ready ready,
+			   void *user_arg)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -4081,28 +3792,28 @@
 
 	msg.id = HOST_IF_MSG_REMAIN_ON_CHAN;
 	msg.body.remain_on_ch.ch = chan;
-	msg.body.remain_on_ch.expired = RemainOnChanExpired;
-	msg.body.remain_on_ch.ready = RemainOnChanReady;
-	msg.body.remain_on_ch.arg = pvUserArg;
-	msg.body.remain_on_ch.u32duration = u32duration;
-	msg.body.remain_on_ch.id = u32SessionID;
+	msg.body.remain_on_ch.expired = expired;
+	msg.body.remain_on_ch.ready = ready;
+	msg.body.remain_on_ch.arg = user_arg;
+	msg.body.remain_on_ch.duration = duration;
+	msg.body.remain_on_ch.id = session_id;
 	msg.vif = vif;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 
 	return result;
 }
 
-s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID)
+int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -4111,104 +3822,98 @@
 	memset(&msg, 0, sizeof(struct host_if_msg));
 	msg.id = HOST_IF_MSG_LISTEN_TIMER_FIRED;
 	msg.vif = vif;
-	msg.body.remain_on_ch.id = u32SessionID;
+	msg.body.remain_on_ch.id = session_id;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 
 	return result;
 }
 
-s32 wilc_frame_register(struct wilc_vif *vif, u16 u16FrameType, bool bReg)
+int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
 	msg.id = HOST_IF_MSG_REGISTER_FRAME;
-	switch (u16FrameType) {
+	switch (frame_type) {
 	case ACTION:
-		PRINT_D(HOSTINF_DBG, "ACTION\n");
 		msg.body.reg_frame.reg_id = ACTION_FRM_IDX;
 		break;
 
 	case PROBE_REQ:
-		PRINT_D(HOSTINF_DBG, "PROBE REQ\n");
 		msg.body.reg_frame.reg_id = PROBE_REQ_IDX;
 		break;
 
 	default:
-		PRINT_D(HOSTINF_DBG, "Not valid frame type\n");
 		break;
 	}
-	msg.body.reg_frame.frame_type = u16FrameType;
-	msg.body.reg_frame.reg = bReg;
+	msg.body.reg_frame.frame_type = frame_type;
+	msg.body.reg_frame.reg = reg;
 	msg.vif = vif;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 
 	return result;
 }
 
-s32 wilc_add_beacon(struct wilc_vif *vif, u32 u32Interval, u32 u32DTIMPeriod,
-		    u32 u32HeadLen, u8 *pu8Head, u32 u32TailLen, u8 *pu8Tail)
+int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
+		    u32 head_len, u8 *head, u32 tail_len, u8 *tail)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
-	struct beacon_attr *pstrSetBeaconParam = &msg.body.beacon_info;
+	struct beacon_attr *beacon_info = &msg.body.beacon_info;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
-	PRINT_D(HOSTINF_DBG, "Setting adding beacon message queue params\n");
-
 	msg.id = HOST_IF_MSG_ADD_BEACON;
 	msg.vif = vif;
-	pstrSetBeaconParam->interval = u32Interval;
-	pstrSetBeaconParam->dtim_period = u32DTIMPeriod;
-	pstrSetBeaconParam->head_len = u32HeadLen;
-	pstrSetBeaconParam->head = kmemdup(pu8Head, u32HeadLen, GFP_KERNEL);
-	if (!pstrSetBeaconParam->head) {
+	beacon_info->interval = interval;
+	beacon_info->dtim_period = dtim_period;
+	beacon_info->head_len = head_len;
+	beacon_info->head = kmemdup(head, head_len, GFP_KERNEL);
+	if (!beacon_info->head) {
 		result = -ENOMEM;
 		goto ERRORHANDLER;
 	}
-	pstrSetBeaconParam->tail_len = u32TailLen;
+	beacon_info->tail_len = tail_len;
 
-	if (u32TailLen > 0) {
-		pstrSetBeaconParam->tail = kmemdup(pu8Tail, u32TailLen,
-						   GFP_KERNEL);
-		if (!pstrSetBeaconParam->tail) {
+	if (tail_len > 0) {
+		beacon_info->tail = kmemdup(tail, tail_len, GFP_KERNEL);
+		if (!beacon_info->tail) {
 			result = -ENOMEM;
 			goto ERRORHANDLER;
 		}
 	} else {
-		pstrSetBeaconParam->tail = NULL;
+		beacon_info->tail = NULL;
 	}
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc mq send fail\n");
+		netdev_err(vif->ndev, "wilc mq send fail\n");
 
 ERRORHANDLER:
 	if (result) {
-		kfree(pstrSetBeaconParam->head);
+		kfree(beacon_info->head);
 
-		kfree(pstrSetBeaconParam->tail);
+		kfree(beacon_info->tail);
 	}
 
 	return result;
@@ -4221,17 +3926,16 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	msg.id = HOST_IF_MSG_DEL_BEACON;
 	msg.vif = vif;
-	PRINT_D(HOSTINF_DBG, "Setting deleting beacon message queue params\n");
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
 	return result;
 }
@@ -4244,14 +3948,12 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
-	PRINT_D(HOSTINF_DBG, "Setting adding station message queue params\n");
-
 	msg.id = HOST_IF_MSG_ADD_STATION;
 	msg.vif = vif;
 
@@ -4266,7 +3968,7 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 	return result;
 }
 
@@ -4278,14 +3980,12 @@
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
-	PRINT_D(HOSTINF_DBG, "Setting deleting station message queue params\n");
-
 	msg.id = HOST_IF_MSG_DEL_STATION;
 	msg.vif = vif;
 
@@ -4296,160 +3996,141 @@
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 	return result;
 }
 
-s32 wilc_del_allstation(struct wilc_vif *vif, u8 pu8MacAddr[][ETH_ALEN])
+int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
-	struct del_all_sta *pstrDelAllStationMsg = &msg.body.del_all_sta_info;
+	struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
 	struct host_if_drv *hif_drv = vif->hif_drv;
-	u8 au8Zero_Buff[ETH_ALEN] = {0};
-	u32 i;
-	u8 u8AssocNumb = 0;
+	u8 zero_addr[ETH_ALEN] = {0};
+	int i;
+	u8 assoc_sta = 0;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
-	PRINT_D(HOSTINF_DBG, "Setting deauthenticating station message queue params\n");
-
 	msg.id = HOST_IF_MSG_DEL_ALL_STA;
 	msg.vif = vif;
 
 	for (i = 0; i < MAX_NUM_STA; i++) {
-		if (memcmp(pu8MacAddr[i], au8Zero_Buff, ETH_ALEN)) {
-			memcpy(pstrDelAllStationMsg->del_all_sta[i], pu8MacAddr[i], ETH_ALEN);
-			PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n",
-				pstrDelAllStationMsg->del_all_sta[i][0],
-				pstrDelAllStationMsg->del_all_sta[i][1],
-				pstrDelAllStationMsg->del_all_sta[i][2],
-				pstrDelAllStationMsg->del_all_sta[i][3],
-				pstrDelAllStationMsg->del_all_sta[i][4],
-				pstrDelAllStationMsg->del_all_sta[i][5]);
-			u8AssocNumb++;
+		if (memcmp(mac_addr[i], zero_addr, ETH_ALEN)) {
+			memcpy(del_all_sta_info->del_all_sta[i], mac_addr[i], ETH_ALEN);
+			assoc_sta++;
 		}
 	}
-	if (!u8AssocNumb) {
-		PRINT_D(CFG80211_DBG, "NO ASSOCIATED STAS\n");
+	if (!assoc_sta)
 		return result;
-	}
 
-	pstrDelAllStationMsg->assoc_sta = u8AssocNumb;
+	del_all_sta_info->assoc_sta = assoc_sta;
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
 	down(&hif_sema_wait_response);
 
 	return result;
 }
 
-s32 wilc_edit_station(struct wilc_vif *vif,
-		      struct add_sta_param *pstrStaParams)
+int wilc_edit_station(struct wilc_vif *vif,
+		      struct add_sta_param *sta_param)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
-	struct add_sta_param *pstrAddStationMsg = &msg.body.add_sta_info;
+	struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
-	PRINT_D(HOSTINF_DBG, "Setting editing station message queue params\n");
-
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
 	msg.id = HOST_IF_MSG_EDIT_STATION;
 	msg.vif = vif;
 
-	memcpy(pstrAddStationMsg, pstrStaParams, sizeof(struct add_sta_param));
-	if (pstrAddStationMsg->rates_len > 0) {
-		u8 *rates = kmalloc(pstrAddStationMsg->rates_len, GFP_KERNEL);
-
-		if (!rates)
+	memcpy(add_sta_info, sta_param, sizeof(struct add_sta_param));
+	if (add_sta_info->rates_len > 0) {
+		add_sta_info->rates = kmemdup(sta_param->rates,
+					      add_sta_info->rates_len,
+					      GFP_KERNEL);
+		if (!add_sta_info->rates)
 			return -ENOMEM;
-
-		memcpy(rates, pstrStaParams->rates,
-		       pstrAddStationMsg->rates_len);
-		pstrAddStationMsg->rates = rates;
 	}
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
 	return result;
 }
 
-s32 wilc_set_power_mgmt(struct wilc_vif *vif, bool bIsEnabled, u32 u32Timeout)
+int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
-	struct power_mgmt_param *pstrPowerMgmtParam = &msg.body.pwr_mgmt_info;
+	struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d <<\n\n", bIsEnabled);
-
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
-	PRINT_D(HOSTINF_DBG, "Setting Power management message queue params\n");
+	if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
+		return 0;
 
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
 	msg.id = HOST_IF_MSG_POWER_MGMT;
 	msg.vif = vif;
 
-	pstrPowerMgmtParam->enabled = bIsEnabled;
-	pstrPowerMgmtParam->timeout = u32Timeout;
+	pwr_mgmt_info->enabled = enabled;
+	pwr_mgmt_info->timeout = timeout;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 	return result;
 }
 
-s32 wilc_setup_multicast_filter(struct wilc_vif *vif, bool bIsEnabled,
-				u32 u32count)
+int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
+				u32 count)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
-	struct set_multicast *pstrMulticastFilterParam = &msg.body.multicast_info;
+	struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
-	PRINT_D(HOSTINF_DBG, "Setting Multicast Filter params\n");
-
 	memset(&msg, 0, sizeof(struct host_if_msg));
 
 	msg.id = HOST_IF_MSG_SET_MULTICAST_FILTER;
 	msg.vif = vif;
 
-	pstrMulticastFilterParam->enabled = bIsEnabled;
-	pstrMulticastFilterParam->cnt = u32count;
+	multicast_filter_param->enabled = enabled;
+	multicast_filter_param->cnt = count;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 	return result;
 }
 
-static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
+static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
 {
 	struct join_bss_param *pNewJoinBssParam = NULL;
 	u8 *pu8IEs;
@@ -4464,17 +4145,18 @@
 	u8 authTotalCount = 0;
 	u8 i, j;
 
-	pu8IEs = ptstrNetworkInfo->pu8IEs;
-	u16IEsLen = ptstrNetworkInfo->u16IEsLen;
+	pu8IEs = ptstrNetworkInfo->ies;
+	u16IEsLen = ptstrNetworkInfo->ies_len;
 
 	pNewJoinBssParam = kzalloc(sizeof(struct join_bss_param), GFP_KERNEL);
 	if (pNewJoinBssParam) {
-		pNewJoinBssParam->dtim_period = ptstrNetworkInfo->u8DtimPeriod;
-		pNewJoinBssParam->beacon_period = ptstrNetworkInfo->u16BeaconPeriod;
-		pNewJoinBssParam->cap_info = ptstrNetworkInfo->u16CapInfo;
-		memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6);
-		memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1);
-		pNewJoinBssParam->ssid_len = ptstrNetworkInfo->u8SsidLen;
+		pNewJoinBssParam->dtim_period = ptstrNetworkInfo->dtim_period;
+		pNewJoinBssParam->beacon_period = ptstrNetworkInfo->beacon_period;
+		pNewJoinBssParam->cap_info = ptstrNetworkInfo->cap_info;
+		memcpy(pNewJoinBssParam->bssid, ptstrNetworkInfo->bssid, 6);
+		memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->ssid,
+		       ptstrNetworkInfo->ssid_len + 1);
+		pNewJoinBssParam->ssid_len = ptstrNetworkInfo->ssid_len;
 		memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3);
 		memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3);
 
@@ -4523,7 +4205,7 @@
 				 (pu8IEs[index + 5] == 0x09) && (pu8IEs[index + 6] == 0x0c)) {
 				u16 u16P2P_count;
 
-				pNewJoinBssParam->tsf = ptstrNetworkInfo->u32Tsf;
+				pNewJoinBssParam->tsf = ptstrNetworkInfo->tsf_lo;
 				pNewJoinBssParam->noa_enabled = 1;
 				pNewJoinBssParam->idx = pu8IEs[index + 9];
 
@@ -4534,10 +4216,6 @@
 					pNewJoinBssParam->opp_enabled = 0;
 				}
 
-				PRINT_D(GENERIC_DBG, "P2P Dump\n");
-				for (i = 0; i < pu8IEs[index + 7]; i++)
-					PRINT_D(GENERIC_DBG, " %x\n", pu8IEs[index + 9 + i]);
-
 				pNewJoinBssParam->cnt = pu8IEs[index + 11];
 				u16P2P_count = index + 12;
 
@@ -4606,53 +4284,14 @@
 	return (void *)pNewJoinBssParam;
 }
 
-void wilc_free_join_params(void *pJoinParams)
+int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
 {
-	if ((struct bss_param *)pJoinParams)
-		kfree((struct bss_param *)pJoinParams);
-	else
-		PRINT_ER("Unable to FREE null pointer\n");
-}
-
-s32 wilc_del_all_rx_ba_session(struct wilc_vif *vif, char *pBSSID, char TID)
-{
-	s32 result = 0;
-	struct host_if_msg msg;
-	struct ba_session_info *pBASessionInfo = &msg.body.session_info;
-	struct host_if_drv *hif_drv = vif->hif_drv;
-
-	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
-		return -EFAULT;
-	}
-
-	memset(&msg, 0, sizeof(struct host_if_msg));
-
-	msg.id = HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS;
-
-	memcpy(pBASessionInfo->bssid, pBSSID, ETH_ALEN);
-	pBASessionInfo->tid = TID;
-	msg.vif = vif;
-
-	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
-
-	down(&hif_sema_wait_response);
-
-	return result;
-}
-
-s32 wilc_setup_ipaddress(struct wilc_vif *vif, u8 *u16ipadd, u8 idx)
-{
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
 	struct host_if_drv *hif_drv = vif->hif_drv;
 
-	return 0;
-
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -4660,26 +4299,25 @@
 
 	msg.id = HOST_IF_MSG_SET_IPADDRESS;
 
-	msg.body.ip_info.ip_addr = u16ipadd;
+	msg.body.ip_info.ip_addr = ip_addr;
 	msg.vif = vif;
 	msg.body.ip_info.idx = idx;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
 	return result;
 }
 
-static s32 host_int_get_ipaddress(struct wilc_vif *vif,
-				  struct host_if_drv *hif_drv,
-				  u8 *u16ipadd, u8 idx)
+static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
 {
-	s32 result = 0;
+	int result = 0;
 	struct host_if_msg msg;
+	struct host_if_drv *hif_drv = vif->hif_drv;
 
 	if (!hif_drv) {
-		PRINT_ER("driver is null\n");
+		netdev_err(vif->ndev, "driver is null\n");
 		return -EFAULT;
 	}
 
@@ -4687,13 +4325,51 @@
 
 	msg.id = HOST_IF_MSG_GET_IPADDRESS;
 
-	msg.body.ip_info.ip_addr = u16ipadd;
+	msg.body.ip_info.ip_addr = ip_addr;
 	msg.vif = vif;
 	msg.body.ip_info.idx = idx;
 
 	result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
 	if (result)
-		PRINT_ER("wilc_mq_send fail\n");
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
 	return result;
 }
+
+int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
+{
+	int ret = 0;
+	struct host_if_msg msg;
+
+	memset(&msg, 0, sizeof(struct host_if_msg));
+
+	msg.id = HOST_IF_MSG_SET_TX_POWER;
+	msg.body.tx_power.tx_pwr = tx_power;
+	msg.vif = vif;
+
+	ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+	if (ret)
+		netdev_err(vif->ndev, "wilc_mq_send fail\n");
+
+	return ret;
+}
+
+int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
+{
+	int ret = 0;
+	struct host_if_msg msg;
+
+	memset(&msg, 0, sizeof(struct host_if_msg));
+
+	msg.id = HOST_IF_MSG_GET_TX_POWER;
+	msg.vif = vif;
+
+	ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+	if (ret)
+		netdev_err(vif->ndev, "Failed to get TX PWR\n");
+
+	down(&hif_sema_wait_response);
+	*tx_power = msg.body.tx_power.tx_pwr;
+
+	return ret;
+}
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 8faac27..795c18c 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -96,7 +96,7 @@
 	MBPS_54		= 54
 };
 
-struct cfg_param_val {
+struct cfg_param_attr {
 	u32 flag;
 	u8 ht_enable;
 	u8 bss_type;
@@ -144,8 +144,8 @@
 };
 
 struct found_net_info {
-	u8 au8bssid[6];
-	s8 s8rssi;
+	u8 bssid[6];
+	s8 rssi;
 };
 
 enum scan_event {
@@ -168,13 +168,13 @@
 	PMKSA,
 };
 
-typedef void (*wilc_scan_result)(enum scan_event, tstrNetworkInfo *,
-				  void *, void *);
+typedef void (*wilc_scan_result)(enum scan_event, struct network_info *,
+				 void *, void *);
 
 typedef void (*wilc_connect_result)(enum conn_event,
-				     tstrConnectInfo *,
+				     struct connect_info *,
 				     u8,
-				     tstrDisconnectNotifInfo *,
+				     struct disconnect_info *,
 				     void *);
 
 typedef void (*wilc_remain_on_chan_expired)(void *, u32);
@@ -186,13 +186,13 @@
 };
 
 struct hidden_net_info {
-	u8  *pu8ssid;
-	u8 u8ssidlen;
+	u8  *ssid;
+	u8 ssid_len;
 };
 
 struct hidden_network {
-	struct hidden_net_info *pstrHiddenNetworkInfo;
-	u8 u8ssidnum;
+	struct hidden_net_info *net_info;
+	u8 n_ssids;
 };
 
 struct user_scan_req {
@@ -203,9 +203,9 @@
 };
 
 struct user_conn_req {
-	u8 *pu8bssid;
-	u8 *pu8ssid;
-	u8 u8security;
+	u8 *bssid;
+	u8 *ssid;
+	u8 security;
 	enum AUTHTYPE auth_type;
 	size_t ssid_len;
 	u8 *ies;
@@ -217,6 +217,7 @@
 
 struct drv_handler {
 	u32 handler;
+	u8 mac_idx;
 };
 
 struct op_mode {
@@ -240,7 +241,7 @@
 
 struct remain_ch {
 	u16 ch;
-	u32 u32duration;
+	u32 duration;
 	wilc_remain_on_chan_expired expired;
 	wilc_remain_on_chan_ready ready;
 	void *arg;
@@ -271,14 +272,12 @@
 	enum host_if_state hif_state;
 
 	u8 assoc_bssid[ETH_ALEN];
-	struct cfg_param_val cfg_values;
+	struct cfg_param_attr cfg_values;
 
 	struct semaphore sem_cfg_values;
 	struct semaphore sem_test_key_block;
 	struct semaphore sem_test_disconn_block;
 	struct semaphore sem_get_rssi;
-	struct semaphore sem_get_link_speed;
-	struct semaphore sem_get_chnl;
 	struct semaphore sem_inactive_time;
 
 	struct timer_list scan_timer;
@@ -312,68 +311,60 @@
 			     u8 index);
 int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
 			    u8 index, u8 mode, enum AUTHTYPE auth_type);
-s32 wilc_add_ptk(struct wilc_vif *vif, const u8 *pu8Ptk, u8 u8PtkKeylen,
-		 const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic,
-		 u8 mode, u8 u8Ciphermode, u8 u8Idx);
+int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
+		 const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
+		 u8 mode, u8 cipher_mode, u8 index);
 s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
 			   u32 *pu32InactiveTime);
-s32 wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *pu8RxGtk, u8 u8GtkKeylen,
-		    u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC,
-		    const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode,
-		    u8 u8Ciphermode);
-s32 wilc_add_tx_gtk(struct host_if_drv *hWFIDrv, u8 u8KeyLen,
-			u8 *pu8TxGtk, u8 u8KeyIdx);
-s32 wilc_set_pmkid_info(struct wilc_vif *vif,
-			struct host_if_pmkid_attr *pu8PmkidInfoArray);
-s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress);
-s32 wilc_set_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress);
-int wilc_wait_msg_queue_idle(void);
-s32 wilc_set_start_scan_req(struct host_if_drv *hWFIDrv, u8 scanSource);
-s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid,
-		      size_t ssidLen, const u8 *pu8IEs, size_t IEsLen,
-		      wilc_connect_result pfConnectResult, void *pvUserArg,
-		      u8 u8security, enum AUTHTYPE tenuAuth_type,
-		      u8 u8channel, void *pJoinParams);
-s32 wilc_flush_join_req(struct wilc_vif *vif);
-s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode);
+int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
+		    u8 index, u32 key_rsc_len, const u8 *key_rsc,
+		    const u8 *rx_mic, const u8 *tx_mic, u8 mode,
+		    u8 cipher_mode);
+int wilc_set_pmkid_info(struct wilc_vif *vif,
+			struct host_if_pmkid_attr *pmkid);
+int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
+int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
+		      size_t ssid_len, const u8 *ies, size_t ies_len,
+		      wilc_connect_result connect_result, void *user_arg,
+		      u8 security, enum AUTHTYPE auth_type,
+		      u8 channel, void *join_params);
+int wilc_disconnect(struct wilc_vif *vif, u16 reason_code);
 int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel);
-s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi);
-s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType,
-	      u8 *pu8ChnlFreqList, u8 u8ChnlListLen, const u8 *pu8IEs,
-	      size_t IEsLen, wilc_scan_result ScanResult, void *pvUserArg,
-	      struct hidden_network *pstrHiddenNetwork);
-s32 wilc_hif_set_cfg(struct wilc_vif *vif,
-		     struct cfg_param_val *pstrCfgParamVal);
-s32 wilc_init(struct net_device *dev, struct host_if_drv **phWFIDrv);
-s32 wilc_deinit(struct wilc_vif *vif);
-s32 wilc_add_beacon(struct wilc_vif *vif, u32 u32Interval, u32 u32DTIMPeriod,
-		    u32 u32HeadLen, u8 *pu8Head, u32 u32TailLen, u8 *pu8Tail);
+int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level);
+int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
+	      u8 *ch_freq_list, u8 ch_list_len, const u8 *ies,
+	      size_t ies_len, wilc_scan_result scan_result, void *user_arg,
+	      struct hidden_network *hidden_network);
+int wilc_hif_set_cfg(struct wilc_vif *vif,
+		     struct cfg_param_attr *cfg_param);
+int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler);
+int wilc_deinit(struct wilc_vif *vif);
+int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
+		    u32 head_len, u8 *head, u32 tail_len, u8 *tail);
 int wilc_del_beacon(struct wilc_vif *vif);
 int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param);
-s32 wilc_del_allstation(struct wilc_vif *vif, u8 pu8MacAddr[][ETH_ALEN]);
+int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN]);
 int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr);
-s32 wilc_edit_station(struct wilc_vif *vif,
-		      struct add_sta_param *pstrStaParams);
-s32 wilc_set_power_mgmt(struct wilc_vif *vif, bool bIsEnabled, u32 u32Timeout);
-s32 wilc_setup_multicast_filter(struct wilc_vif *vif, bool bIsEnabled,
-				u32 u32count);
-s32 wilc_setup_ipaddress(struct wilc_vif *vif, u8 *u16ipadd, u8 idx);
-s32 wilc_del_all_rx_ba_session(struct wilc_vif *vif, char *pBSSID, char TID);
-s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID,
-			   u32 u32duration, u16 chan,
-			   wilc_remain_on_chan_expired RemainOnChanExpired,
-			   wilc_remain_on_chan_ready RemainOnChanReady,
-			   void *pvUserArg);
-s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID);
-s32 wilc_frame_register(struct wilc_vif *vif, u16 u16FrameType, bool bReg);
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index);
+int wilc_edit_station(struct wilc_vif *vif,
+		      struct add_sta_param *sta_param);
+int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout);
+int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
+				u32 count);
+int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
+int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
+			   u32 duration, u16 chan,
+			   wilc_remain_on_chan_expired expired,
+			   wilc_remain_on_chan_ready ready,
+			   void *user_arg);
+int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id);
+int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
+int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx);
 int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode);
-
-void wilc_free_join_params(void *pJoinParams);
-
-s32 wilc_get_statistics(struct wilc_vif *vif, struct rf_info *pstrStatistics);
+int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
 void wilc_resolve_disconnect_aberration(struct wilc_vif *vif);
 int wilc_get_vif_idx(struct wilc_vif *vif);
+int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
+int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
 
 extern bool wilc_optaining_ip;
 extern u8 wilc_connected_ssid[6];
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index e550027..7d9e5de 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -7,22 +7,20 @@
  *  @version	1.0
  */
 #include "wilc_wfi_cfgoperations.h"
-#include "linux_wlan_common.h"
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
 
-
 struct wilc_wfi_radiotap_hdr {
 	struct ieee80211_radiotap_header hdr;
 	u8 rate;
-} __attribute__((packed));
+} __packed;
 
 struct wilc_wfi_radiotap_cb_hdr {
 	struct ieee80211_radiotap_header hdr;
 	u8 rate;
 	u8 dump;
 	u16 tx_flags;
-} __attribute__((packed));
+} __packed;
 
 static struct net_device *wilc_wfi_mon; /* global monitor netdev */
 
@@ -53,15 +51,11 @@
 	struct wilc_wfi_radiotap_hdr *hdr;
 	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
 
-	PRINT_INFO(HOSTAPD_DBG, "In monitor interface receive function\n");
-
-	if (wilc_wfi_mon == NULL)
+	if (!wilc_wfi_mon)
 		return;
 
-	if (!netif_running(wilc_wfi_mon)) {
-		PRINT_INFO(HOSTAPD_DBG, "Monitor interface already RUNNING\n");
+	if (!netif_running(wilc_wfi_mon))
 		return;
-	}
 
 	/* Get WILC header */
 	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
@@ -71,18 +65,15 @@
 	pkt_offset = GET_PKT_OFFSET(header);
 
 	if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
-
 		/* hostapd callback mgmt frame */
 
 		skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_cb_hdr));
-		if (skb == NULL) {
-			PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb");
+		if (!skb)
 			return;
-		}
 
 		memcpy(skb_put(skb, size), buff, size);
 
-		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *) skb_push(skb, sizeof(*cb_hdr));
+		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb, sizeof(*cb_hdr));
 		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
 
 		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
@@ -103,29 +94,21 @@
 		}
 
 	} else {
-
 		skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_hdr));
 
-		if (skb == NULL) {
-			PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb");
+		if (!skb)
 			return;
-		}
 
 		memcpy(skb_put(skb, size), buff, size);
-		hdr = (struct wilc_wfi_radiotap_hdr *) skb_push(skb, sizeof(*hdr));
+		hdr = (struct wilc_wfi_radiotap_hdr *)skb_push(skb, sizeof(*hdr));
 		memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
 		hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
 		hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
-		PRINT_INFO(HOSTAPD_DBG, "Radiotap len %d\n", hdr->hdr.it_len);
 		hdr->hdr.it_present = cpu_to_le32
 				(1 << IEEE80211_RADIOTAP_RATE);                   /* | */
-		PRINT_INFO(HOSTAPD_DBG, "Presentflags %d\n", hdr->hdr.it_present);
 		hdr->rate = 5; /* txrate->bitrate / 5; */
-
 	}
 
-
-
 	skb->dev = wilc_wfi_mon;
 	skb_set_mac_header(skb, 0);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -134,8 +117,6 @@
 	memset(skb->cb, 0, sizeof(skb->cb));
 
 	netif_rx(skb);
-
-
 }
 
 struct tx_complete_mon_data {
@@ -145,48 +126,30 @@
 
 static void mgmt_tx_complete(void *priv, int status)
 {
-
-	struct tx_complete_mon_data *pv_data = (struct tx_complete_mon_data *)priv;
-	u8 *buf =  pv_data->buff;
-
-
-
-	if (status == 1) {
-		if (INFO || buf[0] == 0x10 || buf[0] == 0xb0)
-			PRINT_INFO(HOSTAPD_DBG, "Packet sent successfully - Size = %d - Address = %p.\n", pv_data->size, pv_data->buff);
-	} else {
-		PRINT_INFO(HOSTAPD_DBG, "Couldn't send packet - Size = %d - Address = %p.\n", pv_data->size, pv_data->buff);
-	}
-
-
+	struct tx_complete_mon_data *pv_data = priv;
 
 	/* incase of fully hosting mode, the freeing will be done in response to the cfg packet */
 	kfree(pv_data->buff);
 
 	kfree(pv_data);
 }
+
 static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
 {
 	struct tx_complete_mon_data *mgmt_tx = NULL;
 
-	if (dev == NULL) {
-		PRINT_D(HOSTAPD_DBG, "ERROR: dev == NULL\n");
+	if (!dev)
 		return -EFAULT;
-	}
 
 	netif_stop_queue(dev);
-	mgmt_tx = kmalloc(sizeof(struct tx_complete_mon_data), GFP_ATOMIC);
-	if (mgmt_tx == NULL) {
-		PRINT_ER("Failed to allocate memory for mgmt_tx structure\n");
-		return -EFAULT;
-	}
+	mgmt_tx = kmalloc(sizeof(*mgmt_tx), GFP_ATOMIC);
+	if (!mgmt_tx)
+		return -ENOMEM;
 
 	mgmt_tx->buff = kmalloc(len, GFP_ATOMIC);
-	if (mgmt_tx->buff == NULL) {
-		PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
+	if (!mgmt_tx->buff) {
 		kfree(mgmt_tx);
-		return -EFAULT;
-
+		return -ENOMEM;
 	}
 
 	mgmt_tx->size = len;
@@ -211,47 +174,30 @@
 static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
-	u32 rtap_len, i, ret = 0;
+	u32 rtap_len, ret = 0;
 	struct WILC_WFI_mon_priv  *mon_priv;
 
 	struct sk_buff *skb2;
 	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
 
-	if (wilc_wfi_mon == NULL)
+	if (!wilc_wfi_mon)
 		return -EFAULT;
 
 	mon_priv = netdev_priv(wilc_wfi_mon);
-
-	if (mon_priv == NULL) {
-		PRINT_ER("Monitor interface private structure is NULL\n");
+	if (!mon_priv)
 		return -EFAULT;
-	}
-
-
 	rtap_len = ieee80211_get_radiotap_len(skb->data);
-	if (skb->len < rtap_len) {
-		PRINT_ER("Error in radiotap header\n");
+	if (skb->len < rtap_len)
 		return -1;
-	}
-	/* skip the radiotap header */
-	PRINT_INFO(HOSTAPD_DBG, "Radiotap len: %d\n", rtap_len);
 
-	if (INFO) {
-		for (i = 0; i < rtap_len; i++)
-			PRINT_INFO(HOSTAPD_DBG, "Radiotap_hdr[%d] %02x\n", i, skb->data[i]);
-	}
-	/* Skip the ratio tap header */
 	skb_pull(skb, rtap_len);
 
-	if (skb->data[0] == 0xc0)
-		PRINT_INFO(HOSTAPD_DBG, "%x:%x:%x:%x:%x%x\n", skb->data[4], skb->data[5], skb->data[6], skb->data[7], skb->data[8], skb->data[9]);
-
 	if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
 		skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
 
 		memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
 
-		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *) skb_push(skb2, sizeof(*cb_hdr));
+		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb2, sizeof(*cb_hdr));
 		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
 
 		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
@@ -278,24 +224,19 @@
 	}
 	skb->dev = mon_priv->real_ndev;
 
-	PRINT_INFO(HOSTAPD_DBG, "Skipping the radiotap header\n");
-
-
-
-	/* actual deliver of data is device-specific, and not shown here */
-	PRINT_INFO(HOSTAPD_DBG, "SKB netdevice name = %s\n", skb->dev->name);
-	PRINT_INFO(HOSTAPD_DBG, "MONITOR real dev name = %s\n", mon_priv->real_ndev->name);
-
 	/* Identify if Ethernet or MAC header (data or mgmt) */
 	memcpy(srcAdd, &skb->data[10], 6);
 	memcpy(bssid, &skb->data[16], 6);
 	/* if source address and bssid fields are equal>>Mac header */
 	/*send it to mgmt frames handler */
 	if (!(memcmp(srcAdd, bssid, 6))) {
-		mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
+		ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
+		if (ret)
+			netdev_err(dev, "fail to mgmt tx\n");
 		dev_kfree_skb(skb);
-	} else
+	} else {
 		ret = wilc_mac_xmit(skb, mon_priv->real_ndev);
+	}
 
 	return ret;
 }
@@ -316,23 +257,16 @@
  */
 struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev)
 {
-
-
 	u32 ret = 0;
 	struct WILC_WFI_mon_priv *priv;
 
 	/*If monitor interface is already initialized, return it*/
-	if (wilc_wfi_mon) {
+	if (wilc_wfi_mon)
 		return wilc_wfi_mon;
-	}
 
 	wilc_wfi_mon = alloc_etherdev(sizeof(struct WILC_WFI_mon_priv));
-	if (!wilc_wfi_mon) {
-		PRINT_ER("failed to allocate memory\n");
+	if (!wilc_wfi_mon)
 		return NULL;
-
-	}
-
 	wilc_wfi_mon->type = ARPHRD_IEEE80211_RADIOTAP;
 	strncpy(wilc_wfi_mon->name, name, IFNAMSIZ);
 	wilc_wfi_mon->name[IFNAMSIZ - 1] = 0;
@@ -340,14 +274,12 @@
 
 	ret = register_netdevice(wilc_wfi_mon);
 	if (ret) {
-		PRINT_ER(" register_netdevice failed (%d)\n", ret);
+		netdev_err(real_dev, "register_netdevice failed\n");
 		return NULL;
 	}
 	priv = netdev_priv(wilc_wfi_mon);
-	if (priv == NULL) {
-		PRINT_ER("private structure is NULL\n");
+	if (!priv)
 		return NULL;
-	}
 
 	priv->real_ndev = real_dev;
 
@@ -367,14 +299,11 @@
 {
 	bool rollback_lock = false;
 
-	if (wilc_wfi_mon != NULL) {
-		PRINT_D(HOSTAPD_DBG, "In Deinit monitor interface\n");
-		PRINT_D(HOSTAPD_DBG, "RTNL is being locked\n");
+	if (wilc_wfi_mon) {
 		if (rtnl_is_locked()) {
 			rtnl_unlock();
 			rollback_lock = true;
 		}
-		PRINT_D(HOSTAPD_DBG, "Unregister netdev\n");
 		unregister_netdev(wilc_wfi_mon);
 
 		if (rollback_lock) {
@@ -384,5 +313,4 @@
 		wilc_wfi_mon = NULL;
 	}
 	return 0;
-
 }
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 54fe9d7..bfa754b 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,5 +1,4 @@
 #include "wilc_wfi_cfgoperations.h"
-#include "linux_wlan_common.h"
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
 
@@ -13,7 +12,6 @@
 
 #include <linux/kthread.h>
 #include <linux/firmware.h>
-#include <linux/delay.h>
 
 #include <linux/init.h>
 #include <linux/netdevice.h>
@@ -25,7 +23,8 @@
 
 #include <linux/semaphore.h>
 
-static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr);
+static int dev_state_ev_handler(struct notifier_block *this,
+				unsigned long event, void *ptr);
 
 static struct notifier_block g_dev_notifier = {
 	.notifier_call = dev_state_ev_handler
@@ -57,9 +56,10 @@
 
 };
 
-static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr)
+static int dev_state_ev_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
 {
-	struct in_ifaddr *dev_iface = (struct in_ifaddr *)ptr;
+	struct in_ifaddr *dev_iface = ptr;
 	struct wilc_priv *priv;
 	struct host_if_drv *hif_drv;
 	struct net_device *dev;
@@ -68,66 +68,48 @@
 	u8 null_ip[4] = {0};
 	char wlan_dev_name[5] = "wlan0";
 
-	if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev) {
-		PRINT_D(GENERIC_DBG, "dev_iface = NULL\n");
+	if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev)
 		return NOTIFY_DONE;
-	}
 
 	if (memcmp(dev_iface->ifa_label, "wlan0", 5) &&
-	    memcmp(dev_iface->ifa_label, "p2p0", 4)) {
-		PRINT_D(GENERIC_DBG, "Interface is neither WLAN0 nor P2P0\n");
+	    memcmp(dev_iface->ifa_label, "p2p0", 4))
 		return NOTIFY_DONE;
-	}
 
 	dev  = (struct net_device *)dev_iface->ifa_dev->dev;
-	if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) {
-		PRINT_D(GENERIC_DBG, "No Wireless registerd\n");
+	if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
 		return NOTIFY_DONE;
-	}
-	priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
-	if (!priv) {
-		PRINT_D(GENERIC_DBG, "No Wireless Priv\n");
-		return NOTIFY_DONE;
-	}
-	hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
-	vif = netdev_priv(dev);
-	if (!vif || !hif_drv) {
-		PRINT_D(GENERIC_DBG, "No Wireless Priv\n");
-		return NOTIFY_DONE;
-	}
 
-	PRINT_INFO(GENERIC_DBG, "dev_state_ev_handler +++\n");
+	priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
+	if (!priv)
+		return NOTIFY_DONE;
+
+	hif_drv = (struct host_if_drv *)priv->hif_drv;
+	vif = netdev_priv(dev);
+	if (!vif || !hif_drv)
+		return NOTIFY_DONE;
 
 	switch (event) {
 	case NETDEV_UP:
-		PRINT_D(GENERIC_DBG, "dev_state_ev_handler event=NETDEV_UP %p\n", dev);
-
-		PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Obtained ===============\n\n");
-
 		if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) {
 			hif_drv->IFC_UP = 1;
 			wilc_optaining_ip = false;
 			del_timer(&wilc_during_ip_timer);
-			PRINT_D(GENERIC_DBG, "IP obtained , enable scan\n");
 		}
 
 		if (wilc_enable_ps)
 			wilc_set_power_mgmt(vif, 1, 0);
 
-		PRINT_D(GENERIC_DBG, "[%s] Up IP\n", dev_iface->ifa_label);
+		netdev_dbg(dev, "[%s] Up IP\n", dev_iface->ifa_label);
 
 		ip_addr_buf = (char *)&dev_iface->ifa_address;
-		PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n",
-			ip_addr_buf[0], ip_addr_buf[1],
-			ip_addr_buf[2], ip_addr_buf[3]);
-		wilc_setup_ipaddress(vif, ip_addr_buf, vif->u8IfIdx);
+		netdev_dbg(dev, "IP add=%d:%d:%d:%d\n",
+			   ip_addr_buf[0], ip_addr_buf[1],
+			   ip_addr_buf[2], ip_addr_buf[3]);
+		wilc_setup_ipaddress(vif, ip_addr_buf, vif->idx);
 
 		break;
 
 	case NETDEV_DOWN:
-		PRINT_D(GENERIC_DBG, "dev_state_ev_handler event=NETDEV_DOWN %p\n", dev);
-
-		PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Released ===============\n\n");
 		if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) {
 			hif_drv->IFC_UP = 0;
 			wilc_optaining_ip = false;
@@ -138,21 +120,18 @@
 
 		wilc_resolve_disconnect_aberration(vif);
 
-		PRINT_D(GENERIC_DBG, "[%s] Down IP\n", dev_iface->ifa_label);
+		netdev_dbg(dev, "[%s] Down IP\n", dev_iface->ifa_label);
 
 		ip_addr_buf = null_ip;
-		PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n",
-			ip_addr_buf[0], ip_addr_buf[1],
-			ip_addr_buf[2], ip_addr_buf[3]);
+		netdev_dbg(dev, "IP add=%d:%d:%d:%d\n",
+			   ip_addr_buf[0], ip_addr_buf[1],
+			   ip_addr_buf[2], ip_addr_buf[3]);
 
-		wilc_setup_ipaddress(vif, ip_addr_buf, vif->u8IfIdx);
+		wilc_setup_ipaddress(vif, ip_addr_buf, vif->idx);
 
 		break;
 
 	default:
-		PRINT_INFO(GENERIC_DBG, "dev_state_ev_handler event=default\n");
-		PRINT_INFO(GENERIC_DBG, "[%s] unknown dev event: %lu\n", dev_iface->ifa_label, event);
-
 		break;
 	}
 
@@ -163,14 +142,13 @@
 {
 	struct wilc_vif *vif;
 	struct wilc *wilc;
-	struct net_device *dev = (struct net_device *)user_data;
+	struct net_device *dev = user_data;
 
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
-	PRINT_D(INT_DBG, "Interrupt received UH\n");
 
 	if (wilc->close) {
-		PRINT_ER("Driver is CLOSING: Can't handle UH interrupt\n");
+		netdev_err(dev, "Can't handle UH interrupt\n");
 		return IRQ_HANDLED;
 	}
 	return IRQ_WAKE_THREAD;
@@ -180,16 +158,16 @@
 {
 	struct wilc_vif *vif;
 	struct wilc *wilc;
+	struct net_device *dev = userdata;
 
 	vif = netdev_priv(userdata);
 	wilc = vif->wilc;
 
 	if (wilc->close) {
-		PRINT_ER("Driver is CLOSING: Can't handle BH interrupt\n");
+		netdev_err(dev, "Can't handle BH interrupt\n");
 		return IRQ_HANDLED;
 	}
 
-	PRINT_D(INT_DBG, "Interrupt received BH\n");
 	wilc_handle_isr(wilc);
 
 	return IRQ_HANDLED;
@@ -209,7 +187,7 @@
 		wl->dev_irq_num = gpio_to_irq(wl->gpio);
 	} else {
 		ret = -1;
-		PRINT_ER("could not obtain gpio for WILC_INTR\n");
+		netdev_err(dev, "could not obtain gpio for WILC_INTR\n");
 	}
 
 	if (ret != -1 && request_threaded_irq(wl->dev_irq_num,
@@ -217,12 +195,13 @@
 					      isr_bh_routine,
 					      IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 					      "WILC_IRQ", dev) < 0) {
-		PRINT_ER("Failed to request IRQ for GPIO: %d\n", wl->gpio);
+		netdev_err(dev, "Failed to request IRQ GPIO: %d\n", wl->gpio);
 		gpio_free(wl->gpio);
 		ret = -1;
 	} else {
-		PRINT_D(INIT_DBG, "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n",
-			wl->dev_irq_num, wl->gpio);
+		netdev_dbg(dev,
+			   "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n",
+			   wl->dev_irq_num, wl->gpio);
 	}
 
 	return ret;
@@ -243,22 +222,14 @@
 	}
 }
 
-void wilc_dbg(u8 *buff)
-{
-	PRINT_D(INIT_DBG, "%d\n", *buff);
-}
-
 int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout)
 {
 	/* FIXME: replace with mutex_lock or wait_for_completion */
 	int error = -1;
 
-	PRINT_D(LOCK_DBG, "Locking %p\n", vp);
 	if (vp)
-		error = down_timeout((struct semaphore *)vp,
+		error = down_timeout(vp,
 				     msecs_to_jiffies(timeout));
-	else
-		PRINT_ER("Failed, mutex is NULL\n");
 	return error;
 }
 
@@ -275,8 +246,6 @@
 		} else {
 			wilc->mac_status = status;
 		}
-	} else if (flag == WILC_MAC_INDICATE_SCAN) {
-		PRINT_D(GENERIC_DBG, "Scanning ...\n");
 	}
 }
 
@@ -288,26 +257,19 @@
 	bssid = mac_header + 10;
 	bssid1 = mac_header + 4;
 
-	for (i = 0; i < wilc->vif_num; i++)
-		if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN) ||
-		    !memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
-			return wilc->vif[i]->ndev;
+	for (i = 0; i < wilc->vif_num; i++) {
+		if (wilc->vif[i]->mode == STATION_MODE)
+			if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
+				return wilc->vif[i]->ndev;
+		if (wilc->vif[i]->mode == AP_MODE)
+			if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN))
+				return wilc->vif[i]->ndev;
+	}
 
-	PRINT_INFO(INIT_DBG, "Invalide handle\n");
-	for (i = 0; i < 25; i++)
-		PRINT_D(INIT_DBG, "%02x ", mac_header[i]);
-	bssid = mac_header + 18;
-	bssid1 = mac_header + 12;
-	for (i = 0; i < wilc->vif_num; i++)
-		if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN) ||
-		    !memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
-			return wilc->vif[i]->ndev;
-
-	PRINT_INFO(INIT_DBG, "\n");
 	return NULL;
 }
 
-int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid)
+int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
 {
 	int i = 0;
 	int ret = -1;
@@ -320,6 +282,7 @@
 	for (i = 0; i < wilc->vif_num; i++)
 		if (wilc->vif[i]->ndev == wilc_netdev) {
 			memcpy(wilc->vif[i]->bssid, bssid, 6);
+			wilc->vif[i]->mode = mode;
 			ret = 0;
 			break;
 		}
@@ -362,28 +325,21 @@
 
 	up(&wl->txq_thread_started);
 	while (1) {
-		PRINT_D(TX_DBG, "txq_task Taking a nap :)\n");
 		down(&wl->txq_event);
-		PRINT_D(TX_DBG, "txq_task Who waked me up :$\n");
 
 		if (wl->close) {
 			up(&wl->txq_thread_started);
 
 			while (!kthread_should_stop())
 				schedule();
-
-			PRINT_D(TX_DBG, "TX thread stopped\n");
 			break;
 		}
-		PRINT_D(TX_DBG, "txq_task handle the sending packet and let me go to sleep.\n");
 #if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
 		ret = wilc_wlan_handle_txq(dev, &txq_count);
 #else
 		do {
 			ret = wilc_wlan_handle_txq(dev, &txq_count);
 			if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
-				PRINT_D(TX_DBG, "Waking up queue\n");
-
 				if (netif_queue_stopped(wl->vif[0]->ndev))
 					netif_wake_queue(wl->vif[0]->ndev);
 				if (netif_queue_stopped(wl->vif[1]->ndev))
@@ -391,9 +347,6 @@
 			}
 
 			if (ret == WILC_TX_ERR_NO_BUF) {
-				do {
-					msleep(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight);
-				} while (0);
 				backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
 				if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
 					backoff_weight = TX_BACKOFF_WEIGHT_MAX;
@@ -410,43 +363,31 @@
 	return 0;
 }
 
-void wilc_rx_complete(struct wilc *nic)
-{
-	PRINT_D(RX_DBG, "RX completed\n");
-}
-
 int wilc_wlan_get_firmware(struct net_device *dev)
 {
 	struct wilc_vif *vif;
 	struct wilc *wilc;
-	int ret = 0;
+	int chip_id, ret = 0;
 	const struct firmware *wilc_firmware;
 	char *firmware;
 
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
 
-	if (vif->iftype == AP_MODE) {
-		firmware = AP_FIRMWARE;
-	} else if (vif->iftype == STATION_MODE) {
-		firmware = STA_FIRMWARE;
-	} else {
-		PRINT_D(INIT_DBG, "Get P2P_CONCURRENCY_FIRMWARE\n");
-		firmware = P2P_CONCURRENCY_FIRMWARE;
-	}
+	chip_id = wilc_get_chipid(wilc, false);
 
-	if (!vif) {
-		PRINT_ER("vif is NULL\n");
-		goto _fail_;
-	}
+	if (chip_id < 0x1003a0)
+		firmware = FIRMWARE_1002;
+	else
+		firmware = FIRMWARE_1003;
 
-	if (!(&vif->ndev->dev)) {
-		PRINT_ER("&vif->ndev->dev  is NULL\n");
+	netdev_info(dev, "loading firmware %s\n", firmware);
+
+	if (!(&vif->ndev->dev))
 		goto _fail_;
-	}
 
 	if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
-		PRINT_ER("%s - firmare not available\n", firmware);
+		netdev_err(dev, "%s - firmare not available\n", firmware);
 		ret = -1;
 		goto _fail_;
 	}
@@ -466,20 +407,13 @@
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
 
-	PRINT_D(INIT_DBG, "Starting Firmware ...\n");
 	ret = wilc_wlan_start(wilc);
-	if (ret < 0) {
-		PRINT_ER("Failed to start Firmware\n");
+	if (ret < 0)
 		return ret;
-	}
 
-	PRINT_D(INIT_DBG, "Waiting for Firmware to get ready ...\n");
 	ret = wilc_lock_timeout(wilc, &wilc->sync_event, 5000);
-	if (ret) {
-		PRINT_D(INIT_DBG, "Firmware start timed out");
+	if (ret)
 		return ret;
-	}
-	PRINT_D(INIT_DBG, "Firmware successfully started\n");
 
 	return 0;
 }
@@ -494,128 +428,123 @@
 	wilc = vif->wilc;
 
 	if (!wilc->firmware) {
-		PRINT_ER("Firmware buffer is NULL\n");
+		netdev_err(dev, "Firmware buffer is NULL\n");
 		return -ENOBUFS;
 	}
-	PRINT_D(INIT_DBG, "Downloading Firmware ...\n");
+
 	ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data,
 					  wilc->firmware->size);
 	if (ret < 0)
 		return ret;
 
-	PRINT_D(INIT_DBG, "Freeing FW buffer ...\n");
-	PRINT_D(INIT_DBG, "Releasing firmware\n");
 	release_firmware(wilc->firmware);
 	wilc->firmware = NULL;
 
-	PRINT_D(INIT_DBG, "Download Succeeded\n");
+	netdev_dbg(dev, "Download Succeeded\n");
 
 	return 0;
 }
 
 static int linux_wlan_init_test_config(struct net_device *dev,
-				       struct wilc *wilc)
+				       struct wilc_vif *vif)
 {
 	unsigned char c_val[64];
 	unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff};
-
+	struct wilc *wilc = vif->wilc;
 	struct wilc_priv *priv;
 	struct host_if_drv *hif_drv;
 
-	PRINT_D(TX_DBG, "Start configuring Firmware\n");
-	get_random_bytes(&mac_add[5], 1);
-	get_random_bytes(&mac_add[4], 1);
+	netdev_dbg(dev, "Start configuring Firmware\n");
 	priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
-	hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
-	PRINT_D(INIT_DBG, "Host = %p\n", hif_drv);
+	hif_drv = (struct host_if_drv *)priv->hif_drv;
+	netdev_dbg(dev, "Host = %p\n", hif_drv);
+	wilc_get_mac_address(vif, mac_add);
 
-	PRINT_D(INIT_DBG, "MAC address is : %02x-%02x-%02x-%02x-%02x-%02x\n",
-		mac_add[0], mac_add[1], mac_add[2],
-		mac_add[3], mac_add[4], mac_add[5]);
-	wilc_get_chipid(wilc, 0);
+	netdev_dbg(dev, "MAC address is : %pM\n", mac_add);
+	wilc_get_chipid(wilc, false);
 
 	*(int *)c_val = 1;
 
-	if (!wilc_wlan_cfg_set(wilc, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 0;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = INFRASTRUCTURE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = RATE_AUTO;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = G_MIXED_11B_2_MODE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11G_OPERATING_MODE, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = 1;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = G_SHORT_PREAMBLE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_PREAMBLE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = AUTO_PROT;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = ACTIVE_SCAN;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_SCAN_TYPE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = SITE_SURVEY_OFF;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_SITE_SURVEY, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, c_val, 1, 0, 0))
 		goto _fail_;
 
 	*((int *)c_val) = 0xffff;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0))
 		goto _fail_;
 
 	*((int *)c_val) = 2346;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 0;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_BCAST_SSID, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 1;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_QOS_ENABLE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = NO_POWERSAVE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = NO_SECURITY; /* NO_ENCRYPT, 0x79 */
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11I_MODE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = OPEN_SYSTEM;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_AUTH_TYPE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	strcpy(c_val, "123456790abcdef1234567890");
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_WEP_KEY_VALUE, c_val,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_WEP_KEY_VALUE, c_val,
 			       (strlen(c_val) + 1), 0, 0))
 		goto _fail_;
 
 	strcpy(c_val, "12345678");
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0,
 			       0))
 		goto _fail_;
 
 	strcpy(c_val, "password");
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1),
+	if (!wilc_wlan_cfg_set(vif, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1),
 			       0, 0))
 		goto _fail_;
 
@@ -623,106 +552,106 @@
 	c_val[1] = 168;
 	c_val[2] = 1;
 	c_val[3] = 112;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 3;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 3;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = NORMAL_ACK;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_ACK_POLICY, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 0;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1,
 			       0, 0))
 		goto _fail_;
 
 	c_val[0] = 48;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = 28;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	*((int *)c_val) = 100;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0))
 		goto _fail_;
 
 	c_val[0] = REKEY_DISABLE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_POLICY, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, c_val, 1, 0, 0))
 		goto _fail_;
 
 	*((int *)c_val) = 84600;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0))
 		goto _fail_;
 
 	*((int *)c_val) = 500;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = 1;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = G_SELF_CTS_PROT;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 1;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_ENABLE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = HT_MIXED_MODE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_OPERATING_MODE, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = 1;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	memcpy(c_val, mac_add, 6);
 
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
 		goto _fail_;
 
 	c_val[0] = DETECT_PROTECT_REPORT;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
 			       0, 0))
 		goto _fail_;
 
 	c_val[0] = RTS_CTS_NONHT_PROT;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 0;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = MIMO_MODE;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0))
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0))
 		goto _fail_;
 
 	c_val[0] = 7;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0,
 			       0))
 		goto _fail_;
 
 	c_val[0] = 1;
-	if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1,
+	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1,
 			       1, 1))
 		goto _fail_;
 
@@ -748,7 +677,6 @@
 	if (wl->initialized)	{
 		netdev_info(dev, "Deinitializing wilc1000...\n");
 
-		PRINT_D(INIT_DBG, "Disabling IRQ\n");
 		if (!wl->dev_irq_num &&
 		    wl->hif_func->disable_interrupt) {
 			mutex_lock(&wl->hif_cs);
@@ -758,37 +686,26 @@
 		if (&wl->txq_event)
 			up(&wl->txq_event);
 
-		PRINT_D(INIT_DBG, "Deinitializing Threads\n");
 		wlan_deinitialize_threads(dev);
-
-		PRINT_D(INIT_DBG, "Deinitializing IRQ\n");
 		deinit_irq(dev);
 
 		wilc_wlan_stop(wl);
-
-		PRINT_D(INIT_DBG, "Deinitializing WILC Wlan\n");
 		wilc_wlan_cleanup(dev);
 #if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31)
 		if (!wl->dev_irq_num &&
 		    wl->hif_func->disable_interrupt) {
-
-			PRINT_D(INIT_DBG, "Disabling IRQ 2\n");
-
 			mutex_lock(&wl->hif_cs);
 			wl->hif_func->disable_interrupt(wl);
 			mutex_unlock(&wl->hif_cs);
 		}
 #endif
-
-		PRINT_D(INIT_DBG, "Deinitializing Locks\n");
 		wlan_deinit_locks(dev);
 
 		wl->initialized = false;
 
-		PRINT_D(INIT_DBG, "wilc1000 deinitialization Done\n");
-
+		netdev_dbg(dev, "wilc1000 deinitialization Done\n");
 	} else {
-		PRINT_D(INIT_DBG, "wilc1000 is not initialized\n");
+		netdev_dbg(dev, "wilc1000 is not initialized\n");
 	}
 }
 
@@ -800,8 +717,6 @@
 	vif = netdev_priv(dev);
 	wl = vif->wilc;
 
-	PRINT_D(INIT_DBG, "Initializing Locks ...\n");
-
 	mutex_init(&wl->hif_cs);
 	mutex_init(&wl->rxq_cs);
 
@@ -826,8 +741,6 @@
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
 
-	PRINT_D(INIT_DBG, "De-Initializing Locks\n");
-
 	if (&wilc->hif_cs)
 		mutex_destroy(&wilc->hif_cs);
 
@@ -845,12 +758,10 @@
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
 
-	PRINT_D(INIT_DBG, "Initializing Threads ...\n");
-	PRINT_D(INIT_DBG, "Creating kthread for transmission\n");
 	wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
 				     "K_TXQ_TASK");
 	if (!wilc->txq_thread) {
-		PRINT_ER("couldn't create TXQ thread\n");
+		netdev_err(dev, "couldn't create TXQ thread\n");
 		wilc->close = 0;
 		return -ENOBUFS;
 	}
@@ -863,11 +774,11 @@
 {
 	struct wilc_vif *vif;
 	struct wilc *wl;
+
 	vif = netdev_priv(dev);
 	wl = vif->wilc;
 
 	wl->close = 1;
-	PRINT_D(INIT_DBG, "Deinitializing Threads\n");
 
 	if (&wl->txq_event)
 		up(&wl->txq_event);
@@ -891,20 +802,17 @@
 
 		ret = wilc_wlan_init(dev);
 		if (ret < 0) {
-			PRINT_ER("Initializing WILC_Wlan FAILED\n");
 			ret = -EIO;
 			goto _fail_locks_;
 		}
 
 		if (wl->gpio >= 0 && init_irq(dev)) {
-			PRINT_ER("couldn't initialize IRQ\n");
 			ret = -EIO;
 			goto _fail_locks_;
 		}
 
 		ret = wlan_initialize_threads(dev);
 		if (ret < 0) {
-			PRINT_ER("Initializing Threads FAILED\n");
 			ret = -EIO;
 			goto _fail_wilc_wlan_;
 		}
@@ -912,45 +820,41 @@
 		if (!wl->dev_irq_num &&
 		    wl->hif_func->enable_interrupt &&
 		    wl->hif_func->enable_interrupt(wl)) {
-			PRINT_ER("couldn't initialize IRQ\n");
 			ret = -EIO;
 			goto _fail_irq_init_;
 		}
 
 		if (wilc_wlan_get_firmware(dev)) {
-			PRINT_ER("Can't get firmware\n");
 			ret = -EIO;
 			goto _fail_irq_enable_;
 		}
 
 		ret = wilc1000_firmware_download(dev);
 		if (ret < 0) {
-			PRINT_ER("Failed to download firmware\n");
 			ret = -EIO;
 			goto _fail_irq_enable_;
 		}
 
 		ret = linux_wlan_start_firmware(dev);
 		if (ret < 0) {
-			PRINT_ER("Failed to start firmware\n");
 			ret = -EIO;
 			goto _fail_irq_enable_;
 		}
 
-		if (wilc_wlan_cfg_get(wl, 1, WID_FIRMWARE_VERSION, 1, 0)) {
+		if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
 			int size;
-			char Firmware_ver[20];
+			char firmware_ver[20];
 
-			size = wilc_wlan_cfg_get_val(
-					WID_FIRMWARE_VERSION,
-					Firmware_ver, sizeof(Firmware_ver));
-			Firmware_ver[size] = '\0';
-			PRINT_D(INIT_DBG, "***** Firmware Ver = %s  *******\n", Firmware_ver);
+			size = wilc_wlan_cfg_get_val(WID_FIRMWARE_VERSION,
+						     firmware_ver,
+						     sizeof(firmware_ver));
+			firmware_ver[size] = '\0';
+			netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver);
 		}
-		ret = linux_wlan_init_test_config(dev, wl);
+		ret = linux_wlan_init_test_config(dev, vif);
 
 		if (ret < 0) {
-			PRINT_ER("Failed to configure firmware\n");
+			netdev_err(dev, "Failed to configure firmware\n");
 			ret = -EIO;
 			goto _fail_fw_start_;
 		}
@@ -974,9 +878,9 @@
 		wilc_wlan_cleanup(dev);
 _fail_locks_:
 		wlan_deinit_locks(dev);
-		PRINT_ER("WLAN Iinitialization FAILED\n");
+		netdev_err(dev, "WLAN Iinitialization FAILED\n");
 	} else {
-		PRINT_D(INIT_DBG, "wilc1000 already initialized\n");
+		netdev_dbg(dev, "wilc1000 already initialized\n");
 	}
 	return ret;
 }
@@ -1003,7 +907,7 @@
 	vif = netdev_priv(ndev);
 	wl = vif->wilc;
 
-	if (!wl|| !wl->dev) {
+	if (!wl || !wl->dev) {
 		netdev_err(ndev, "wilc1000: SPI device not ready\n");
 		return -ENODEV;
 	}
@@ -1011,31 +915,45 @@
 	vif = netdev_priv(ndev);
 	wilc = vif->wilc;
 	priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
-	PRINT_D(INIT_DBG, "MAC OPEN[%p]\n", ndev);
+	netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
 
 	ret = wilc_init_host_int(ndev);
-	if (ret < 0) {
-		PRINT_ER("Failed to initialize host interface\n");
-
+	if (ret < 0)
 		return ret;
-	}
 
-	PRINT_D(INIT_DBG, "*** re-init ***\n");
 	ret = wilc1000_wlan_init(ndev, vif);
 	if (ret < 0) {
-		PRINT_ER("Failed to initialize wilc1000\n");
 		wilc_deinit_host_int(ndev);
 		return ret;
 	}
 
-	wilc_set_machw_change_vir_if(ndev, false);
-
-	wilc_get_mac_address(vif, mac_add);
-	PRINT_D(INIT_DBG, "Mac address: %pM\n", mac_add);
-
 	for (i = 0; i < wl->vif_num; i++) {
 		if (ndev == wl->vif[i]->ndev) {
+			if (vif->iftype == AP_MODE) {
+				wilc_set_wfi_drv_handler(vif,
+							 wilc_get_vif_idx(vif),
+							 0);
+			} else if (!wilc_wlan_get_num_conn_ifcs(wilc)) {
+				wilc_set_wfi_drv_handler(vif,
+							 wilc_get_vif_idx(vif),
+							 wilc->open_ifcs);
+			} else {
+				if (memcmp(wilc->vif[i ^ 1]->bssid,
+					   wilc->vif[i ^ 1]->src_addr, 6))
+					wilc_set_wfi_drv_handler(vif,
+							 wilc_get_vif_idx(vif),
+							 0);
+				else
+					wilc_set_wfi_drv_handler(vif,
+							 wilc_get_vif_idx(vif),
+							 1);
+			}
+			wilc_set_operation_mode(vif, vif->iftype);
+
+			wilc_get_mac_address(vif, mac_add);
+			netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
 			memcpy(wl->vif[i]->src_addr, mac_add, ETH_ALEN);
+
 			break;
 		}
 	}
@@ -1043,7 +961,7 @@
 	memcpy(ndev->dev_addr, wl->vif[i]->src_addr, ETH_ALEN);
 
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
-		PRINT_ER("Error: Wrong MAC address\n");
+		netdev_err(ndev, "Wrong MAC address\n");
 		wilc_deinit_host_int(ndev);
 		wilc1000_wlan_deinit(ndev);
 		return -EINVAL;
@@ -1065,7 +983,7 @@
 
 static struct net_device_stats *mac_stats(struct net_device *dev)
 {
-	struct wilc_vif *vif= netdev_priv(dev);
+	struct wilc_vif *vif = netdev_priv(dev);
 
 	return &vif->netstats;
 }
@@ -1080,57 +998,41 @@
 
 	priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
 	vif = netdev_priv(dev);
-	hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
+	hif_drv = (struct host_if_drv *)priv->hif_drv;
 
-	if (!dev)
+	if (dev->flags & IFF_PROMISC)
 		return;
 
-	PRINT_D(INIT_DBG, "Setting Multicast List with count = %d.\n",
-		dev->mc.count);
-
-	if (dev->flags & IFF_PROMISC) {
-		PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets\n");
-		return;
-	}
-
 	if ((dev->flags & IFF_ALLMULTI) ||
 	    (dev->mc.count) > WILC_MULTICAST_TABLE_SIZE) {
-		PRINT_D(INIT_DBG, "Disable multicast filter, retrive all multicast packets\n");
 		wilc_setup_multicast_filter(vif, false, 0);
 		return;
 	}
 
 	if ((dev->mc.count) == 0) {
-		PRINT_D(INIT_DBG, "Enable multicast filter, retrive directed packets only.\n");
 		wilc_setup_multicast_filter(vif, true, 0);
 		return;
 	}
 
 	netdev_for_each_mc_addr(ha, dev) {
 		memcpy(wilc_multicast_mac_addr_list[i], ha->addr, ETH_ALEN);
-		PRINT_D(INIT_DBG, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i,
-			wilc_multicast_mac_addr_list[i][0],
-			wilc_multicast_mac_addr_list[i][1],
-			wilc_multicast_mac_addr_list[i][2],
-			wilc_multicast_mac_addr_list[i][3],
-			wilc_multicast_mac_addr_list[i][4],
-			wilc_multicast_mac_addr_list[i][5]);
+		netdev_dbg(dev, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i,
+			   wilc_multicast_mac_addr_list[i][0],
+			   wilc_multicast_mac_addr_list[i][1],
+			   wilc_multicast_mac_addr_list[i][2],
+			   wilc_multicast_mac_addr_list[i][3],
+			   wilc_multicast_mac_addr_list[i][4],
+			   wilc_multicast_mac_addr_list[i][5]);
 		i++;
 	}
 
 	wilc_setup_multicast_filter(vif, true, (dev->mc.count));
-
-	return;
 }
 
 static void linux_wlan_tx_complete(void *priv, int status)
 {
-	struct tx_complete_data *pv_data = (struct tx_complete_data *)priv;
+	struct tx_complete_data *pv_data = priv;
 
-	if (status == 1)
-		PRINT_D(TX_DBG, "Packet sent successfully - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
-	else
-		PRINT_D(TX_DBG, "Couldn't send packet - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
 	dev_kfree_skb(pv_data->skb);
 	kfree(pv_data);
 }
@@ -1148,16 +1050,13 @@
 	vif = netdev_priv(ndev);
 	wilc = vif->wilc;
 
-	PRINT_D(TX_DBG, "Sending packet just received from TCP/IP\n");
-
 	if (skb->dev != ndev) {
-		PRINT_ER("Packet not destined to this device\n");
+		netdev_err(ndev, "Packet not destined to this device\n");
 		return 0;
 	}
 
 	tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
 	if (!tx_data) {
-		PRINT_ER("Failed to allocate memory for tx_data structure\n");
 		dev_kfree_skb(skb);
 		netif_wake_queue(ndev);
 		return 0;
@@ -1169,21 +1068,19 @@
 
 	eth_h = (struct ethhdr *)(skb->data);
 	if (eth_h->h_proto == 0x8e88)
-		PRINT_D(INIT_DBG, "EAPOL transmitted\n");
+		netdev_dbg(ndev, "EAPOL transmitted\n");
 
 	ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
 
 	udp_buf = (char *)ih + sizeof(struct iphdr);
 	if ((udp_buf[1] == 68 && udp_buf[3] == 67) ||
 	    (udp_buf[1] == 67 && udp_buf[3] == 68))
-		PRINT_D(GENERIC_DBG, "DHCP Message transmitted, type:%x %x %x\n",
-			udp_buf[248], udp_buf[249], udp_buf[250]);
+		netdev_dbg(ndev, "DHCP Message transmitted, type:%x %x %x\n",
+			   udp_buf[248], udp_buf[249], udp_buf[250]);
 
-	PRINT_D(TX_DBG, "Sending packet - Size = %d - Address = %p - SKB = %p\n", tx_data->size, tx_data->buff, tx_data->skb);
-	PRINT_D(TX_DBG, "Adding tx packet to TX Queue\n");
 	vif->netstats.tx_packets++;
 	vif->netstats.tx_bytes += tx_data->size;
-	tx_data->pBssid = wilc->vif[vif->u8IfIdx]->bssid;
+	tx_data->bssid = wilc->vif[vif->idx]->bssid;
 	queue_count = wilc_wlan_txq_add_net_pkt(ndev, (void *)tx_data,
 						tx_data->buff, tx_data->size,
 						linux_wlan_tx_complete);
@@ -1206,39 +1103,29 @@
 	vif = netdev_priv(ndev);
 
 	if (!vif || !vif->ndev || !vif->ndev->ieee80211_ptr ||
-	    !vif->ndev->ieee80211_ptr->wiphy) {
-		PRINT_ER("vif = NULL\n");
+	    !vif->ndev->ieee80211_ptr->wiphy)
 		return 0;
-	}
 
 	priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
 	wl = vif->wilc;
 
-	if (!priv) {
-		PRINT_ER("priv = NULL\n");
+	if (!priv)
 		return 0;
-	}
 
-	hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
+	hif_drv = (struct host_if_drv *)priv->hif_drv;
 
-	PRINT_D(GENERIC_DBG, "Mac close\n");
+	netdev_dbg(ndev, "Mac close\n");
 
-	if (!wl) {
-		PRINT_ER("wl = NULL\n");
+	if (!wl)
 		return 0;
-	}
 
-	if (!hif_drv) {
-		PRINT_ER("hif_drv = NULL\n");
+	if (!hif_drv)
 		return 0;
-	}
 
-	if ((wl->open_ifcs) > 0) {
+	if ((wl->open_ifcs) > 0)
 		wl->open_ifcs--;
-	} else {
-		PRINT_ER("ERROR: MAC close called while number of opened interfaces is zero\n");
+	else
 		return 0;
-	}
 
 	if (vif->ndev) {
 		netif_stop_queue(vif->ndev);
@@ -1247,7 +1134,7 @@
 	}
 
 	if (wl->open_ifcs == 0) {
-		PRINT_D(GENERIC_DBG, "Deinitializing wilc1000\n");
+		netdev_dbg(ndev, "Deinitializing wilc1000\n");
 		wl->close = 1;
 		wilc1000_wlan_deinit(ndev);
 		WILC_WFI_deinit_mon_interface();
@@ -1278,7 +1165,7 @@
 	switch (cmd) {
 	case SIOCSIWPRIV:
 	{
-		struct iwreq *wrq = (struct iwreq *) req;
+		struct iwreq *wrq = (struct iwreq *)req;
 
 		size = wrq->u.data.length;
 
@@ -1291,16 +1178,14 @@
 			if (strncasecmp(buff, "RSSI", length) == 0) {
 				priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
 				ret = wilc_get_rssi(vif, &rssi);
-				if (ret)
-					PRINT_ER("Failed to send get rssi param's message queue ");
-				PRINT_INFO(GENERIC_DBG, "RSSI :%d\n", rssi);
+				netdev_info(ndev, "RSSI :%d\n", rssi);
 
 				rssi += 5;
 
 				snprintf(buff, size, "rssi %d", rssi);
 
 				if (copy_to_user(wrq->u.data.pointer, buff, size)) {
-					PRINT_ER("%s: failed to copy data to user buffer\n", __func__);
+					netdev_err(ndev, "failed to copy\n");
 					ret = -EFAULT;
 					goto done;
 				}
@@ -1311,7 +1196,7 @@
 
 	default:
 	{
-		PRINT_INFO(GENERIC_DBG, "Command - %d - has been received\n", cmd);
+		netdev_info(ndev, "Command - %d - has been received\n", cmd);
 		ret = -EOPNOTSUPP;
 		goto done;
 	}
@@ -1333,6 +1218,9 @@
 	struct net_device *wilc_netdev;
 	struct wilc_vif *vif;
 
+	if (!wilc)
+		return;
+
 	wilc_netdev = get_if_handler(wilc, buff);
 	if (!wilc_netdev)
 		return;
@@ -1345,18 +1233,11 @@
 		buff_to_send = buff;
 
 		skb = dev_alloc_skb(frame_len);
-		if (!skb) {
-			PRINT_ER("Low memory - packet droped\n");
+		if (!skb)
 			return;
-		}
 
-		if (!wilc || !wilc_netdev)
-			PRINT_ER("wilc_netdev in wilc is NULL");
 		skb->dev = wilc_netdev;
 
-		if (!skb->dev)
-			PRINT_ER("skb->dev is NULL\n");
-
 		memcpy(skb_put(skb, frame_len), buff_to_send, frame_len);
 
 		skb->protocol = eth_type_trans(skb, wilc_netdev);
@@ -1364,7 +1245,7 @@
 		vif->netstats.rx_bytes += frame_len;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		stats = netif_rx(skb);
-		PRINT_D(RX_DBG, "netif_rx ret value is: %d\n", stats);
+		netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
 	}
 }
 
@@ -1403,7 +1284,7 @@
 		release_firmware(wilc->firmware);
 
 	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
-		wilc_lock_timeout(wilc, &close_exit_sync, 12 * 1000);
+		wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
 
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
 			if (wilc->vif[i]->ndev)
@@ -1424,7 +1305,7 @@
 int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
 		     int gpio, const struct wilc_hif_func *ops)
 {
-	int i;
+	int i, ret;
 	struct wilc_vif *vif;
 	struct net_device *ndev;
 	struct wilc *wl;
@@ -1444,10 +1325,8 @@
 
 	for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
 		ndev = alloc_etherdev(sizeof(struct wilc_vif));
-		if (!ndev) {
-			PRINT_ER("Failed to allocate ethernet dev\n");
-			return -1;
-		}
+		if (!ndev)
+			return -ENOMEM;
 
 		vif = netdev_priv(ndev);
 		memset(vif, 0, sizeof(struct wilc_vif));
@@ -1457,7 +1336,7 @@
 		else
 			strcpy(ndev->name, "p2p%d");
 
-		vif->u8IfIdx = wl->vif_num;
+		vif->idx = wl->vif_num;
 		vif->wilc = *wilc;
 		wl->vif[i] = vif;
 		wl->vif[wl->vif_num]->ndev = ndev;
@@ -1466,13 +1345,14 @@
 
 		{
 			struct wireless_dev *wdev;
+
 			wdev = wilc_create_wiphy(ndev, dev);
 
 			if (dev)
 				SET_NETDEV_DEV(ndev, dev);
 
 			if (!wdev) {
-				PRINT_ER("Can't register WILC Wiphy\n");
+				netdev_err(ndev, "Can't register WILC Wiphy\n");
 				return -1;
 			}
 
@@ -1485,11 +1365,9 @@
 			vif->netstats.tx_bytes = 0;
 		}
 
-		if (register_netdev(ndev)) {
-			PRINT_ER("Device couldn't be registered - %s\n",
-				 ndev->name);
-			return -1;
-		}
+		ret = register_netdev(ndev);
+		if (ret)
+			return ret;
 
 		vif->iftype = STATION_MODE;
 		vif->mac_opened = 0;
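
The hunks above replace the driver's private PRINT_ER/PRINT_D macros with the standard netdev_* helpers and propagate real errno values instead of returning -1. A minimal sketch of that conversion pattern, assuming a hypothetical firmware name and helper function (not part of the patch):

#include <linux/firmware.h>
#include <linux/netdevice.h>

/* Illustrative only: report failures with netdev_err()/netdev_dbg()
 * and return the real errno instead of -1. */
static int example_request_firmware(struct net_device *ndev,
				    const struct firmware **fw)
{
	int ret;

	ret = request_firmware(fw, "example/wifi_firmware.bin", &ndev->dev);
	if (ret) {
		netdev_err(ndev, "can't get firmware: %d\n", ret);
		return ret;			/* not "return -1" */
	}

	netdev_dbg(ndev, "firmware loaded, %zu bytes\n", (*fw)->size);
	return 0;
}
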
diff --git a/drivers/staging/wilc1000/linux_wlan_common.h b/drivers/staging/wilc1000/linux_wlan_common.h
deleted file mode 100644
index 5d40f051..0000000
--- a/drivers/staging/wilc1000/linux_wlan_common.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef LINUX_WLAN_COMMON_H
-#define LINUX_WLAN_COMMON_H
-
-enum debug_region {
-	Generic_debug = 0,
-	Hostapd_debug,
-	Hostinf_debug,
-	CFG80211_debug,
-	Coreconfig_debug,
-	Interrupt_debug,
-	TX_debug,
-	RX_debug,
-	Lock_debug,
-	Tcp_enhance,
-	Spin_debug,
-
-	Init_debug,
-	Bus_debug,
-	Mem_debug,
-	Firmware_debug,
-	COMP = 0xFFFFFFFF,
-};
-
-#define GENERIC_DBG             (1 << Generic_debug)
-#define HOSTAPD_DBG             (1 << Hostapd_debug)
-#define HOSTINF_DBG             (1 << Hostinf_debug)
-#define CORECONFIG_DBG          (1 << Coreconfig_debug)
-#define CFG80211_DBG            (1 << CFG80211_debug)
-#define INT_DBG                 (1 << Interrupt_debug)
-#define TX_DBG                  (1 << TX_debug)
-#define RX_DBG                  (1 << RX_debug)
-#define LOCK_DBG                (1 << Lock_debug)
-#define TCP_ENH                 (1 << Tcp_enhance)
-#define SPIN_DEBUG              (1 << Spin_debug)
-#define INIT_DBG                (1 << Init_debug)
-#define BUS_DBG                 (1 << Bus_debug)
-#define MEM_DBG                 (1 << Mem_debug)
-#define FIRM_DBG                (1 << Firmware_debug)
-
-#if defined (WILC_DEBUGFS)
-extern atomic_t WILC_REGION;
-extern atomic_t WILC_DEBUG_LEVEL;
-
-#define DEBUG           BIT(0)
-#define INFO            BIT(1)
-#define WRN             BIT(2)
-#define ERR             BIT(3)
-
-#define PRINT_D(region, ...)						\
-	do {								\
-		if ((atomic_read(&WILC_DEBUG_LEVEL) & DEBUG) &&	\
-		   ((atomic_read(&WILC_REGION)) & (region))) {	\
-			printk("DBG [%s: %d]", __func__, __LINE__);	\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#define PRINT_INFO(region, ...)						\
-	do {								\
-		if ((atomic_read(&WILC_DEBUG_LEVEL) & INFO) &&	\
-		   ((atomic_read(&WILC_REGION)) & (region))) {	\
-			printk("INFO [%s]", __func__);			\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#define PRINT_WRN(region, ...)						\
-	do {								\
-		if ((atomic_read(&WILC_DEBUG_LEVEL) & WRN) &&	\
-		   ((atomic_read(&WILC_REGION)) & (region))) {	\
-			printk("WRN [%s: %d]", __func__, __LINE__);	\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#define PRINT_ER(...)							\
-	do {								\
-		if ((atomic_read(&WILC_DEBUG_LEVEL) & ERR)) {	\
-			printk("ERR [%s: %d]", __func__, __LINE__);	\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#else
-
-#define REGION  (INIT_DBG | GENERIC_DBG | CFG80211_DBG | FIRM_DBG | HOSTAPD_DBG)
-
-#define DEBUG       1
-#define INFO        0
-#define WRN         0
-
-#define PRINT_D(region, ...)						\
-	do {								\
-		if (DEBUG == 1 && ((REGION)&(region))) {		\
-			printk("DBG [%s: %d]", __func__, __LINE__);	\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#define PRINT_INFO(region, ...)						\
-	do {								\
-		if (INFO == 1 && ((REGION)&(region))) {			\
-			printk("INFO [%s]", __func__);			\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#define PRINT_WRN(region, ...)						\
-	do {								\
-		if (WRN == 1 && ((REGION)&(region))) {			\
-			printk("WRN [%s: %d]", __func__, __LINE__);	\
-			printk(__VA_ARGS__);				\
-		}							\
-	} while (0)
-
-#define PRINT_ER(...)							\
-	do {								\
-		printk("ERR [%s: %d]", __func__, __LINE__);		\
-		printk(__VA_ARGS__);					\
-	} while (0)
-
-#endif
-
-#define FN_IN   /* PRINT_D(">>> \n") */
-#define FN_OUT  /* PRINT_D("<<<\n") */
-
-#define LINUX_RX_SIZE	(96 * 1024)
-#define LINUX_TX_SIZE	(64 * 1024)
-
-
-#define WILC_MULTICAST_TABLE_SIZE	8
-
-#if defined (BEAGLE_BOARD)
-	#define SPI_CHANNEL	4
-
-	#if SPI_CHANNEL == 4
-		#define MODALIAS	"wilc_spi4"
-		#define GPIO_NUM	162
-	#else
-		#define MODALIAS	"wilc_spi3"
-		#define GPIO_NUM	133
-	#endif
-#elif defined(PLAT_WMS8304)             /* rachel */
-	#define MODALIAS	"wilc_spi"
-	#define GPIO_NUM	139
-#elif defined (PLAT_RKXXXX)
- #define MODALIAS	"WILC_IRQ"
- #define GPIO_NUM	RK30_PIN3_PD2 /* RK30_PIN3_PA1 */
-/* RK30_PIN3_PD2 */
-/* RK2928_PIN1_PA7 */
-
-#elif defined(CUSTOMER_PLATFORM)
-/*
- TODO : specify MODALIAS name and GPIO number. This is certainly necessary for SPI interface.
- *
- * ex)
- * #define MODALIAS  "WILC_SPI"
- * #define GPIO_NUM  139
- */
-
-#else
-/* base on SAMA5D3_Xplained Board */
-	#define MODALIAS	"WILC_SPI"
-	#define GPIO_NUM	0x44
-#endif
-#endif
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 27c653a..fcbc95d 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -23,11 +23,12 @@
 /*
  * --------------------------------------------------------------------------------
  */
+#define DEBUG           BIT(0)
+#define INFO            BIT(1)
+#define WRN             BIT(2)
+#define ERR             BIT(3)
 
-#define DBG_REGION_ALL	(GENERIC_DBG | HOSTAPD_DBG | HOSTINF_DBG | CORECONFIG_DBG | CFG80211_DBG | INT_DBG | TX_DBG | RX_DBG | LOCK_DBG | INIT_DBG | BUS_DBG | MEM_DBG)
 #define DBG_LEVEL_ALL	(DEBUG | INFO | WRN | ERR)
-atomic_t WILC_REGION = ATOMIC_INIT(INIT_DBG | GENERIC_DBG | CFG80211_DBG | FIRM_DBG | HOSTAPD_DBG);
-EXPORT_SYMBOL_GPL(WILC_REGION);
 atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
 EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
 
@@ -68,48 +69,9 @@
 	atomic_set(&WILC_DEBUG_LEVEL, (int)flag);
 
 	if (flag == 0)
-		printk("Debug-level disabled\n");
+		printk(KERN_INFO "Debug-level disabled\n");
 	else
-		printk("Debug-level enabled\n");
-
-	return count;
-}
-
-static ssize_t wilc_debug_region_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos)
-{
-	char buf[128];
-	int res = 0;
-
-	/* only allow read from start */
-	if (*ppos > 0)
-		return 0;
-
-	res = scnprintf(buf, sizeof(buf), "Debug region: %x\n", atomic_read(&WILC_REGION));
-
-	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
-}
-
-static ssize_t wilc_debug_region_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
-{
-	char buffer[128] = {};
-	int flag;
-
-	if (count > sizeof(buffer))
-		return -EINVAL;
-
-	if (copy_from_user(buffer, buf, count)) {
-		return -EFAULT;
-	}
-
-	flag = buffer[0] - '0';
-
-	if (flag > DBG_REGION_ALL) {
-		printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_REGION));
-		return -EFAULT;
-	}
-
-	atomic_set(&WILC_REGION, (int)flag);
-	printk("new debug-region is %x\n", atomic_read(&WILC_REGION));
+		printk(KERN_INFO "Debug-level enabled\n");
 
 	return count;
 }
@@ -130,12 +92,11 @@
 	const char *name;
 	int perm;
 	unsigned int data;
-	struct file_operations fops;
+	const struct file_operations fops;
 };
 
 static struct wilc_debugfs_info_t debugfs_info[] = {
 	{ "wilc_debug_level",	0666,	(DEBUG | ERR), FOPS(NULL, wilc_debug_level_read, wilc_debug_level_write, NULL), },
-	{ "wilc_debug_region",	0666,	(INIT_DBG | GENERIC_DBG | CFG80211_DBG), FOPS(NULL, wilc_debug_region_read, wilc_debug_region_write, NULL), },
 };
 
 static int __init wilc_debugfs_init(void)
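
With the region filter gone, the remaining debug-level knob is the usual pattern of a debugfs file backed by an atomic_t. A small, self-contained sketch of that pattern, with hypothetical names (the retained wilc code parses the buffer differently):

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static atomic_t example_debug_level = ATOMIC_INIT(0);
static struct dentry *example_dir;

static ssize_t example_level_read(struct file *file, char __user *userbuf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	int len;

	len = scnprintf(buf, sizeof(buf), "debug level: %x\n",
			atomic_read(&example_debug_level));
	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}

static ssize_t example_level_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	int level;
	int ret;

	/* Parse the user buffer as a hex value. */
	ret = kstrtoint_from_user(buf, count, 16, &level);
	if (ret)
		return ret;

	atomic_set(&example_debug_level, level);
	return count;
}

static const struct file_operations example_level_fops = {
	.owner = THIS_MODULE,
	.read = example_level_read,
	.write = example_level_write,
};

static int __init example_debugfs_init(void)
{
	example_dir = debugfs_create_dir("example_wilc", NULL);
	debugfs_create_file("debug_level", 0600, example_dir, NULL,
			    &example_level_fops);
	return 0;
}
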
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c
index 098390c..6cb894e 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.c
+++ b/drivers/staging/wilc1000/wilc_msgqueue.c
@@ -1,7 +1,6 @@
 
 #include "wilc_msgqueue.h"
 #include <linux/spinlock.h>
-#include "linux_wlan_common.h"
 #include <linux/errno.h>
 #include <linux/slab.h>
 
@@ -11,13 +10,13 @@
  *  @note		copied from FLO glue implementatuion
  *  @version		1.0
  */
-int wilc_mq_create(WILC_MsgQueueHandle *pHandle)
+int wilc_mq_create(struct message_queue *mq)
 {
-	spin_lock_init(&pHandle->strCriticalSection);
-	sema_init(&pHandle->hSem, 0);
-	pHandle->pstrMessageList = NULL;
-	pHandle->u32ReceiversCount = 0;
-	pHandle->bExiting = false;
+	spin_lock_init(&mq->lock);
+	sema_init(&mq->sem, 0);
+	INIT_LIST_HEAD(&mq->msg_list);
+	mq->recv_count = 0;
+	mq->exiting = false;
 	return 0;
 }
 
@@ -27,21 +26,22 @@
  *  @note		copied from FLO glue implementatuion
  *  @version		1.0
  */
-int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle)
+int wilc_mq_destroy(struct message_queue *mq)
 {
-	pHandle->bExiting = true;
+	struct message *msg;
+
+	mq->exiting = true;
 
 	/* Release any waiting receiver thread. */
-	while (pHandle->u32ReceiversCount > 0) {
-		up(&pHandle->hSem);
-		pHandle->u32ReceiversCount--;
+	while (mq->recv_count > 0) {
+		up(&mq->sem);
+		mq->recv_count--;
 	}
 
-	while (pHandle->pstrMessageList) {
-		Message *pstrMessge = pHandle->pstrMessageList->pstrNext;
-
-		kfree(pHandle->pstrMessageList);
-		pHandle->pstrMessageList = pstrMessge;
+	while (!list_empty(&mq->msg_list)) {
+		msg = list_first_entry(&mq->msg_list, struct message, list);
+		list_del(&msg->list);
+		kfree(msg->buf);
 	}
 
 	return 0;
@@ -53,53 +53,39 @@
  *  @note		copied from FLO glue implementatuion
  *  @version		1.0
  */
-int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
-			     const void *pvSendBuffer, u32 u32SendBufferSize)
+int wilc_mq_send(struct message_queue *mq,
+		 const void *send_buf, u32 send_buf_size)
 {
 	unsigned long flags;
-	Message *pstrMessage = NULL;
+	struct message *new_msg = NULL;
 
-	if ((!pHandle) || (u32SendBufferSize == 0) || (!pvSendBuffer)) {
-		PRINT_ER("pHandle or pvSendBuffer is null\n");
-		return -EFAULT;
-	}
+	if (!mq || (send_buf_size == 0) || !send_buf)
+		return -EINVAL;
 
-	if (pHandle->bExiting) {
-		PRINT_ER("pHandle fail\n");
+	if (mq->exiting)
 		return -EFAULT;
-	}
 
 	/* construct a new message */
-	pstrMessage = kmalloc(sizeof(Message), GFP_ATOMIC);
-	if (!pstrMessage)
+	new_msg = kmalloc(sizeof(*new_msg), GFP_ATOMIC);
+	if (!new_msg)
 		return -ENOMEM;
 
-	pstrMessage->u32Length = u32SendBufferSize;
-	pstrMessage->pstrNext = NULL;
-	pstrMessage->pvBuffer = kmemdup(pvSendBuffer, u32SendBufferSize,
-					GFP_ATOMIC);
-	if (!pstrMessage->pvBuffer) {
-		kfree(pstrMessage);
+	new_msg->len = send_buf_size;
+	INIT_LIST_HEAD(&new_msg->list);
+	new_msg->buf = kmemdup(send_buf, send_buf_size, GFP_ATOMIC);
+	if (!new_msg->buf) {
+		kfree(new_msg);
 		return -ENOMEM;
 	}
 
-	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 
 	/* add it to the message queue */
-	if (!pHandle->pstrMessageList) {
-		pHandle->pstrMessageList  = pstrMessage;
-	} else {
-		Message *pstrTailMsg = pHandle->pstrMessageList;
+	list_add_tail(&new_msg->list, &mq->msg_list);
 
-		while (pstrTailMsg->pstrNext)
-			pstrTailMsg = pstrTailMsg->pstrNext;
+	spin_unlock_irqrestore(&mq->lock, flags);
 
-		pstrTailMsg->pstrNext = pstrMessage;
-	}
-
-	spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-
-	up(&pHandle->hSem);
+	up(&mq->sem);
 
 	return 0;
 }
@@ -110,62 +96,49 @@
  *  @note		copied from FLO glue implementatuion
  *  @version		1.0
  */
-int wilc_mq_recv(WILC_MsgQueueHandle *pHandle,
-			     void *pvRecvBuffer, u32 u32RecvBufferSize,
-			     u32 *pu32ReceivedLength)
+int wilc_mq_recv(struct message_queue *mq,
+		 void *recv_buf, u32 recv_buf_size, u32 *recv_len)
 {
-	Message *pstrMessage;
+	struct message *msg;
 	unsigned long flags;
 
-	if ((!pHandle) || (u32RecvBufferSize == 0)
-	    || (!pvRecvBuffer) || (!pu32ReceivedLength)) {
-		PRINT_ER("pHandle or pvRecvBuffer is null\n");
+	if (!mq || (recv_buf_size == 0) || !recv_buf || !recv_len)
 		return -EINVAL;
-	}
 
-	if (pHandle->bExiting) {
-		PRINT_ER("pHandle fail\n");
+	if (mq->exiting)
 		return -EFAULT;
-	}
 
-	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
-	pHandle->u32ReceiversCount++;
-	spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
+	spin_lock_irqsave(&mq->lock, flags);
+	mq->recv_count++;
+	spin_unlock_irqrestore(&mq->lock, flags);
 
-	down(&pHandle->hSem);
+	down(&mq->sem);
+	spin_lock_irqsave(&mq->lock, flags);
 
-	if (pHandle->bExiting) {
-		PRINT_ER("pHandle fail\n");
-		return -EFAULT;
-	}
-
-	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
-
-	pstrMessage = pHandle->pstrMessageList;
-	if (!pstrMessage) {
-		spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-		PRINT_ER("pstrMessage is null\n");
+	if (list_empty(&mq->msg_list)) {
+		spin_unlock_irqrestore(&mq->lock, flags);
+		up(&mq->sem);
 		return -EFAULT;
 	}
 	/* check buffer size */
-	if (u32RecvBufferSize < pstrMessage->u32Length)	{
-		spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-		up(&pHandle->hSem);
-		PRINT_ER("u32RecvBufferSize overflow\n");
+	msg = list_first_entry(&mq->msg_list, struct message, list);
+	if (recv_buf_size < msg->len) {
+		spin_unlock_irqrestore(&mq->lock, flags);
+		up(&mq->sem);
 		return -EOVERFLOW;
 	}
 
 	/* consume the message */
-	pHandle->u32ReceiversCount--;
-	memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
-	*pu32ReceivedLength = pstrMessage->u32Length;
+	mq->recv_count--;
+	memcpy(recv_buf, msg->buf, msg->len);
+	*recv_len = msg->len;
 
-	pHandle->pstrMessageList = pstrMessage->pstrNext;
+	list_del(&msg->list);
 
-	kfree(pstrMessage->pvBuffer);
-	kfree(pstrMessage);
+	kfree(msg->buf);
+	kfree(msg);
 
-	spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return 0;
 }
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
index d7e0328..846a484 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.h
+++ b/drivers/staging/wilc1000/wilc_msgqueue.h
@@ -1,94 +1,28 @@
 #ifndef __WILC_MSG_QUEUE_H__
 #define __WILC_MSG_QUEUE_H__
 
-/*!
- *  @file	wilc_msgqueue.h
- *  @brief	Message Queue OS wrapper functionality
- *  @author	syounan
- *  @sa		wilc_oswrapper.h top level OS wrapper file
- *  @date	30 Aug 2010
- *  @version	1.0
- */
-
 #include <linux/semaphore.h>
+#include <linux/list.h>
 
-/* Message Queue type is a structure */
-typedef struct __Message_struct {
-	void *pvBuffer;
-	u32 u32Length;
-	struct __Message_struct *pstrNext;
-} Message;
+struct message {
+	void *buf;
+	u32 len;
+	struct list_head list;
+};
 
-typedef struct __MessageQueue_struct {
-	struct semaphore hSem;
-	spinlock_t strCriticalSection;
-	bool bExiting;
-	u32 u32ReceiversCount;
-	Message *pstrMessageList;
-} WILC_MsgQueueHandle;
+struct message_queue {
+	struct semaphore sem;
+	spinlock_t lock;
+	bool exiting;
+	u32 recv_count;
+	struct list_head msg_list;
+};
 
-/*!
- *  @brief		Creates a new Message queue
- *  @details		Creates a new Message queue, if the feature
- *                              CONFIG_WILC_MSG_QUEUE_IPC_NAME is enabled and pstrAttrs->pcName
- *                              is not Null, then this message queue can be used for IPC with
- *                              any other message queue having the same name in the system
- *  @param[in,out]	pHandle handle to the message queue object
- *  @param[in]	pstrAttrs Optional attributes, NULL for default
- *  @return		Error code indicating success/failure
- *  @author		syounan
- *  @date		30 Aug 2010
- *  @version		1.0
- */
-int wilc_mq_create(WILC_MsgQueueHandle *pHandle);
-
-/*!
- *  @brief		Sends a message
- *  @details		Sends a message, this API will block until the message is
- *                              actually sent or until it is timedout (as long as the feature
- *                              CONFIG_WILC_MSG_QUEUE_TIMEOUT is enabled and pstrAttrs->u32Timeout
- *                              is not set to WILC_OS_INFINITY), zero timeout is a valid value
- *  @param[in]	pHandle handle to the message queue object
- *  @param[in]	pvSendBuffer pointer to the data to send
- *  @param[in]	u32SendBufferSize the size of the data to send
- *  @param[in]	pstrAttrs Optional attributes, NULL for default
- *  @return		Error code indicating success/failure
- *  @author		syounan
- *  @date		30 Aug 2010
- *  @version		1.0
- */
-int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
-			     const void *pvSendBuffer, u32 u32SendBufferSize);
-
-/*!
- *  @brief		Receives a message
- *  @details		Receives a message, this API will block until a message is
- *                              received or until it is timedout (as long as the feature
- *                              CONFIG_WILC_MSG_QUEUE_TIMEOUT is enabled and pstrAttrs->u32Timeout
- *                              is not set to WILC_OS_INFINITY), zero timeout is a valid value
- *  @param[in]	pHandle handle to the message queue object
- *  @param[out]	pvRecvBuffer pointer to a buffer to fill with the received message
- *  @param[in]	u32RecvBufferSize the size of the receive buffer
- *  @param[out]	pu32ReceivedLength the length of received data
- *  @param[in]	pstrAttrs Optional attributes, NULL for default
- *  @return		Error code indicating success/failure
- *  @author		syounan
- *  @date		30 Aug 2010
- *  @version		1.0
- */
-int wilc_mq_recv(WILC_MsgQueueHandle *pHandle,
-			     void *pvRecvBuffer, u32 u32RecvBufferSize,
-			     u32 *pu32ReceivedLength);
-
-/*!
- *  @brief		Destroys an existing  Message queue
- *  @param[in]	pHandle handle to the message queue object
- *  @param[in]	pstrAttrs Optional attributes, NULL for default
- *  @return		Error code indicating success/failure
- *  @author		syounan
- *  @date		30 Aug 2010
- *  @version		1.0
- */
-int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle);
+int wilc_mq_create(struct message_queue *mq);
+int wilc_mq_send(struct message_queue *mq,
+		 const void *send_buf, u32 send_buf_size);
+int wilc_mq_recv(struct message_queue *mq,
+		 void *recv_buf, u32 recv_buf_size, u32 *recv_len);
+int wilc_mq_destroy(struct message_queue *mq);
 
 #endif
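
The renamed API keeps the old semantics: a blocking receive backed by a semaphore, with copy-in/copy-out payload buffers. A brief usage sketch against the declarations above; the payload struct and function names are hypothetical:

#include "wilc_msgqueue.h"

struct example_msg {
	u16 id;
	u32 arg;
};

static struct message_queue example_mq;

static int example_init(void)
{
	return wilc_mq_create(&example_mq);
}

static int example_producer(void)
{
	struct example_msg msg = { .id = 1, .arg = 42 };

	/* wilc_mq_send() copies the buffer, so a stack payload is fine. */
	return wilc_mq_send(&example_mq, &msg, sizeof(msg));
}

static int example_consumer(void)
{
	struct example_msg msg;
	u32 len;
	int ret;

	/* Blocks on the queue semaphore until a message is available. */
	ret = wilc_mq_recv(&example_mq, &msg, sizeof(msg), &len);
	if (ret)
		return ret;

	return msg.id;
}

static void example_exit(void)
{
	wilc_mq_destroy(&example_mq);
}
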
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index e961b50..029bd09 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -30,18 +30,19 @@
 
 #define WILC_SDIO_BLOCK_SIZE 512
 
-typedef struct {
+struct wilc_sdio {
 	bool irq_gpio;
 	u32 block_size;
 	int nint;
 #define MAX_NUN_INT_THRPT_ENH2 (5) /* Max num interrupts allowed in registers 0xf7, 0xf8 */
 	int has_thrpt_enh3;
-} wilc_sdio_t;
+};
 
-static wilc_sdio_t g_sdio;
+static struct wilc_sdio g_sdio;
 
 static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
 static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
+static int sdio_init(struct wilc *wilc, bool resume);
 
 static void wilc_sdio_interrupt(struct sdio_func *func)
 {
@@ -142,11 +143,82 @@
 	wilc_netdev_cleanup(sdio_get_drvdata(func));
 }
 
+static int sdio_reset(struct wilc *wilc)
+{
+	sdio_cmd52_t cmd;
+	int ret;
+	struct sdio_func *func = dev_to_sdio_func(wilc->dev);
+
+	cmd.read_write = 1;
+	cmd.function = 0;
+	cmd.raw = 0;
+	cmd.address = 0x6;
+	cmd.data = 0x8;
+	ret = wilc_sdio_cmd52(wilc, &cmd);
+	if (ret) {
+		dev_err(&func->dev, "Fail cmd 52, reset cmd ...\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int wilc_sdio_suspend(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	struct wilc *wilc = sdio_get_drvdata(func);
+	int ret;
+
+	dev_info(dev, "sdio suspend\n");
+	chip_wakeup(wilc);
+
+	if (!wilc->suspend_event) {
+		wilc_chip_sleep_manually(wilc);
+	} else {
+		host_sleep_notify(wilc);
+		chip_allow_sleep(wilc);
+	}
+
+	ret = sdio_reset(wilc);
+	if (ret) {
+		dev_err(&func->dev, "Fail reset sdio\n");
+		return ret;
+	}
+	sdio_claim_host(func);
+
+	return 0;
+}
+
+static int wilc_sdio_resume(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	struct wilc *wilc = sdio_get_drvdata(func);
+
+	dev_info(dev, "sdio resume\n");
+	sdio_release_host(func);
+	chip_wakeup(wilc);
+	sdio_init(wilc, true);
+
+	if (wilc->suspend_event)
+		host_wakeup_notify(wilc);
+
+	chip_allow_sleep(wilc);
+
+	return 0;
+}
+
+static const struct dev_pm_ops wilc_sdio_pm_ops = {
+	.suspend = wilc_sdio_suspend,
+	.resume = wilc_sdio_resume,
+};
+
 static struct sdio_driver wilc1000_sdio_driver = {
 	.name		= SDIO_MODALIAS,
 	.id_table	= wilc_sdio_ids,
 	.probe		= linux_sdio_probe,
 	.remove		= linux_sdio_remove,
+	.drv = {
+		.pm = &wilc_sdio_pm_ops,
+	}
 };
 module_driver(wilc1000_sdio_driver,
 	      sdio_register_driver,
@@ -185,11 +257,6 @@
 	dev_info(&func->dev, "wilc_sdio_disable_interrupt OUT\n");
 }
 
-static int wilc_sdio_init(void)
-{
-	return 1;
-}
-
 /********************************************
  *
  *      Function 0
@@ -600,22 +667,16 @@
 	return 1;
 }
 
-static int sdio_init(struct wilc *wilc)
+static int sdio_init(struct wilc *wilc, bool resume)
 {
 	struct sdio_func *func = dev_to_sdio_func(wilc->dev);
 	sdio_cmd52_t cmd;
 	int loop, ret;
 	u32 chipid;
 
-	memset(&g_sdio, 0, sizeof(wilc_sdio_t));
-
-	g_sdio.irq_gpio = (wilc->dev_irq_num);
-
-	if (!wilc_sdio_init()) {
-		dev_err(&func->dev, "Failed io init bus...\n");
-		return 0;
-	} else {
-		return 0;
+	if (!resume) {
+		memset(&g_sdio, 0, sizeof(struct wilc_sdio));
+		g_sdio.irq_gpio = wilc->dev_irq_num;
 	}
 
 	/**
@@ -706,16 +767,19 @@
 	/**
 	 *      make sure can read back chip id correctly
 	 **/
-	if (!sdio_read_reg(wilc, 0x1000, &chipid)) {
-		dev_err(&func->dev, "Fail cmd read chip id...\n");
-		goto _fail_;
+	if (!resume) {
+		if (!sdio_read_reg(wilc, 0x1000, &chipid)) {
+			dev_err(&func->dev, "Fail cmd read chip id...\n");
+			goto _fail_;
+		}
+		dev_err(&func->dev, "chipid (%08x)\n", chipid);
+		if ((chipid & 0xfff) > 0x2a0)
+			g_sdio.has_thrpt_enh3 = 1;
+		else
+			g_sdio.has_thrpt_enh3 = 0;
+		dev_info(&func->dev, "has_thrpt_enh3 = %d...\n",
+			 g_sdio.has_thrpt_enh3);
 	}
-	dev_err(&func->dev, "chipid (%08x)\n", chipid);
-	if ((chipid & 0xfff) > 0x2a0)
-		g_sdio.has_thrpt_enh3 = 1;
-	else
-		g_sdio.has_thrpt_enh3 = 0;
-	dev_info(&func->dev, "has_thrpt_enh3 = %d...\n", g_sdio.has_thrpt_enh3);
 
 	return 1;
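
The new suspend/resume support hooks into the standard driver-model PM path by pointing the SDIO driver's device_driver at a dev_pm_ops table. A minimal sketch of that wiring, with hypothetical names and the chip-specific work reduced to comments:

#include <linux/device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/pm.h>

static int example_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	/* Quiesce the chip before the host controller powers down. */
	dev_info(&func->dev, "suspending\n");
	return 0;
}

static int example_sdio_resume(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	/* Re-run the bus init path (cf. sdio_init(wilc, true) above). */
	dev_info(&func->dev, "resuming\n");
	return 0;
}

static const struct dev_pm_ops example_sdio_pm_ops = {
	.suspend = example_sdio_suspend,
	.resume = example_sdio_resume,
};

static struct sdio_driver example_sdio_driver = {
	.name = "example_sdio",
	.drv = {
		.pm = &example_sdio_pm_ops,
	},
	/* .id_table, .probe and .remove omitted for brevity */
};
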
 
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 86de50c..d41b8b6 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -18,19 +18,18 @@
 #include <linux/spi/spi.h>
 #include <linux/of_gpio.h>
 
-#include "linux_wlan_common.h"
 #include <linux/string.h>
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
 #include "wilc_wfi_netdevice.h"
 
-typedef struct {
+struct wilc_spi {
 	int crc_off;
 	int nint;
 	int has_thrpt_enh;
-} wilc_spi_t;
+};
 
-static wilc_spi_t g_spi;
+static struct wilc_spi g_spi;
 
 static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
 static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
@@ -120,8 +119,6 @@
 
 #define USE_SPI_DMA     0
 
-static const struct wilc1000_ops wilc1000_spi_ops;
-
 static int wilc_bus_probe(struct spi_device *spi)
 {
 	int ret, gpio;
@@ -153,7 +150,7 @@
 };
 MODULE_DEVICE_TABLE(of, wilc1000_of_match);
 
-struct spi_driver wilc1000_spi_driver = {
+static struct spi_driver wilc1000_spi_driver = {
 	.driver = {
 		.name = MODALIAS,
 		.of_match_table = wilc1000_of_match,
@@ -382,9 +379,8 @@
 		break;
 	}
 
-	if (result != N_OK) {
+	if (result != N_OK)
 		return result;
-	}
 
 	if (!g_spi.crc_off)
 		wb[len - 1] = (crc7(0x7f, (const u8 *)&wb[0], len - 1)) << 1;
@@ -421,9 +417,8 @@
 		return result;
 	}
 	/* zero spi write buffers. */
-	for (wix = len; wix < len2; wix++) {
+	for (wix = len; wix < len2; wix++)
 		wb[wix] = 0;
-	}
 	rix = len;
 
 	if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
@@ -447,8 +442,9 @@
 	/* } while(&rptr[1] <= &rb[len2]); */
 
 	if (rsp != cmd) {
-		dev_err(&spi->dev, "Failed cmd response, cmd (%02x)"
-			 ", resp (%02x)\n", cmd, rsp);
+		dev_err(&spi->dev,
+			"Failed cmd response, cmd (%02x), resp (%02x)\n",
+			cmd, rsp);
 		result = N_FAIL;
 		return result;
 	}
@@ -516,7 +512,7 @@
 					crc[0] = rb[rix++];
 					crc[1] = rb[rix++];
 				} else {
-					dev_err(&spi->dev,"buffer overrun when reading crc.\n");
+					dev_err(&spi->dev, "buffer overrun when reading crc.\n");
 					result = N_FAIL;
 					return result;
 				}
@@ -525,9 +521,8 @@
 			int ix;
 
 			/* some data may be read in response to dummy bytes. */
-			for (ix = 0; (rix < len2) && (ix < sz); ) {
+			for (ix = 0; (rix < len2) && (ix < sz); )
 				b[ix++] = rb[rix++];
-			}
 
 			sz -= ix;
 
@@ -682,7 +677,7 @@
 		 **/
 		if (!g_spi.crc_off) {
 			if (wilc_spi_tx(wilc, crc, 2)) {
-				dev_err(&spi->dev,"Failed data block crc write, bus error...\n");
+				dev_err(&spi->dev, "Failed data block crc write, bus error...\n");
 				result = N_FAIL;
 				break;
 			}
@@ -713,9 +708,8 @@
 	dat = cpu_to_le32(dat);
 	result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
 				  0);
-	if (result != N_OK) {
+	if (result != N_OK)
 		dev_err(&spi->dev, "Failed internal write cmd...\n");
-	}
 
 	return result;
 }
@@ -758,9 +752,8 @@
 	}
 
 	result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless);
-	if (result != N_OK) {
+	if (result != N_OK)
 		dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr);
-	}
 
 	return result;
 }
@@ -788,9 +781,8 @@
 	 *      Data
 	 **/
 	result = spi_data_write(wilc, buf, size);
-	if (result != N_OK) {
+	if (result != N_OK)
 		dev_err(&spi->dev, "Failed block data write...\n");
-	}
 
 	return 1;
 }
@@ -852,7 +844,7 @@
 	return 1;
 }
 
-static int wilc_spi_init(struct wilc *wilc)
+static int wilc_spi_init(struct wilc *wilc, bool resume)
 {
 	struct spi_device *spi = to_spi_device(wilc->dev);
 	u32 reg;
@@ -869,7 +861,7 @@
 		return 1;
 	}
 
-	memset(&g_spi, 0, sizeof(wilc_spi_t));
+	memset(&g_spi, 0, sizeof(struct wilc_spi));
 
 	/**
 	 *      configure protocol
@@ -1076,7 +1068,7 @@
 				ret = wilc_spi_write_reg(wilc,
 							 WILC_VMM_CORE_CTL, 1);
 				if (!ret) {
-					dev_err(&spi->dev,"fail write reg vmm_core_ctl...\n");
+					dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n");
 					goto _fail_;
 				}
 			}
@@ -1126,9 +1118,9 @@
 		return 0;
 	}
 
-	for (i = 0; (i < 5) && (nint > 0); i++, nint--) {
+	for (i = 0; (i < 5) && (nint > 0); i++, nint--)
 		reg |= (BIT((27 + i)));
-	}
+
 	ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg);
 	if (!ret) {
 		dev_err(&spi->dev, "Failed write reg (%08x)...\n",
@@ -1143,9 +1135,8 @@
 			return 0;
 		}
 
-		for (i = 0; (i < 3) && (nint > 0); i++, nint--) {
+		for (i = 0; (i < 3) && (nint > 0); i++, nint--)
 			reg |= BIT(i);
-		}
 
 		ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
 		if (!ret) {
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 53fb2d4..378c350 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -74,6 +74,10 @@
 	}
 };
 
+static const struct wiphy_wowlan_support wowlan_support = {
+	.flags = WIPHY_WOWLAN_ANY
+};
+
 #define WILC_WFI_DWELL_PASSIVE 100
 #define WILC_WFI_DWELL_ACTIVE  40
 
@@ -89,7 +93,7 @@
 extern int wilc_mac_open(struct net_device *ndev);
 extern int wilc_mac_close(struct net_device *ndev);
 
-static tstrNetworkInfo last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
+static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
 static u32 last_scanned_cnt;
 struct timer_list wilc_during_ip_timer;
 static struct timer_list hAgingTimer;
@@ -153,7 +157,7 @@
 static u8 curr_channel;
 static u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09};
 static u8 p2p_local_random = 0x01;
-static u8 p2p_recv_random = 0x00;
+static u8 p2p_recv_random;
 static u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
 static bool wilc_ie;
 
@@ -188,29 +192,29 @@
 
 	if (op_ifcs == 0) {
 		del_timer_sync(&hAgingTimer);
-		PRINT_INFO(CORECONFIG_DBG, "destroy aging timer\n");
 
 		for (i = 0; i < last_scanned_cnt; i++) {
-			if (last_scanned_shadow[last_scanned_cnt].pu8IEs) {
-				kfree(last_scanned_shadow[i].pu8IEs);
-				last_scanned_shadow[last_scanned_cnt].pu8IEs = NULL;
+			if (last_scanned_shadow[last_scanned_cnt].ies) {
+				kfree(last_scanned_shadow[i].ies);
+				last_scanned_shadow[last_scanned_cnt].ies = NULL;
 			}
 
-			wilc_free_join_params(last_scanned_shadow[i].pJoinParams);
-			last_scanned_shadow[i].pJoinParams = NULL;
+			kfree(last_scanned_shadow[i].join_params);
+			last_scanned_shadow[i].join_params = NULL;
 		}
 		last_scanned_cnt = 0;
 	}
 }
 
-static u32 get_rssi_avg(tstrNetworkInfo *network_info)
+static u32 get_rssi_avg(struct network_info *network_info)
 {
 	u8 i;
 	int rssi_v = 0;
-	u8 num_rssi = (network_info->strRssi.u8Full) ? NUM_RSSI : (network_info->strRssi.u8Index);
+	u8 num_rssi = (network_info->str_rssi.u8Full) ?
+		       NUM_RSSI : (network_info->str_rssi.u8Index);
 
 	for (i = 0; i < num_rssi; i++)
-		rssi_v += network_info->strRssi.as8RSSI[i];
+		rssi_v += network_info->str_rssi.as8RSSI[i];
 
 	rssi_v /= num_rssi;
 	return rssi_v;
@@ -224,28 +228,36 @@
 	int i;
 	int rssi = 0;
 
-	priv = (struct wilc_priv *)user_void;
+	priv = user_void;
 	wiphy = priv->dev->ieee80211_ptr->wiphy;
 
 	for (i = 0; i < last_scanned_cnt; i++) {
-		tstrNetworkInfo *network_info;
+		struct network_info *network_info;
 
 		network_info = &last_scanned_shadow[i];
 
-		if (!network_info->u8Found || all) {
+		if (!network_info->found || all) {
 			s32 freq;
 			struct ieee80211_channel *channel;
 
 			if (network_info) {
-				freq = ieee80211_channel_to_frequency((s32)network_info->u8channel, IEEE80211_BAND_2GHZ);
+				freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
 				channel = ieee80211_get_channel(wiphy, freq);
 
 				rssi = get_rssi_avg(network_info);
-				if (memcmp("DIRECT-", network_info->au8ssid, 7) ||
+				if (memcmp("DIRECT-", network_info->ssid, 7) ||
 				    direct_scan) {
-					bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, network_info->au8bssid, network_info->u64Tsf, network_info->u16CapInfo,
-								  network_info->u16BeaconPeriod, (const u8 *)network_info->pu8IEs,
-								  (size_t)network_info->u16IEsLen, (((s32)rssi) * 100), GFP_KERNEL);
+					bss = cfg80211_inform_bss(wiphy,
+								  channel,
+								  CFG80211_BSS_FTYPE_UNKNOWN,
+								  network_info->bssid,
+								  network_info->tsf_hi,
+								  network_info->cap_info,
+								  network_info->beacon_period,
+								  (const u8 *)network_info->ies,
+								  (size_t)network_info->ies_len,
+								  (s32)rssi * 100,
+								  GFP_KERNEL);
 					cfg80211_put_bss(wiphy, bss);
 				}
 			}
@@ -258,7 +270,7 @@
 	int i;
 
 	for (i = 0; i < last_scanned_cnt; i++)
-		last_scanned_shadow[i].u8Found = 0;
+		last_scanned_shadow[i].found = 0;
 }
 
 static void update_scan_time(void)
@@ -266,7 +278,7 @@
 	int i;
 
 	for (i = 0; i < last_scanned_cnt; i++)
-		last_scanned_shadow[i].u32TimeRcvdInScan = jiffies;
+		last_scanned_shadow[i].time_scan = jiffies;
 }
 
 static void remove_network_from_shadow(unsigned long arg)
@@ -276,13 +288,12 @@
 
 
 	for (i = 0; i < last_scanned_cnt; i++) {
-		if (time_after(now, last_scanned_shadow[i].u32TimeRcvdInScan + (unsigned long)(SCAN_RESULT_EXPIRE))) {
-			PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s\n", last_scanned_shadow[i].au8ssid);
+		if (time_after(now, last_scanned_shadow[i].time_scan +
+			       (unsigned long)(SCAN_RESULT_EXPIRE))) {
+			kfree(last_scanned_shadow[i].ies);
+			last_scanned_shadow[i].ies = NULL;
 
-			kfree(last_scanned_shadow[i].pu8IEs);
-			last_scanned_shadow[i].pu8IEs = NULL;
-
-			wilc_free_join_params(last_scanned_shadow[i].pJoinParams);
+			kfree(last_scanned_shadow[i].join_params);
 
 			for (j = i; (j < last_scanned_cnt - 1); j++)
 				last_scanned_shadow[j] = last_scanned_shadow[j + 1];
@@ -291,37 +302,31 @@
 		}
 	}
 
-	PRINT_D(CFG80211_DBG, "Number of cached networks: %d\n",
-		last_scanned_cnt);
 	if (last_scanned_cnt != 0) {
 		hAgingTimer.data = arg;
 		mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
-	} else {
-		PRINT_D(CFG80211_DBG, "No need to restart Aging timer\n");
 	}
 }
 
 static void clear_duringIP(unsigned long arg)
 {
-	PRINT_D(GENERIC_DBG, "GO:IP Obtained , enable scan\n");
 	wilc_optaining_ip = false;
 }
 
-static int is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo,
+static int is_network_in_shadow(struct network_info *pstrNetworkInfo,
 				void *user_void)
 {
 	int state = -1;
 	int i;
 
 	if (last_scanned_cnt == 0) {
-		PRINT_D(CFG80211_DBG, "Starting Aging timer\n");
 		hAgingTimer.data = (unsigned long)user_void;
 		mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
 		state = -1;
 	} else {
 		for (i = 0; i < last_scanned_cnt; i++) {
-			if (memcmp(last_scanned_shadow[i].au8bssid,
-				   pstrNetworkInfo->au8bssid, 6) == 0) {
+			if (memcmp(last_scanned_shadow[i].bssid,
+				   pstrNetworkInfo->bssid, 6) == 0) {
 				state = i;
 				break;
 			}
@@ -330,58 +335,57 @@
 	return state;
 }
 
-static void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo,
+static void add_network_to_shadow(struct network_info *pstrNetworkInfo,
 				  void *user_void, void *pJoinParams)
 {
 	int ap_found = is_network_in_shadow(pstrNetworkInfo, user_void);
 	u32 ap_index = 0;
 	u8 rssi_index = 0;
 
-	if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW) {
-		PRINT_D(CFG80211_DBG, "Shadow network reached its maximum limit\n");
+	if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW)
 		return;
-	}
+
 	if (ap_found == -1) {
 		ap_index = last_scanned_cnt;
 		last_scanned_cnt++;
 	} else {
 		ap_index = ap_found;
 	}
-	rssi_index = last_scanned_shadow[ap_index].strRssi.u8Index;
-	last_scanned_shadow[ap_index].strRssi.as8RSSI[rssi_index++] = pstrNetworkInfo->s8rssi;
+	rssi_index = last_scanned_shadow[ap_index].str_rssi.u8Index;
+	last_scanned_shadow[ap_index].str_rssi.as8RSSI[rssi_index++] = pstrNetworkInfo->rssi;
 	if (rssi_index == NUM_RSSI) {
 		rssi_index = 0;
-		last_scanned_shadow[ap_index].strRssi.u8Full = 1;
+		last_scanned_shadow[ap_index].str_rssi.u8Full = 1;
 	}
-	last_scanned_shadow[ap_index].strRssi.u8Index = rssi_index;
-	last_scanned_shadow[ap_index].s8rssi = pstrNetworkInfo->s8rssi;
-	last_scanned_shadow[ap_index].u16CapInfo = pstrNetworkInfo->u16CapInfo;
-	last_scanned_shadow[ap_index].u8SsidLen = pstrNetworkInfo->u8SsidLen;
-	memcpy(last_scanned_shadow[ap_index].au8ssid,
-	       pstrNetworkInfo->au8ssid, pstrNetworkInfo->u8SsidLen);
-	memcpy(last_scanned_shadow[ap_index].au8bssid,
-	       pstrNetworkInfo->au8bssid, ETH_ALEN);
-	last_scanned_shadow[ap_index].u16BeaconPeriod = pstrNetworkInfo->u16BeaconPeriod;
-	last_scanned_shadow[ap_index].u8DtimPeriod = pstrNetworkInfo->u8DtimPeriod;
-	last_scanned_shadow[ap_index].u8channel = pstrNetworkInfo->u8channel;
-	last_scanned_shadow[ap_index].u16IEsLen = pstrNetworkInfo->u16IEsLen;
-	last_scanned_shadow[ap_index].u64Tsf = pstrNetworkInfo->u64Tsf;
+	last_scanned_shadow[ap_index].str_rssi.u8Index = rssi_index;
+	last_scanned_shadow[ap_index].rssi = pstrNetworkInfo->rssi;
+	last_scanned_shadow[ap_index].cap_info = pstrNetworkInfo->cap_info;
+	last_scanned_shadow[ap_index].ssid_len = pstrNetworkInfo->ssid_len;
+	memcpy(last_scanned_shadow[ap_index].ssid,
+	       pstrNetworkInfo->ssid, pstrNetworkInfo->ssid_len);
+	memcpy(last_scanned_shadow[ap_index].bssid,
+	       pstrNetworkInfo->bssid, ETH_ALEN);
+	last_scanned_shadow[ap_index].beacon_period = pstrNetworkInfo->beacon_period;
+	last_scanned_shadow[ap_index].dtim_period = pstrNetworkInfo->dtim_period;
+	last_scanned_shadow[ap_index].ch = pstrNetworkInfo->ch;
+	last_scanned_shadow[ap_index].ies_len = pstrNetworkInfo->ies_len;
+	last_scanned_shadow[ap_index].tsf_hi = pstrNetworkInfo->tsf_hi;
 	if (ap_found != -1)
-		kfree(last_scanned_shadow[ap_index].pu8IEs);
-	last_scanned_shadow[ap_index].pu8IEs =
-		kmalloc(pstrNetworkInfo->u16IEsLen, GFP_KERNEL);
-	memcpy(last_scanned_shadow[ap_index].pu8IEs,
-	       pstrNetworkInfo->pu8IEs, pstrNetworkInfo->u16IEsLen);
-	last_scanned_shadow[ap_index].u32TimeRcvdInScan = jiffies;
-	last_scanned_shadow[ap_index].u32TimeRcvdInScanCached = jiffies;
-	last_scanned_shadow[ap_index].u8Found = 1;
+		kfree(last_scanned_shadow[ap_index].ies);
+	last_scanned_shadow[ap_index].ies = kmalloc(pstrNetworkInfo->ies_len,
+						    GFP_KERNEL);
+	memcpy(last_scanned_shadow[ap_index].ies,
+	       pstrNetworkInfo->ies, pstrNetworkInfo->ies_len);
+	last_scanned_shadow[ap_index].time_scan = jiffies;
+	last_scanned_shadow[ap_index].time_scan_cached = jiffies;
+	last_scanned_shadow[ap_index].found = 1;
 	if (ap_found != -1)
-		wilc_free_join_params(last_scanned_shadow[ap_index].pJoinParams);
-	last_scanned_shadow[ap_index].pJoinParams = pJoinParams;
+		kfree(last_scanned_shadow[ap_index].join_params);
+	last_scanned_shadow[ap_index].join_params = pJoinParams;
 }
 
 static void CfgScanResult(enum scan_event scan_event,
-			  tstrNetworkInfo *network_info,
+			  struct network_info *network_info,
 			  void *user_void,
 			  void *join_params)
 {
@@ -391,7 +395,7 @@
 	struct ieee80211_channel *channel;
 	struct cfg80211_bss *bss = NULL;
 
-	priv = (struct wilc_priv *)user_void;
+	priv = user_void;
 	if (priv->bCfgScanning) {
 		if (scan_event == SCAN_EVENT_NETWORK_FOUND) {
 			wiphy = priv->dev->ieee80211_ptr->wiphy;
@@ -400,67 +404,53 @@
 				return;
 
 			if (wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-			    (((s32)network_info->s8rssi * 100) < 0 ||
-			    ((s32)network_info->s8rssi * 100) > 100)) {
-				PRINT_ER("wiphy signal type fial\n");
+			    (((s32)network_info->rssi * 100) < 0 ||
+			    ((s32)network_info->rssi * 100) > 100))
 				return;
-			}
 
 			if (network_info) {
-				s32Freq = ieee80211_channel_to_frequency((s32)network_info->u8channel, IEEE80211_BAND_2GHZ);
+				s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
 				channel = ieee80211_get_channel(wiphy, s32Freq);
 
 				if (!channel)
 					return;
 
-				PRINT_INFO(CFG80211_DBG, "Network Info:: CHANNEL Frequency: %d, RSSI: %d, CapabilityInfo: %d,"
-					   "BeaconPeriod: %d\n", channel->center_freq, (((s32)network_info->s8rssi) * 100),
-					   network_info->u16CapInfo, network_info->u16BeaconPeriod);
-
-				if (network_info->bNewNetwork) {
+				if (network_info->new_network) {
 					if (priv->u32RcvdChCount < MAX_NUM_SCANNED_NETWORKS) {
-						PRINT_D(CFG80211_DBG, "Network %s found\n", network_info->au8ssid);
 						priv->u32RcvdChCount++;
 
-						if (!join_params)
-							PRINT_INFO(CORECONFIG_DBG, ">> Something really bad happened\n");
 						add_network_to_shadow(network_info, priv, join_params);
 
-						if (!(memcmp("DIRECT-", network_info->au8ssid, 7))) {
-							bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN,  network_info->au8bssid, network_info->u64Tsf, network_info->u16CapInfo,
-										  network_info->u16BeaconPeriod, (const u8 *)network_info->pu8IEs,
-										  (size_t)network_info->u16IEsLen, (((s32)network_info->s8rssi) * 100), GFP_KERNEL);
+						if (!(memcmp("DIRECT-", network_info->ssid, 7))) {
+							bss = cfg80211_inform_bss(wiphy,
+										  channel,
+										  CFG80211_BSS_FTYPE_UNKNOWN,
+										  network_info->bssid,
+										  network_info->tsf_hi,
+										  network_info->cap_info,
+										  network_info->beacon_period,
+										  (const u8 *)network_info->ies,
+										  (size_t)network_info->ies_len,
+										  (s32)network_info->rssi * 100,
+										  GFP_KERNEL);
 							cfg80211_put_bss(wiphy, bss);
 						}
-
-
-					} else {
-						PRINT_ER("Discovered networks exceeded the max limit\n");
 					}
 				} else {
 					u32 i;
 
 					for (i = 0; i < priv->u32RcvdChCount; i++) {
-						if (memcmp(last_scanned_shadow[i].au8bssid, network_info->au8bssid, 6) == 0) {
-							PRINT_D(CFG80211_DBG, "Update RSSI of %s\n", last_scanned_shadow[i].au8ssid);
-
-							last_scanned_shadow[i].s8rssi = network_info->s8rssi;
-							last_scanned_shadow[i].u32TimeRcvdInScan = jiffies;
+						if (memcmp(last_scanned_shadow[i].bssid, network_info->bssid, 6) == 0) {
+							last_scanned_shadow[i].rssi = network_info->rssi;
+							last_scanned_shadow[i].time_scan = jiffies;
 							break;
 						}
 					}
 				}
 			}
 		} else if (scan_event == SCAN_EVENT_DONE) {
-			PRINT_D(CFG80211_DBG, "Scan Done[%p]\n", priv->dev);
-			PRINT_D(CFG80211_DBG, "Refreshing Scan ...\n");
 			refresh_scan(priv, 1, false);
 
-			if (priv->u32RcvdChCount > 0)
-				PRINT_D(CFG80211_DBG, "%d Network(s) found\n", priv->u32RcvdChCount);
-			else
-				PRINT_D(CFG80211_DBG, "No networks found\n");
-
 			down(&(priv->hSemScanReq));
 
 			if (priv->pstrScanReq) {
@@ -473,7 +463,6 @@
 		} else if (scan_event == SCAN_EVENT_ABORTED) {
 			down(&(priv->hSemScanReq));
 
-			PRINT_D(CFG80211_DBG, "Scan Aborted\n");
 			if (priv->pstrScanReq) {
 				update_scan_time();
 				refresh_scan(priv, 1, false);
@@ -490,9 +479,9 @@
 int wilc_connecting;
 
 static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
-			     tstrConnectInfo *pstrConnectInfo,
+			     struct connect_info *pstrConnectInfo,
 			     u8 u8MacStatus,
-			     tstrDisconnectNotifInfo *pstrDisconnectNotifInfo,
+			     struct disconnect_info *pstrDisconnectNotifInfo,
 			     void *pUserVoid)
 {
 	struct wilc_priv *priv;
@@ -504,49 +493,47 @@
 
 	wilc_connecting = 0;
 
-	priv = (struct wilc_priv *)pUserVoid;
+	priv = pUserVoid;
 	dev = priv->dev;
 	vif = netdev_priv(dev);
 	wl = vif->wilc;
-	pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+	pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
 
 	if (enuConnDisconnEvent == CONN_DISCONN_EVENT_CONN_RESP) {
 		u16 u16ConnectStatus;
 
-		u16ConnectStatus = pstrConnectInfo->u16ConnectStatus;
-
-		PRINT_D(CFG80211_DBG, " Connection response received = %d\n", u8MacStatus);
+		u16ConnectStatus = pstrConnectInfo->status;
 
 		if ((u8MacStatus == MAC_DISCONNECTED) &&
-		    (pstrConnectInfo->u16ConnectStatus == SUCCESSFUL_STATUSCODE)) {
+		    (pstrConnectInfo->status == SUCCESSFUL_STATUSCODE)) {
 			u16ConnectStatus = WLAN_STATUS_UNSPECIFIED_FAILURE;
-			wilc_wlan_set_bssid(priv->dev, NullBssid);
+			wilc_wlan_set_bssid(priv->dev, NullBssid,
+					    STATION_MODE);
 			eth_zero_addr(wilc_connected_ssid);
 
 			if (!pstrWFIDrv->p2p_connect)
 				wlan_channel = INVALID_CHANNEL;
 
-			PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d\n", u16ConnectStatus, u8MacStatus);
+			netdev_err(dev, "Unspecified failure\n");
 		}
 
 		if (u16ConnectStatus == WLAN_STATUS_SUCCESS) {
 			bool bNeedScanRefresh = false;
 			u32 i;
 
-			PRINT_INFO(CFG80211_DBG, "Connection Successful:: BSSID: %x%x%x%x%x%x\n", pstrConnectInfo->au8bssid[0],
-				   pstrConnectInfo->au8bssid[1], pstrConnectInfo->au8bssid[2], pstrConnectInfo->au8bssid[3], pstrConnectInfo->au8bssid[4], pstrConnectInfo->au8bssid[5]);
-			memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN);
+			memcpy(priv->au8AssociatedBss, pstrConnectInfo->bssid, ETH_ALEN);
 
 
 			for (i = 0; i < last_scanned_cnt; i++) {
-				if (memcmp(last_scanned_shadow[i].au8bssid,
-					   pstrConnectInfo->au8bssid, ETH_ALEN) == 0) {
+				if (memcmp(last_scanned_shadow[i].bssid,
+					   pstrConnectInfo->bssid,
+					   ETH_ALEN) == 0) {
 					unsigned long now = jiffies;
 
 					if (time_after(now,
-						       last_scanned_shadow[i].u32TimeRcvdInScanCached + (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ)))) {
+						       last_scanned_shadow[i].time_scan_cached +
+						       (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ))))
 						bNeedScanRefresh = true;
-					}
 
 					break;
 				}
@@ -556,34 +543,27 @@
 				refresh_scan(priv, 1, true);
 		}
 
-
-		PRINT_D(CFG80211_DBG, "Association request info elements length = %zu\n", pstrConnectInfo->ReqIEsLen);
-
-		PRINT_D(CFG80211_DBG, "Association response info elements length = %d\n", pstrConnectInfo->u16RespIEsLen);
-
-		cfg80211_connect_result(dev, pstrConnectInfo->au8bssid,
-					pstrConnectInfo->pu8ReqIEs, pstrConnectInfo->ReqIEsLen,
-					pstrConnectInfo->pu8RespIEs, pstrConnectInfo->u16RespIEsLen,
+		cfg80211_connect_result(dev, pstrConnectInfo->bssid,
+					pstrConnectInfo->req_ies, pstrConnectInfo->req_ies_len,
+					pstrConnectInfo->resp_ies, pstrConnectInfo->resp_ies_len,
 					u16ConnectStatus, GFP_KERNEL);
 	} else if (enuConnDisconnEvent == CONN_DISCONN_EVENT_DISCONN_NOTIF)    {
 		wilc_optaining_ip = false;
-		PRINT_ER("Received MAC_DISCONNECTED from firmware with reason %d on dev [%p]\n",
-			 pstrDisconnectNotifInfo->u16reason, priv->dev);
 		p2p_local_random = 0x01;
 		p2p_recv_random = 0x00;
 		wilc_ie = false;
 		eth_zero_addr(priv->au8AssociatedBss);
-		wilc_wlan_set_bssid(priv->dev, NullBssid);
+		wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE);
 		eth_zero_addr(wilc_connected_ssid);
 
 		if (!pstrWFIDrv->p2p_connect)
 			wlan_channel = INVALID_CHANNEL;
 		if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
-			pstrDisconnectNotifInfo->u16reason = 3;
+			pstrDisconnectNotifInfo->reason = 3;
 		} else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
-			pstrDisconnectNotifInfo->u16reason = 1;
+			pstrDisconnectNotifInfo->reason = 1;
 		}
-		cfg80211_disconnected(dev, pstrDisconnectNotifInfo->u16reason, pstrDisconnectNotifInfo->ie,
+		cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
 				      pstrDisconnectNotifInfo->ie_len, false,
 				      GFP_KERNEL);
 	}
@@ -601,13 +581,12 @@
 	vif = netdev_priv(priv->dev);
 
 	channelnum = ieee80211_frequency_to_channel(chandef->chan->center_freq);
-	PRINT_D(CFG80211_DBG, "Setting channel %d with frequency %d\n", channelnum, chandef->chan->center_freq);
 
 	curr_channel = channelnum;
 	result = wilc_set_mac_chnl_num(vif, channelnum);
 
 	if (result != 0)
-		PRINT_ER("Error in setting channel %d\n", channelnum);
+		netdev_err(priv->dev, "Error in setting channel\n");
 
 	return result;
 }
@@ -628,38 +607,33 @@
 
 	priv->u32RcvdChCount = 0;
 
-	wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif));
 	reset_shadow_found();
 
 	priv->bCfgScanning = true;
 	if (request->n_channels <= MAX_NUM_SCANNED_NETWORKS) {
-		for (i = 0; i < request->n_channels; i++) {
+		for (i = 0; i < request->n_channels; i++)
 			au8ScanChanList[i] = (u8)ieee80211_frequency_to_channel(request->channels[i]->center_freq);
-			PRINT_INFO(CFG80211_DBG, "ScanChannel List[%d] = %d,", i, au8ScanChanList[i]);
-		}
-
-		PRINT_D(CFG80211_DBG, "Requested num of scan channel %d\n", request->n_channels);
-		PRINT_D(CFG80211_DBG, "Scan Request IE len =  %zu\n", request->ie_len);
-
-		PRINT_D(CFG80211_DBG, "Number of SSIDs %d\n", request->n_ssids);
 
 		if (request->n_ssids >= 1) {
-			strHiddenNetwork.pstrHiddenNetworkInfo = kmalloc(request->n_ssids * sizeof(struct hidden_network), GFP_KERNEL);
-			strHiddenNetwork.u8ssidnum = request->n_ssids;
+			strHiddenNetwork.net_info =
+				kmalloc_array(request->n_ssids,
+					      sizeof(struct hidden_network),
+					      GFP_KERNEL);
+			if (!strHiddenNetwork.net_info)
+				return -ENOMEM;
+			strHiddenNetwork.n_ssids = request->n_ssids;
 
 
 			for (i = 0; i < request->n_ssids; i++) {
 				if (request->ssids[i].ssid &&
 				    request->ssids[i].ssid_len != 0) {
-					strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
-					memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
-					strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen = request->ssids[i].ssid_len;
+					strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
+					memcpy(strHiddenNetwork.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
+					strHiddenNetwork.net_info[i].ssid_len = request->ssids[i].ssid_len;
 				} else {
-					PRINT_D(CFG80211_DBG, "Received one NULL SSID\n");
-					strHiddenNetwork.u8ssidnum -= 1;
+					strHiddenNetwork.n_ssids -= 1;
 				}
 			}
-			PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
 			s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
 					     au8ScanChanList,
 					     request->n_channels,
@@ -667,7 +641,6 @@
 					     request->ie_len, CfgScanResult,
 					     (void *)priv, &strHiddenNetwork);
 		} else {
-			PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
 			s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
 					     au8ScanChanList,
 					     request->n_channels,
@@ -676,14 +649,11 @@
 					     (void *)priv, NULL);
 		}
 	} else {
-		PRINT_ER("Requested num of scanned channels is greater than the max, supported"
-			 " channels\n");
+		netdev_err(priv->dev, "Requested scanned channels over\n");
 	}
 
-	if (s32Error != 0) {
+	if (s32Error != 0)
 		s32Error = -EBUSY;
-		PRINT_WRN(CFG80211_DBG, "Device is busy: Error(%d)\n", s32Error);
-	}
 
 	return s32Error;
 }
@@ -701,92 +671,52 @@
 
 	struct wilc_priv *priv;
 	struct host_if_drv *pstrWFIDrv;
-	tstrNetworkInfo *pstrNetworkInfo = NULL;
+	struct network_info *pstrNetworkInfo = NULL;
 	struct wilc_vif *vif;
 
 	wilc_connecting = 1;
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
-	pstrWFIDrv = (struct host_if_drv *)(priv->hWILCWFIDrv);
+	pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
 
-	wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif));
-
-	PRINT_D(CFG80211_DBG, "Connecting to SSID [%s] on netdev [%p] host if [%p]\n", sme->ssid, dev, priv->hWILCWFIDrv);
-	if (!(strncmp(sme->ssid, "DIRECT-", 7))) {
-		PRINT_D(CFG80211_DBG, "Connected to Direct network,OBSS disabled\n");
+	if (!(strncmp(sme->ssid, "DIRECT-", 7)))
 		pstrWFIDrv->p2p_connect = 1;
-	} else {
+	else
 		pstrWFIDrv->p2p_connect = 0;
-	}
-	PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d\n", sme->ssid, sme->auth_type);
 
 	for (i = 0; i < last_scanned_cnt; i++) {
-		if ((sme->ssid_len == last_scanned_shadow[i].u8SsidLen) &&
-		    memcmp(last_scanned_shadow[i].au8ssid,
+		if ((sme->ssid_len == last_scanned_shadow[i].ssid_len) &&
+		    memcmp(last_scanned_shadow[i].ssid,
 			   sme->ssid,
 			   sme->ssid_len) == 0) {
-			PRINT_INFO(CFG80211_DBG, "Network with required SSID is found %s\n", sme->ssid);
-			if (!sme->bssid) {
-				PRINT_INFO(CFG80211_DBG, "BSSID is not passed from the user\n");
+			if (!sme->bssid)
 				break;
-			} else {
-				if (memcmp(last_scanned_shadow[i].au8bssid,
+			else
+				if (memcmp(last_scanned_shadow[i].bssid,
 					   sme->bssid,
-					   ETH_ALEN) == 0) {
-					PRINT_INFO(CFG80211_DBG, "BSSID is passed from the user and matched\n");
+					   ETH_ALEN) == 0)
 					break;
-				}
-			}
 		}
 	}
 
 	if (i < last_scanned_cnt) {
-		PRINT_D(CFG80211_DBG, "Required bss is in scan results\n");
-
 		pstrNetworkInfo = &last_scanned_shadow[i];
-
-		PRINT_INFO(CFG80211_DBG, "network BSSID to be associated: %x%x%x%x%x%x\n",
-			   pstrNetworkInfo->au8bssid[0], pstrNetworkInfo->au8bssid[1],
-			   pstrNetworkInfo->au8bssid[2], pstrNetworkInfo->au8bssid[3],
-			   pstrNetworkInfo->au8bssid[4], pstrNetworkInfo->au8bssid[5]);
 	} else {
 		s32Error = -ENOENT;
-		if (last_scanned_cnt == 0)
-			PRINT_D(CFG80211_DBG, "No Scan results yet\n");
-		else
-			PRINT_D(CFG80211_DBG, "Required bss not in scan results: Error(%d)\n", s32Error);
-
-		goto done;
+		wilc_connecting = 0;
+		return s32Error;
 	}
 
-	priv->WILC_WFI_wep_default = 0;
 	memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
 	memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
 
-	PRINT_INFO(CFG80211_DBG, "sme->crypto.wpa_versions=%x\n", sme->crypto.wpa_versions);
-	PRINT_INFO(CFG80211_DBG, "sme->crypto.cipher_group=%x\n", sme->crypto.cipher_group);
-
-	PRINT_INFO(CFG80211_DBG, "sme->crypto.n_ciphers_pairwise=%d\n", sme->crypto.n_ciphers_pairwise);
-
-	if (INFO) {
-		for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++)
-			PRINT_D(CORECONFIG_DBG, "sme->crypto.ciphers_pairwise[%d]=%x\n", i, sme->crypto.ciphers_pairwise[i]);
-	}
-
 	if (sme->crypto.cipher_group != NO_ENCRYPT) {
 		pcwpa_version = "Default";
-		PRINT_D(CORECONFIG_DBG, ">> sme->crypto.wpa_versions: %x\n", sme->crypto.wpa_versions);
 		if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) {
 			u8security = ENCRYPT_ENABLED | WEP;
 			pcgroup_encrypt_val = "WEP40";
 			pccipher_group = "WLAN_CIPHER_SUITE_WEP40";
-			PRINT_INFO(CFG80211_DBG, "WEP Default Key Idx = %d\n", sme->key_idx);
 
-			if (INFO) {
-				for (i = 0; i < sme->key_len; i++)
-					PRINT_D(CORECONFIG_DBG, "WEP Key Value[%d] = %d\n", i, sme->key[i]);
-			}
-			priv->WILC_WFI_wep_default = sme->key_idx;
 			priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
 			memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
 
@@ -804,7 +734,6 @@
 			pcgroup_encrypt_val = "WEP104";
 			pccipher_group = "WLAN_CIPHER_SUITE_WEP104";
 
-			priv->WILC_WFI_wep_default = sme->key_idx;
 			priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
 			memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
 
@@ -842,9 +771,9 @@
 
 		} else {
 			s32Error = -ENOTSUPP;
-			PRINT_ER("Not supported cipher: Error(%d)\n", s32Error);
-
-			goto done;
+			netdev_err(dev, "Not supported cipher\n");
+			wilc_connecting = 0;
+			return s32Error;
 		}
 	}
 
@@ -859,22 +788,17 @@
 		}
 	}
 
-	PRINT_D(CFG80211_DBG, "Adding key with cipher group = %x\n", sme->crypto.cipher_group);
-
-	PRINT_D(CFG80211_DBG, "Authentication Type = %d\n", sme->auth_type);
 	switch (sme->auth_type)	{
 	case NL80211_AUTHTYPE_OPEN_SYSTEM:
-		PRINT_D(CFG80211_DBG, "In OPEN SYSTEM\n");
 		tenuAuth_type = OPEN_SYSTEM;
 		break;
 
 	case NL80211_AUTHTYPE_SHARED_KEY:
 		tenuAuth_type = SHARED_KEY;
-		PRINT_D(CFG80211_DBG, "In SHARED KEY\n");
 		break;
 
 	default:
-		PRINT_D(CFG80211_DBG, "Automatic Authentation type = %d\n", sme->auth_type);
+		break;
 	}
 
 	if (sme->crypto.n_akm_suites) {
@@ -888,33 +812,26 @@
 		}
 	}
 
-
-	PRINT_INFO(CFG80211_DBG, "Required Channel = %d\n", pstrNetworkInfo->u8channel);
-
-	PRINT_INFO(CFG80211_DBG, "Group encryption value = %s\n Cipher Group = %s\n WPA version = %s\n",
-		   pcgroup_encrypt_val, pccipher_group, pcwpa_version);
-
-	curr_channel = pstrNetworkInfo->u8channel;
+	curr_channel = pstrNetworkInfo->ch;
 
 	if (!pstrWFIDrv->p2p_connect)
-		wlan_channel = pstrNetworkInfo->u8channel;
+		wlan_channel = pstrNetworkInfo->ch;
 
-	wilc_wlan_set_bssid(dev, pstrNetworkInfo->au8bssid);
+	wilc_wlan_set_bssid(dev, pstrNetworkInfo->bssid, STATION_MODE);
 
-	s32Error = wilc_set_join_req(vif, pstrNetworkInfo->au8bssid, sme->ssid,
+	s32Error = wilc_set_join_req(vif, pstrNetworkInfo->bssid, sme->ssid,
 				     sme->ssid_len, sme->ie, sme->ie_len,
 				     CfgConnectResult, (void *)priv,
 				     u8security, tenuAuth_type,
-				     pstrNetworkInfo->u8channel,
-				     pstrNetworkInfo->pJoinParams);
+				     pstrNetworkInfo->ch,
+				     pstrNetworkInfo->join_params);
 	if (s32Error != 0) {
-		PRINT_ER("wilc_set_join_req(): Error(%d)\n", s32Error);
+		netdev_err(dev, "wilc_set_join_req(): Error\n");
 		s32Error = -ENOENT;
-		goto done;
+		wilc_connecting = 0;
+		return s32Error;
 	}
 
-done:
-
 	return s32Error;
 }
 
@@ -930,12 +847,10 @@
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+	pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
 	if (!pstrWFIDrv->p2p_connect)
 		wlan_channel = INVALID_CHANNEL;
-	wilc_wlan_set_bssid(priv->dev, NullBssid);
-
-	PRINT_D(CFG80211_DBG, "Disconnecting with reason code(%d)\n", reason_code);
+	wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE);
 
 	p2p_local_random = 0x01;
 	p2p_recv_random = 0x00;
@@ -944,7 +859,7 @@
 
 	s32Error = wilc_disconnect(vif, reason_code);
 	if (s32Error != 0) {
-		PRINT_ER("Error in disconnecting: Error(%d)\n", s32Error);
+		netdev_err(priv->dev, "Error in disconnecting\n");
 		s32Error = -EINVAL;
 	}
 
@@ -957,7 +872,6 @@
 
 {
 	s32 s32Error = 0, KeyLen = params->key_len;
-	u32 i;
 	struct wilc_priv *priv;
 	const u8 *pu8RxMic = NULL;
 	const u8 *pu8TxMic = NULL;
@@ -972,29 +886,13 @@
 	vif = netdev_priv(netdev);
 	wl = vif->wilc;
 
-	PRINT_D(CFG80211_DBG, "Adding key with cipher suite = %x\n", params->cipher);
-
-	PRINT_D(CFG80211_DBG, "%p %p %d\n", wiphy, netdev, key_index);
-
-	PRINT_D(CFG80211_DBG, "key %x %x %x\n", params->key[0],
-		params->key[1],
-		params->key[2]);
-
-
 	switch (params->cipher)	{
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
 		if (priv->wdev->iftype == NL80211_IFTYPE_AP) {
-			priv->WILC_WFI_wep_default = key_index;
 			priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
 			memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
 
-			PRINT_D(CFG80211_DBG, "Adding AP WEP Default key Idx = %d\n", key_index);
-			PRINT_D(CFG80211_DBG, "Adding AP WEP Key len= %d\n", params->key_len);
-
-			for (i = 0; i < params->key_len; i++)
-				PRINT_D(CFG80211_DBG, "WEP AP key val[%d] = %x\n", i, params->key[i]);
-
 			tenuAuth_type = OPEN_SYSTEM;
 
 			if (params->cipher == WLAN_CIPHER_SUITE_WEP40)
@@ -1008,16 +906,9 @@
 			break;
 		}
 		if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
-			priv->WILC_WFI_wep_default = key_index;
 			priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
 			memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
 
-			PRINT_D(CFG80211_DBG, "Adding WEP Default key Idx = %d\n", key_index);
-			PRINT_D(CFG80211_DBG, "Adding WEP Key length = %d\n", params->key_len);
-			if (INFO) {
-				for (i = 0; i < params->key_len; i++)
-					PRINT_INFO(CFG80211_DBG, "WEP key value[%d] = %d\n", i, params->key[i]);
-			}
 			wilc_add_wep_key_bss_sta(vif, params->key,
 						 params->key_len, key_index);
 		}
@@ -1068,22 +959,12 @@
 				priv->wilc_gtk[key_index]->key_len = params->key_len;
 				priv->wilc_gtk[key_index]->seq_len = params->seq_len;
 
-				if (INFO) {
-					for (i = 0; i < params->key_len; i++)
-						PRINT_INFO(CFG80211_DBG, "Adding group key value[%d] = %x\n", i, params->key[i]);
-					for (i = 0; i < params->seq_len; i++)
-						PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]);
-				}
-
-
 				wilc_add_rx_gtk(vif, params->key, KeyLen,
 						key_index, params->seq_len,
 						params->seq, pu8RxMic,
 						pu8TxMic, AP_MODE, u8gmode);
 
 			} else {
-				PRINT_INFO(CFG80211_DBG, "STA Address: %x%x%x%x%x\n", mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4]);
-
 				if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
 					u8pmode = ENCRYPT_ENABLED | WPA | TKIP;
 				else
@@ -1105,14 +986,6 @@
 				if ((params->seq_len) > 0)
 					priv->wilc_ptk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL);
 
-				if (INFO) {
-					for (i = 0; i < params->key_len; i++)
-						PRINT_INFO(CFG80211_DBG, "Adding pairwise key value[%d] = %x\n", i, params->key[i]);
-
-					for (i = 0; i < params->seq_len; i++)
-						PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]);
-				}
-
 				memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
 
 				if ((params->seq_len) > 0)
@@ -1156,10 +1029,6 @@
 						memcpy(g_key_gtk_params.seq, params->seq, params->seq_len);
 					}
 					g_key_gtk_params.cipher = params->cipher;
-
-					PRINT_D(CFG80211_DBG, "key %x %x %x\n", g_key_gtk_params.key[0],
-						g_key_gtk_params.key[1],
-						g_key_gtk_params.key[2]);
 					g_gtk_keys_saved = true;
 				}
 
@@ -1193,27 +1062,18 @@
 						memcpy(g_key_ptk_params.seq, params->seq, params->seq_len);
 					}
 					g_key_ptk_params.cipher = params->cipher;
-
-					PRINT_D(CFG80211_DBG, "key %x %x %x\n", g_key_ptk_params.key[0],
-						g_key_ptk_params.key[1],
-						g_key_ptk_params.key[2]);
 					g_ptk_keys_saved = true;
 				}
 
 				wilc_add_ptk(vif, params->key, KeyLen,
 					     mac_addr, pu8RxMic, pu8TxMic,
 					     STATION_MODE, u8mode, key_index);
-				PRINT_D(CFG80211_DBG, "Adding pairwise key\n");
-				if (INFO) {
-					for (i = 0; i < params->key_len; i++)
-						PRINT_INFO(CFG80211_DBG, "Adding pairwise key value[%d] = %d\n", i, params->key[i]);
-				}
 			}
 		}
 		break;
 
 	default:
-		PRINT_ER("Not supported cipher: Error(%d)\n", s32Error);
+		netdev_err(netdev, "Not supported cipher\n");
 		s32Error = -ENOTSUPP;
 	}
 
@@ -1270,18 +1130,14 @@
 		kfree(g_key_gtk_params.seq);
 		g_key_gtk_params.seq = NULL;
 
-		wilc_set_machw_change_vir_if(netdev, false);
 	}
 
 	if (key_index >= 0 && key_index <= 3) {
 		memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
 		priv->WILC_WFI_wep_key_len[key_index] = 0;
-
-		PRINT_D(CFG80211_DBG, "Removing WEP key with index = %d\n", key_index);
 		wilc_remove_wep_key(vif, key_index);
 	} else {
-		PRINT_D(CFG80211_DBG, "Removing all installed keys\n");
-		wilc_remove_key(priv->hWILCWFIDrv, mac_addr);
+		wilc_remove_key(priv->hif_drv, mac_addr);
 	}
 
 	return 0;
@@ -1293,26 +1149,17 @@
 {
 	struct wilc_priv *priv;
 	struct  key_params key_params;
-	u32 i;
 
 	priv = wiphy_priv(wiphy);
 
 
 	if (!pairwise) {
-		PRINT_D(CFG80211_DBG, "Getting group key idx: %x\n", key_index);
-
 		key_params.key = priv->wilc_gtk[key_index]->key;
 		key_params.cipher = priv->wilc_gtk[key_index]->cipher;
 		key_params.key_len = priv->wilc_gtk[key_index]->key_len;
 		key_params.seq = priv->wilc_gtk[key_index]->seq;
 		key_params.seq_len = priv->wilc_gtk[key_index]->seq_len;
-		if (INFO) {
-			for (i = 0; i < key_params.key_len; i++)
-				PRINT_INFO(CFG80211_DBG, "Retrieved key value %x\n", key_params.key[i]);
-		}
 	} else {
-		PRINT_D(CFG80211_DBG, "Getting pairwise  key\n");
-
 		key_params.key = priv->wilc_ptk[key_index]->key;
 		key_params.cipher = priv->wilc_ptk[key_index]->cipher;
 		key_params.key_len = priv->wilc_ptk[key_index]->key_len;
@@ -1334,11 +1181,7 @@
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	PRINT_D(CFG80211_DBG, "Setting default key with idx = %d\n", key_index);
-
-	if (key_index != priv->WILC_WFI_wep_default) {
-		wilc_set_wep_default_keyid(vif, key_index);
-	}
+	wilc_set_wep_default_keyid(vif, key_index);
 
 	return 0;
 }
@@ -1355,10 +1198,6 @@
 	vif = netdev_priv(dev);
 
 	if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
-		PRINT_D(HOSTAPD_DBG, "Getting station parameters\n");
-
-		PRINT_INFO(HOSTAPD_DBG, ": %x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4]);
-
 		for (i = 0; i < NUM_STA_ASSOCIATED; i++) {
 			if (!(memcmp(mac, priv->assoc_stainfo.au8Sta_AssociatedBss[i], ETH_ALEN))) {
 				associatedsta = i;
@@ -1367,7 +1206,7 @@
 		}
 
 		if (associatedsta == -1) {
-			PRINT_ER("Station required is not associated\n");
+			netdev_err(dev, "sta required is not associated\n");
 			return -ENOENT;
 		}
 
@@ -1375,7 +1214,6 @@
 
 		wilc_get_inactive_time(vif, mac, &inactive_time);
 		sinfo->inactive_time = 1000 * inactive_time;
-		PRINT_D(CFG80211_DBG, "Inactive time %d\n", sinfo->inactive_time);
 	}
 
 	if (vif->iftype == STATION_MODE) {
@@ -1400,9 +1238,6 @@
 			wilc_enable_tcp_ack_filter(true);
 		else if (strStatistics.link_speed != DEFAULT_LINK_SPEED)
 			wilc_enable_tcp_ack_filter(false);
-
-		PRINT_D(CORECONFIG_DBG, "*** stats[%d][%d][%d][%d][%d]\n", sinfo->signal, sinfo->rx_packets, sinfo->tx_packets,
-			sinfo->tx_failed, sinfo->txrate.legacy);
 	}
 	return 0;
 }
@@ -1410,14 +1245,13 @@
 static int change_bss(struct wiphy *wiphy, struct net_device *dev,
 		      struct bss_parameters *params)
 {
-	PRINT_D(CFG80211_DBG, "Changing Bss parametrs\n");
 	return 0;
 }
 
 static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
 {
 	s32 s32Error = 0;
-	struct cfg_param_val pstrCfgParamVal;
+	struct cfg_param_attr pstrCfgParamVal;
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
 
@@ -1425,37 +1259,28 @@
 	vif = netdev_priv(priv->dev);
 
 	pstrCfgParamVal.flag = 0;
-	PRINT_D(CFG80211_DBG, "Setting Wiphy params\n");
 
 	if (changed & WIPHY_PARAM_RETRY_SHORT) {
-		PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_SHORT %d\n",
-			priv->dev->ieee80211_ptr->wiphy->retry_short);
 		pstrCfgParamVal.flag  |= RETRY_SHORT;
 		pstrCfgParamVal.short_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_short;
 	}
 	if (changed & WIPHY_PARAM_RETRY_LONG) {
-		PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_LONG %d\n", priv->dev->ieee80211_ptr->wiphy->retry_long);
 		pstrCfgParamVal.flag |= RETRY_LONG;
 		pstrCfgParamVal.long_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_long;
 	}
 	if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
-		PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_FRAG_THRESHOLD %d\n", priv->dev->ieee80211_ptr->wiphy->frag_threshold);
 		pstrCfgParamVal.flag |= FRAG_THRESHOLD;
 		pstrCfgParamVal.frag_threshold = priv->dev->ieee80211_ptr->wiphy->frag_threshold;
 	}
 
 	if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
-		PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RTS_THRESHOLD %d\n", priv->dev->ieee80211_ptr->wiphy->rts_threshold);
-
 		pstrCfgParamVal.flag |= RTS_THRESHOLD;
 		pstrCfgParamVal.rts_threshold = priv->dev->ieee80211_ptr->wiphy->rts_threshold;
 	}
 
-	PRINT_D(CFG80211_DBG, "Setting CFG params in the host interface\n");
 	s32Error = wilc_hif_set_cfg(vif, &pstrCfgParamVal);
 	if (s32Error)
-		PRINT_ER("Error in setting WIPHY PARAMS\n");
-
+		netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
 
 	return s32Error;
 }
@@ -1470,19 +1295,16 @@
 	struct wilc_priv *priv = wiphy_priv(wiphy);
 
 	vif = netdev_priv(priv->dev);
-	PRINT_D(CFG80211_DBG, "Setting PMKSA\n");
 
 
 	for (i = 0; i < priv->pmkid_list.numpmkid; i++)	{
 		if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
 				 ETH_ALEN)) {
 			flag = PMKID_FOUND;
-			PRINT_D(CFG80211_DBG, "PMKID already exists\n");
 			break;
 		}
 	}
 	if (i < WILC_MAX_NUM_PMKIDS) {
-		PRINT_D(CFG80211_DBG, "Setting PMKID in private structure\n");
 		memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
 			    ETH_ALEN);
 		memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
@@ -1490,14 +1312,13 @@
 		if (!(flag == PMKID_FOUND))
 			priv->pmkid_list.numpmkid++;
 	} else {
-		PRINT_ER("Invalid PMKID index\n");
+		netdev_err(netdev, "Invalid PMKID index\n");
 		s32Error = -EINVAL;
 	}
 
-	if (!s32Error) {
-		PRINT_D(CFG80211_DBG, "Setting pmkid in the host interface\n");
+	if (!s32Error)
 		s32Error = wilc_set_pmkid_info(vif, &priv->pmkid_list);
-	}
+
 	return s32Error;
 }
 
@@ -1509,12 +1330,9 @@
 
 	struct wilc_priv *priv = wiphy_priv(wiphy);
 
-	PRINT_D(CFG80211_DBG, "Deleting PMKSA keys\n");
-
 	for (i = 0; i < priv->pmkid_list.numpmkid; i++)	{
 		if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
 				 ETH_ALEN)) {
-			PRINT_D(CFG80211_DBG, "Reseting PMKID values\n");
 			memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(struct host_if_pmkid));
 			break;
 		}
@@ -1541,8 +1359,6 @@
 {
 	struct wilc_priv *priv = wiphy_priv(wiphy);
 
-	PRINT_D(CFG80211_DBG,  "Flushing  PMKID key values\n");
-
 	memset(&priv->pmkid_list, 0, sizeof(struct host_if_pmkid_attr));
 
 	return 0;
@@ -1569,7 +1385,6 @@
 	}
 	if (wlan_channel != INVALID_CHANNEL) {
 		if (channel_list_attr_index) {
-			PRINT_D(GENERIC_DBG, "Modify channel list attribute\n");
 			for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
 				if (buf[i] == 0x51) {
 					for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
@@ -1581,7 +1396,6 @@
 		}
 
 		if (op_channel_attr_index) {
-			PRINT_D(GENERIC_DBG, "Modify operating channel attribute\n");
 			buf[op_channel_attr_index + 6] = 0x51;
 			buf[op_channel_attr_index + 7] = wlan_channel;
 		}
@@ -1611,7 +1425,6 @@
 	}
 	if (wlan_channel != INVALID_CHANNEL && bOperChan) {
 		if (channel_list_attr_index) {
-			PRINT_D(GENERIC_DBG, "Modify channel list attribute\n");
 			for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
 				if (buf[i] == 0x51) {
 					for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
@@ -1623,7 +1436,6 @@
 		}
 
 		if (op_channel_attr_index) {
-			PRINT_D(GENERIC_DBG, "Modify operating channel attribute\n");
 			buf[op_channel_attr_index + 6] = 0x51;
 			buf[op_channel_attr_index + 7] = wlan_channel;
 		}
@@ -1639,7 +1451,7 @@
 	s32 s32Freq;
 
 	priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
-	pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+	pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
 
 	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
 
@@ -1647,41 +1459,29 @@
 
 	if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
 		if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP) {
-			PRINT_D(GENERIC_DBG, "Probe response ACK\n");
 			cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL);
 			return;
 		} else {
-			if (pkt_offset & IS_MGMT_STATUS_SUCCES)	{
-				PRINT_D(GENERIC_DBG, "Success Ack - Action frame category: %x Action Subtype: %d Dialog T: %x OR %x\n", buff[ACTION_CAT_ID], buff[ACTION_SUBTYPE_ID],
-					buff[ACTION_SUBTYPE_ID + 1], buff[P2P_PUB_ACTION_SUBTYPE + 1]);
+			if (pkt_offset & IS_MGMT_STATUS_SUCCES)
 				cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL);
-			} else {
-				PRINT_D(GENERIC_DBG, "Fail Ack - Action frame category: %x Action Subtype: %d Dialog T: %x OR %x\n", buff[ACTION_CAT_ID], buff[ACTION_SUBTYPE_ID],
-					buff[ACTION_SUBTYPE_ID + 1], buff[P2P_PUB_ACTION_SUBTYPE + 1]);
+			else
 				cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, false, GFP_KERNEL);
-			}
 			return;
 		}
 	} else {
-		PRINT_D(GENERIC_DBG, "Rx Frame Type:%x\n", buff[FRAME_TYPE_ID]);
-
 		s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ);
 
 		if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
-			PRINT_D(GENERIC_DBG, "Rx Action Frame Type: %x %x\n", buff[ACTION_SUBTYPE_ID], buff[P2P_PUB_ACTION_SUBTYPE]);
-
 			if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
-				PRINT_D(GENERIC_DBG, "Receiving action frames from wrong channels\n");
+				netdev_dbg(dev, "Receiving action wrong ch\n");
 				return;
 			}
 			if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
 				switch (buff[ACTION_SUBTYPE_ID]) {
 				case GAS_INTIAL_REQ:
-					PRINT_D(GENERIC_DBG, "GAS INITIAL REQ %x\n", buff[ACTION_SUBTYPE_ID]);
 					break;
 
 				case GAS_INTIAL_RSP:
-					PRINT_D(GENERIC_DBG, "GAS INITIAL RSP %x\n", buff[ACTION_SUBTYPE_ID]);
 					break;
 
 				case PUBLIC_ACT_VENDORSPEC:
@@ -1692,7 +1492,6 @@
 									if (!memcmp(p2p_vendor_spec, &buff[i], 6)) {
 										p2p_recv_random = buff[i + 6];
 										wilc_ie = true;
-										PRINT_D(GENERIC_DBG, "WILC Vendor specific IE:%02x\n", p2p_recv_random);
 										break;
 									}
 								}
@@ -1709,32 +1508,31 @@
 								}
 							}
 						} else {
-							PRINT_D(GENERIC_DBG, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
+							netdev_dbg(dev, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
 						}
 					}
 
 
 					if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP) && (wilc_ie))	{
-						PRINT_D(GENERIC_DBG, "Sending P2P to host without extra elemnt\n");
 						cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0);
 						return;
 					}
 					break;
 
 				default:
-					PRINT_D(GENERIC_DBG, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]);
+					netdev_dbg(dev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]);
 					break;
 				}
 			}
 		}
 
-		cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0);
+		cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size, 0);
 	}
 }
 
 static void WILC_WFI_mgmt_tx_complete(void *priv, int status)
 {
-	struct p2p_mgmt_data *pv_data = (struct p2p_mgmt_data *)priv;
+	struct p2p_mgmt_data *pv_data = priv;
 
 
 	kfree(pv_data->buff);
@@ -1745,9 +1543,7 @@
 {
 	struct wilc_priv *priv;
 
-	priv = (struct wilc_priv *)pUserVoid;
-
-	PRINT_D(HOSTINF_DBG, "Remain on channel ready\n");
+	priv = pUserVoid;
 
 	priv->bInP2PlistenState = true;
 
@@ -1762,20 +1558,15 @@
 {
 	struct wilc_priv *priv;
 
-	priv = (struct wilc_priv *)pUserVoid;
+	priv = pUserVoid;
 
 	if (u32SessionID == priv->strRemainOnChanParams.u32ListenSessionID) {
-		PRINT_D(GENERIC_DBG, "Remain on channel expired\n");
-
 		priv->bInP2PlistenState = false;
 
 		cfg80211_remain_on_channel_expired(priv->wdev,
 						   priv->strRemainOnChanParams.u64ListenCookie,
 						   priv->strRemainOnChanParams.pstrListenChan,
 						   GFP_KERNEL);
-	} else {
-		PRINT_D(GENERIC_DBG, "Received ID 0x%x Expected ID 0x%x (No match)\n", u32SessionID
-			, priv->strRemainOnChanParams.u32ListenSessionID);
 	}
 }
 
@@ -1791,11 +1582,8 @@
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	PRINT_D(GENERIC_DBG, "Remaining on channel %d\n", chan->hw_value);
-
-
 	if (wdev->iftype == NL80211_IFTYPE_AP) {
-		PRINT_D(GENERIC_DBG, "Required remain-on-channel while in AP mode");
+		netdev_dbg(vif->ndev, "Required while in AP mode\n");
 		return s32Error;
 	}
 
@@ -1826,8 +1614,6 @@
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	PRINT_D(CFG80211_DBG, "Cancel remain on channel\n");
-
 	s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID);
 	return s32Error;
 }
@@ -1851,7 +1637,7 @@
 
 	vif = netdev_priv(wdev->netdev);
 	priv = wiphy_priv(wiphy);
-	pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+	pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
 
 	*cookie = (unsigned long)buf;
 	priv->u64tx_cookie = *cookie;
@@ -1859,49 +1645,36 @@
 
 	if (ieee80211_is_mgmt(mgmt->frame_control)) {
 		mgmt_tx = kmalloc(sizeof(struct p2p_mgmt_data), GFP_KERNEL);
-		if (!mgmt_tx) {
-			PRINT_ER("Failed to allocate memory for mgmt_tx structure\n");
+		if (!mgmt_tx)
 			return -EFAULT;
-		}
+
 		mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL);
 		if (!mgmt_tx->buff) {
-			PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
 			kfree(mgmt_tx);
-			return -EFAULT;
+			return -ENOMEM;
 		}
+
 		memcpy(mgmt_tx->buff, buf, len);
 		mgmt_tx->size = len;
 
 
 		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
-			PRINT_D(GENERIC_DBG, "TX: Probe Response\n");
-			PRINT_D(GENERIC_DBG, "Setting channel: %d\n", chan->hw_value);
 			wilc_set_mac_chnl_num(vif, chan->hw_value);
 			curr_channel = chan->hw_value;
 		} else if (ieee80211_is_action(mgmt->frame_control))   {
-			PRINT_D(GENERIC_DBG, "ACTION FRAME:%x\n", (u16)mgmt->frame_control);
-
-
 			if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
 				if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC ||
 				    buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF)	{
-					PRINT_D(GENERIC_DBG, "Setting channel: %d\n", chan->hw_value);
 					wilc_set_mac_chnl_num(vif,
 							      chan->hw_value);
 					curr_channel = chan->hw_value;
 				}
 				switch (buf[ACTION_SUBTYPE_ID])	{
 				case GAS_INTIAL_REQ:
-				{
-					PRINT_D(GENERIC_DBG, "GAS INITIAL REQ %x\n", buf[ACTION_SUBTYPE_ID]);
 					break;
-				}
 
 				case GAS_INTIAL_RSP:
-				{
-					PRINT_D(GENERIC_DBG, "GAS INITIAL RSP %x\n", buf[ACTION_SUBTYPE_ID]);
 					break;
-				}
 
 				case PUBLIC_ACT_VENDORSPEC:
 				{
@@ -1916,8 +1689,6 @@
 						if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP
 						      || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
 							if (p2p_local_random > p2p_recv_random)	{
-								PRINT_D(GENERIC_DBG, "LOCAL WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
-
 								for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
 									if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buf[i + 2], 4))) {
 										if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)
@@ -1933,13 +1704,11 @@
 									mgmt_tx->buff[len + sizeof(p2p_vendor_spec)] = p2p_local_random;
 									mgmt_tx->size = buf_len;
 								}
-							} else {
-								PRINT_D(GENERIC_DBG, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
 							}
 						}
 
 					} else {
-						PRINT_D(GENERIC_DBG, "Not a P2P public action frame\n");
+						netdev_dbg(vif->ndev, "Not a P2P public action frame\n");
 					}
 
 					break;
@@ -1947,24 +1716,18 @@
 
 				default:
 				{
-					PRINT_D(GENERIC_DBG, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]);
+					netdev_dbg(vif->ndev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]);
 					break;
 				}
 				}
 			}
 
-			PRINT_D(GENERIC_DBG, "TX: ACTION FRAME Type:%x : Chan:%d\n", buf[ACTION_SUBTYPE_ID], chan->hw_value);
 			pstrWFIDrv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
-
-			PRINT_D(GENERIC_DBG, "Current Jiffies: %lu Timeout:%llu\n",
-				jiffies, pstrWFIDrv->p2p_timeout);
 		}
 
 		wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx,
 					   mgmt_tx->buff, mgmt_tx->size,
 					   WILC_WFI_mgmt_tx_complete);
-	} else {
-		PRINT_D(GENERIC_DBG, "This function transmits only management frames\n");
 	}
 	return 0;
 }
@@ -1977,10 +1740,7 @@
 	struct host_if_drv *pstrWFIDrv;
 
 	priv = wiphy_priv(wiphy);
-	pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
-
-
-	PRINT_D(GENERIC_DBG, "Tx Cancel wait :%lu\n", jiffies);
+	pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
 	pstrWFIDrv->p2p_timeout = jiffies;
 
 	if (!priv->bInP2PlistenState) {
@@ -2007,7 +1767,6 @@
 	if (!frame_type)
 		return;
 
-	PRINT_D(GENERIC_DBG, "Frame registering Frame Type: %x: Boolean: %d\n", frame_type, reg);
 	switch (frame_type) {
 	case PROBE_REQ:
 	{
@@ -2029,17 +1788,14 @@
 	}
 	}
 
-	if (!wl->initialized) {
-		PRINT_D(GENERIC_DBG, "Return since mac is closed\n");
+	if (!wl->initialized)
 		return;
-	}
 	wilc_frame_register(vif, frame_type, reg);
 }
 
 static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
 			       s32 rssi_thold, u32 rssi_hyst)
 {
-	PRINT_D(CFG80211_DBG, "Setting CQM RSSi Function\n");
 	return 0;
 }
 
@@ -2049,8 +1805,6 @@
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
 
-	PRINT_D(CFG80211_DBG, "Dumping station information\n");
-
 	if (idx != 0)
 		return -ENOENT;
 
@@ -2070,17 +1824,13 @@
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
 
-	PRINT_D(CFG80211_DBG, " Power save Enabled= %d , TimeOut = %d\n", enabled, timeout);
-
 	if (!wiphy)
 		return -ENOENT;
 
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
-	if (!priv->hWILCWFIDrv) {
-		PRINT_ER("Driver is NULL\n");
+	if (!priv->hif_drv)
 		return -EIO;
-	}
 
 	if (wilc_enable_ps)
 		wilc_set_power_mgmt(vif, enabled, timeout);
@@ -2094,286 +1844,73 @@
 {
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
-	u8 interface_type;
-	u16 TID = 0;
-	u8 i;
 	struct wilc *wl;
 
 	vif = netdev_priv(dev);
 	priv = wiphy_priv(wiphy);
 	wl = vif->wilc;
-
-	PRINT_D(HOSTAPD_DBG, "In Change virtual interface function\n");
-	PRINT_D(HOSTAPD_DBG, "Wireless interface name =%s\n", dev->name);
 	p2p_local_random = 0x01;
 	p2p_recv_random = 0x00;
 	wilc_ie = false;
 	wilc_optaining_ip = false;
 	del_timer(&wilc_during_ip_timer);
-	PRINT_D(GENERIC_DBG, "Changing virtual interface, enable scan\n");
-
-	if (g_ptk_keys_saved && g_gtk_keys_saved) {
-		wilc_set_machw_change_vir_if(dev, true);
-	}
 
 	switch (type) {
 	case NL80211_IFTYPE_STATION:
 		wilc_connecting = 0;
-		PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_STATION\n");
-
 		dev->ieee80211_ptr->iftype = type;
 		priv->wdev->iftype = type;
 		vif->monitor_flag = 0;
 		vif->iftype = STATION_MODE;
+		wilc_set_operation_mode(vif, STATION_MODE);
 
 		memset(priv->assoc_stainfo.au8Sta_AssociatedBss, 0, MAX_NUM_STA * ETH_ALEN);
-		interface_type = vif->iftype;
-		vif->iftype = STATION_MODE;
 
-		if (wl->initialized) {
-			wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid,
-						   TID);
-			wilc_wait_msg_queue_idle();
-
-			up(&wl->cfg_event);
-
-			wilc1000_wlan_deinit(dev);
-			wilc1000_wlan_init(dev, vif);
-			wilc_initialized = 1;
-			vif->iftype = interface_type;
-
-			wilc_set_wfi_drv_handler(vif,
-						 wilc_get_vif_idx(wl->vif[0]));
-			wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr);
-			wilc_set_operation_mode(vif, STATION_MODE);
-
-			if (g_wep_keys_saved) {
-				wilc_set_wep_default_keyid(wl->vif[0],
-						g_key_wep_params.key_idx);
-				wilc_add_wep_key_bss_sta(wl->vif[0],
-						g_key_wep_params.key,
-						g_key_wep_params.key_len,
-						g_key_wep_params.key_idx);
-			}
-
-			wilc_flush_join_req(vif);
-
-			if (g_ptk_keys_saved && g_gtk_keys_saved) {
-				PRINT_D(CFG80211_DBG, "ptk %x %x %x\n", g_key_ptk_params.key[0],
-					g_key_ptk_params.key[1],
-					g_key_ptk_params.key[2]);
-				PRINT_D(CFG80211_DBG, "gtk %x %x %x\n", g_key_gtk_params.key[0],
-					g_key_gtk_params.key[1],
-					g_key_gtk_params.key[2]);
-				add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
-					wl->vif[0]->ndev,
-					g_add_ptk_key_params.key_idx,
-					g_add_ptk_key_params.pairwise,
-					g_add_ptk_key_params.mac_addr,
-					(struct key_params *)(&g_key_ptk_params));
-
-				add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
-					wl->vif[0]->ndev,
-					g_add_gtk_key_params.key_idx,
-					g_add_gtk_key_params.pairwise,
-					g_add_gtk_key_params.mac_addr,
-					(struct key_params *)(&g_key_gtk_params));
-			}
-
-			if (wl->initialized)	{
-				for (i = 0; i < num_reg_frame; i++) {
-					PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
-						vif->g_struct_frame_reg[i].reg);
-					wilc_frame_register(vif,
-								vif->g_struct_frame_reg[i].frame_type,
-								vif->g_struct_frame_reg[i].reg);
-				}
-			}
-
-			wilc_enable_ps = true;
-			wilc_set_power_mgmt(vif, 1, 0);
-		}
+		wilc_enable_ps = true;
+		wilc_set_power_mgmt(vif, 1, 0);
 		break;
 
 	case NL80211_IFTYPE_P2P_CLIENT:
-		wilc_enable_ps = false;
-		wilc_set_power_mgmt(vif, 0, 0);
 		wilc_connecting = 0;
-		PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_P2P_CLIENT\n");
-
-		wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, TID);
-
 		dev->ieee80211_ptr->iftype = type;
 		priv->wdev->iftype = type;
 		vif->monitor_flag = 0;
-
-		PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
 		vif->iftype = CLIENT_MODE;
+		wilc_set_operation_mode(vif, STATION_MODE);
 
-
-		if (wl->initialized)	{
-			wilc_wait_msg_queue_idle();
-
-			wilc1000_wlan_deinit(dev);
-			wilc1000_wlan_init(dev, vif);
-			wilc_initialized = 1;
-
-			wilc_set_wfi_drv_handler(vif,
-						 wilc_get_vif_idx(wl->vif[0]));
-			wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr);
-			wilc_set_operation_mode(vif, STATION_MODE);
-
-			if (g_wep_keys_saved) {
-				wilc_set_wep_default_keyid(wl->vif[0],
-						g_key_wep_params.key_idx);
-				wilc_add_wep_key_bss_sta(wl->vif[0],
-						g_key_wep_params.key,
-						g_key_wep_params.key_len,
-						g_key_wep_params.key_idx);
-			}
-
-			wilc_flush_join_req(vif);
-
-			if (g_ptk_keys_saved && g_gtk_keys_saved) {
-				PRINT_D(CFG80211_DBG, "ptk %x %x %x\n", g_key_ptk_params.key[0],
-					g_key_ptk_params.key[1],
-					g_key_ptk_params.key[2]);
-				PRINT_D(CFG80211_DBG, "gtk %x %x %x\n", g_key_gtk_params.key[0],
-					g_key_gtk_params.key[1],
-					g_key_gtk_params.key[2]);
-				add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
-					wl->vif[0]->ndev,
-					g_add_ptk_key_params.key_idx,
-					g_add_ptk_key_params.pairwise,
-					g_add_ptk_key_params.mac_addr,
-					(struct key_params *)(&g_key_ptk_params));
-
-				add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
-					wl->vif[0]->ndev,
-					g_add_gtk_key_params.key_idx,
-					g_add_gtk_key_params.pairwise,
-					g_add_gtk_key_params.mac_addr,
-					(struct key_params *)(&g_key_gtk_params));
-			}
-
-			refresh_scan(priv, 1, true);
-			wilc_set_machw_change_vir_if(dev, false);
-
-			if (wl->initialized)	{
-				for (i = 0; i < num_reg_frame; i++) {
-					PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
-						vif->g_struct_frame_reg[i].reg);
-					wilc_frame_register(vif,
-								vif->g_struct_frame_reg[i].frame_type,
-								vif->g_struct_frame_reg[i].reg);
-				}
-			}
-		}
+		wilc_enable_ps = false;
+		wilc_set_power_mgmt(vif, 0, 0);
 		break;
 
 	case NL80211_IFTYPE_AP:
 		wilc_enable_ps = false;
-		PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_AP %d\n", type);
 		dev->ieee80211_ptr->iftype = type;
 		priv->wdev->iftype = type;
 		vif->iftype = AP_MODE;
-		PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
 
-		PRINT_D(HOSTAPD_DBG, "Downloading AP firmware\n");
-		wilc_wlan_get_firmware(dev);
-
-		if (wl->initialized)	{
-			vif->iftype = AP_MODE;
-			wilc_mac_close(dev);
-			wilc_mac_open(dev);
-
-			for (i = 0; i < num_reg_frame; i++) {
-				PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
-					vif->g_struct_frame_reg[i].reg);
-				wilc_frame_register(vif,
-							vif->g_struct_frame_reg[i].frame_type,
-							vif->g_struct_frame_reg[i].reg);
-			}
+		if (wl->initialized) {
+			wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif),
+						 0);
+			wilc_set_operation_mode(vif, AP_MODE);
+			wilc_set_power_mgmt(vif, 0, 0);
 		}
 		break;
 
 	case NL80211_IFTYPE_P2P_GO:
-		PRINT_D(GENERIC_DBG, "start duringIP timer\n");
-
 		wilc_optaining_ip = true;
 		mod_timer(&wilc_during_ip_timer,
 			  jiffies + msecs_to_jiffies(during_ip_time));
-		wilc_set_power_mgmt(vif, 0, 0);
-		wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, TID);
-		wilc_enable_ps = false;
-		PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_GO\n");
+		wilc_set_operation_mode(vif, AP_MODE);
 		dev->ieee80211_ptr->iftype = type;
 		priv->wdev->iftype = type;
-
-		PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
-
-		PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
-
-
 		vif->iftype = GO_MODE;
 
-		wilc_wait_msg_queue_idle();
-		wilc1000_wlan_deinit(dev);
-		wilc1000_wlan_init(dev, vif);
-		wilc_initialized = 1;
-
-		wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(wl->vif[0]));
-		wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr);
-		wilc_set_operation_mode(vif, AP_MODE);
-
-		if (g_wep_keys_saved) {
-			wilc_set_wep_default_keyid(wl->vif[0],
-						   g_key_wep_params.key_idx);
-			wilc_add_wep_key_bss_sta(wl->vif[0],
-						 g_key_wep_params.key,
-						 g_key_wep_params.key_len,
-						 g_key_wep_params.key_idx);
-		}
-
-		wilc_flush_join_req(vif);
-
-		if (g_ptk_keys_saved && g_gtk_keys_saved) {
-			PRINT_D(CFG80211_DBG, "ptk %x %x %x cipher %x\n", g_key_ptk_params.key[0],
-				g_key_ptk_params.key[1],
-				g_key_ptk_params.key[2],
-				g_key_ptk_params.cipher);
-			PRINT_D(CFG80211_DBG, "gtk %x %x %x cipher %x\n", g_key_gtk_params.key[0],
-				g_key_gtk_params.key[1],
-				g_key_gtk_params.key[2],
-				g_key_gtk_params.cipher);
-			add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
-				wl->vif[0]->ndev,
-				g_add_ptk_key_params.key_idx,
-				g_add_ptk_key_params.pairwise,
-				g_add_ptk_key_params.mac_addr,
-				(struct key_params *)(&g_key_ptk_params));
-
-			add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
-				wl->vif[0]->ndev,
-				g_add_gtk_key_params.key_idx,
-				g_add_gtk_key_params.pairwise,
-				g_add_gtk_key_params.mac_addr,
-				(struct key_params *)(&g_key_gtk_params));
-		}
-
-		if (wl->initialized)	{
-			for (i = 0; i < num_reg_frame; i++) {
-				PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
-					vif->g_struct_frame_reg[i].reg);
-				wilc_frame_register(vif,
-							vif->g_struct_frame_reg[i].frame_type,
-							vif->g_struct_frame_reg[i].reg);
-			}
-		}
+		wilc_enable_ps = false;
+		wilc_set_power_mgmt(vif, 0, 0);
 		break;
 
 	default:
-		PRINT_ER("Unknown interface type= %d\n", type);
+		netdev_err(dev, "Unknown interface type= %d\n", type);
 		return -EINVAL;
 	}
 
@@ -2391,18 +1928,15 @@
 
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(dev);
-	wl = vif ->wilc;
-	PRINT_D(HOSTAPD_DBG, "Starting ap\n");
-
-	PRINT_D(HOSTAPD_DBG, "Interval = %d\n DTIM period = %d\n Head length = %zu Tail length = %zu\n",
-		settings->beacon_interval, settings->dtim_period, beacon->head_len, beacon->tail_len);
+	wl = vif->wilc;
 
 	s32Error = set_channel(wiphy, &settings->chandef);
 
 	if (s32Error != 0)
-		PRINT_ER("Error in setting channel\n");
+		netdev_err(dev, "Error in setting channel\n");
 
-	wilc_wlan_set_bssid(dev, wl->vif[0]->src_addr);
+	wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
+	wilc_set_power_mgmt(vif, 0, 0);
 
 	s32Error = wilc_add_beacon(vif, settings->beacon_interval,
 				   settings->dtim_period, beacon->head_len,
@@ -2421,8 +1955,6 @@
 
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
-	PRINT_D(HOSTAPD_DBG, "Setting beacon\n");
-
 
 	s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len,
 				   (u8 *)beacon->head, beacon->tail_len,
@@ -2444,14 +1976,12 @@
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	PRINT_D(HOSTAPD_DBG, "Deleting beacon\n");
-
-	wilc_wlan_set_bssid(dev, NullBssid);
+	wilc_wlan_set_bssid(dev, NullBssid, AP_MODE);
 
 	s32Error = wilc_del_beacon(vif);
 
 	if (s32Error)
-		PRINT_ER("Host delete beacon fail\n");
+		netdev_err(dev, "Host delete beacon fail\n");
 
 	return s32Error;
 }
@@ -2477,14 +2007,6 @@
 		strStaParams.rates_len = params->supported_rates_len;
 		strStaParams.rates = params->supported_rates;
 
-		PRINT_D(CFG80211_DBG, "Adding station parameters %d\n", params->aid);
-
-		PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][0], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][1], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][2], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][3], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][4],
-			priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][5]);
-		PRINT_D(HOSTAPD_DBG, "ASSOC ID = %d\n", strStaParams.aid);
-		PRINT_D(HOSTAPD_DBG, "Number of supported rates = %d\n",
-			strStaParams.rates_len);
-
 		if (!params->ht_capa) {
 			strStaParams.ht_supported = false;
 		} else {
@@ -2502,26 +2024,9 @@
 		strStaParams.flags_mask = params->sta_flags_mask;
 		strStaParams.flags_set = params->sta_flags_set;
 
-		PRINT_D(HOSTAPD_DBG, "IS HT supported = %d\n",
-			strStaParams.ht_supported);
-		PRINT_D(HOSTAPD_DBG, "Capability Info = %d\n",
-			strStaParams.ht_capa_info);
-		PRINT_D(HOSTAPD_DBG, "AMPDU Params = %d\n",
-			strStaParams.ht_ampdu_params);
-		PRINT_D(HOSTAPD_DBG, "HT Extended params = %d\n",
-			strStaParams.ht_ext_params);
-		PRINT_D(HOSTAPD_DBG, "Tx Beamforming Cap = %d\n",
-			strStaParams.ht_tx_bf_cap);
-		PRINT_D(HOSTAPD_DBG, "Antenna selection info = %d\n",
-			strStaParams.ht_ante_sel);
-		PRINT_D(HOSTAPD_DBG, "Flag Mask = %d\n",
-			strStaParams.flags_mask);
-		PRINT_D(HOSTAPD_DBG, "Flag Set = %d\n",
-			strStaParams.flags_set);
-
 		s32Error = wilc_add_station(vif, &strStaParams);
 		if (s32Error)
-			PRINT_ER("Host add station fail\n");
+			netdev_err(dev, "Host add station fail\n");
 	}
 
 	return s32Error;
@@ -2542,21 +2047,14 @@
 	vif = netdev_priv(dev);
 
 	if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
-		PRINT_D(HOSTAPD_DBG, "Deleting station\n");
-
-
-		if (!mac) {
-			PRINT_D(HOSTAPD_DBG, "All associated stations\n");
+		if (!mac)
 			s32Error = wilc_del_allstation(vif,
 				     priv->assoc_stainfo.au8Sta_AssociatedBss);
-		} else {
-			PRINT_D(HOSTAPD_DBG, "With mac address: %x%x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
-		}
 
 		s32Error = wilc_del_station(vif, mac);
 
 		if (s32Error)
-			PRINT_ER("Host delete station fail\n");
+			netdev_err(dev, "Host delete station fail\n");
 	}
 	return s32Error;
 }
@@ -2569,9 +2067,6 @@
 	struct add_sta_param strStaParams = { {0} };
 	struct wilc_vif *vif;
 
-
-	PRINT_D(HOSTAPD_DBG, "Change station paramters\n");
-
 	if (!wiphy)
 		return -EFAULT;
 
@@ -2584,14 +2079,6 @@
 		strStaParams.rates_len = params->supported_rates_len;
 		strStaParams.rates = params->supported_rates;
 
-		PRINT_D(HOSTAPD_DBG, "BSSID = %x%x%x%x%x%x\n",
-			strStaParams.bssid[0], strStaParams.bssid[1],
-			strStaParams.bssid[2], strStaParams.bssid[3],
-			strStaParams.bssid[4], strStaParams.bssid[5]);
-		PRINT_D(HOSTAPD_DBG, "ASSOC ID = %d\n", strStaParams.aid);
-		PRINT_D(HOSTAPD_DBG, "Number of supported rates = %d\n",
-			strStaParams.rates_len);
-
 		if (!params->ht_capa) {
 			strStaParams.ht_supported = false;
 		} else {
@@ -2609,26 +2096,9 @@
 		strStaParams.flags_mask = params->sta_flags_mask;
 		strStaParams.flags_set = params->sta_flags_set;
 
-		PRINT_D(HOSTAPD_DBG, "IS HT supported = %d\n",
-			strStaParams.ht_supported);
-		PRINT_D(HOSTAPD_DBG, "Capability Info = %d\n",
-			strStaParams.ht_capa_info);
-		PRINT_D(HOSTAPD_DBG, "AMPDU Params = %d\n",
-			strStaParams.ht_ampdu_params);
-		PRINT_D(HOSTAPD_DBG, "HT Extended params = %d\n",
-			strStaParams.ht_ext_params);
-		PRINT_D(HOSTAPD_DBG, "Tx Beamforming Cap = %d\n",
-			strStaParams.ht_tx_bf_cap);
-		PRINT_D(HOSTAPD_DBG, "Antenna selection info = %d\n",
-			strStaParams.ht_ante_sel);
-		PRINT_D(HOSTAPD_DBG, "Flag Mask = %d\n",
-			strStaParams.flags_mask);
-		PRINT_D(HOSTAPD_DBG, "Flag Set = %d\n",
-			strStaParams.flags_set);
-
 		s32Error = wilc_edit_station(vif, &strStaParams);
 		if (s32Error)
-			PRINT_ER("Host edit station fail\n");
+			netdev_err(dev, "Host edit station fail\n");
 	}
 	return s32Error;
 }
@@ -2645,34 +2115,87 @@
 	struct net_device *new_ifc = NULL;
 
 	priv = wiphy_priv(wiphy);
-
-
-
-	PRINT_D(HOSTAPD_DBG, "Adding monitor interface[%p]\n", priv->wdev->netdev);
-
 	vif = netdev_priv(priv->wdev->netdev);
 
 
 	if (type == NL80211_IFTYPE_MONITOR) {
-		PRINT_D(HOSTAPD_DBG, "Monitor interface mode: Initializing mon interface virtual device driver\n");
-		PRINT_D(HOSTAPD_DBG, "Adding monitor interface[%p]\n", vif->ndev);
 		new_ifc = WILC_WFI_init_mon_interface(name, vif->ndev);
 		if (new_ifc) {
-			PRINT_D(HOSTAPD_DBG, "Setting monitor flag in private structure\n");
 			vif = netdev_priv(priv->wdev->netdev);
 			vif->monitor_flag = 1;
-		} else
-			PRINT_ER("Error in initializing monitor interface\n ");
+		}
 	}
 	return priv->wdev;
 }
 
 static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-	PRINT_D(HOSTAPD_DBG, "Deleting virtual interface\n");
 	return 0;
 }
 
+static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+{
+	struct wilc_priv *priv = wiphy_priv(wiphy);
+	struct wilc_vif *vif = netdev_priv(priv->dev);
+
+	if (!wow && wilc_wlan_get_num_conn_ifcs(vif->wilc))
+		vif->wilc->suspend_event = true;
+	else
+		vif->wilc->suspend_event = false;
+
+	return 0;
+}
+
+static int wilc_resume(struct wiphy *wiphy)
+{
+	struct wilc_priv *priv = wiphy_priv(wiphy);
+	struct wilc_vif *vif = netdev_priv(priv->dev);
+
+	netdev_info(vif->ndev, "cfg resume\n");
+	return 0;
+}
+
+static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+	struct wilc_priv *priv = wiphy_priv(wiphy);
+	struct wilc_vif *vif = netdev_priv(priv->dev);
+
+	netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
+}
+
+static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+			enum nl80211_tx_power_setting type, int mbm)
+{
+	int ret;
+	s32 tx_power = MBM_TO_DBM(mbm);
+	struct wilc_priv *priv = wiphy_priv(wiphy);
+	struct wilc_vif *vif = netdev_priv(priv->dev);
+
+	if (tx_power < 0)
+		tx_power = 0;
+	else if (tx_power > 18)
+		tx_power = 18;
+	ret = wilc_set_tx_power(vif, tx_power);
+	if (ret)
+		netdev_err(vif->ndev, "Failed to set tx power\n");
+
+	return ret;
+}
+
+static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+			int *dbm)
+{
+	int ret;
+	struct wilc_priv *priv = wiphy_priv(wiphy);
+	struct wilc_vif *vif = netdev_priv(priv->dev);
+
+	ret = wilc_get_tx_power(vif, (u8 *)dbm);
+	if (ret)
+		netdev_err(vif->ndev, "Failed to get tx power\n");
+
+	return ret;
+}
+
 static struct cfg80211_ops wilc_cfg80211_ops = {
 	.set_monitor_channel = set_channel,
 	.scan = scan,
@@ -2708,55 +2231,25 @@
 	.set_power_mgmt = set_power_mgmt,
 	.set_cqm_rssi_config = set_cqm_rssi_config,
 
+	.suspend = wilc_suspend,
+	.resume = wilc_resume,
+	.set_wakeup = wilc_set_wakeup,
+	.set_tx_power = set_tx_power,
+	.get_tx_power = get_tx_power,
+
 };
 
-int WILC_WFI_update_stats(struct wiphy *wiphy, u32 pktlen, u8 changed)
-{
-	struct wilc_priv *priv;
-
-	priv = wiphy_priv(wiphy);
-	switch (changed) {
-	case WILC_WFI_RX_PKT:
-	{
-		priv->netstats.rx_packets++;
-		priv->netstats.rx_bytes += pktlen;
-		priv->netstats.rx_time = get_jiffies_64();
-	}
-	break;
-
-	case WILC_WFI_TX_PKT:
-	{
-		priv->netstats.tx_packets++;
-		priv->netstats.tx_bytes += pktlen;
-		priv->netstats.tx_time = get_jiffies_64();
-
-	}
-	break;
-
-	default:
-		break;
-	}
-	return 0;
-}
-
 static struct wireless_dev *WILC_WFI_CfgAlloc(void)
 {
 	struct wireless_dev *wdev;
 
-
-	PRINT_D(CFG80211_DBG, "Allocating wireless device\n");
-
 	wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
-	if (!wdev) {
-		PRINT_ER("Cannot allocate wireless device\n");
+	if (!wdev)
 		goto _fail_;
-	}
 
 	wdev->wiphy = wiphy_new(&wilc_cfg80211_ops, sizeof(struct wilc_priv));
-	if (!wdev->wiphy) {
-		PRINT_ER("Cannot allocate wiphy\n");
+	if (!wdev->wiphy)
 		goto _fail_mem_;
-	}
 
 	WILC_WFI_band_2ghz.ht_cap.ht_supported = 1;
 	WILC_WFI_band_2ghz.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
@@ -2780,11 +2273,9 @@
 	struct wireless_dev *wdev;
 	s32 s32Error = 0;
 
-	PRINT_D(CFG80211_DBG, "Registering wifi device\n");
-
 	wdev = WILC_WFI_CfgAlloc();
 	if (!wdev) {
-		PRINT_ER("CfgAlloc Failed\n");
+		netdev_err(net, "wiphy new allocate failed\n");
 		return NULL;
 	}
 
@@ -2792,9 +2283,10 @@
 	sema_init(&(priv->SemHandleUpdateStats), 1);
 	priv->wdev = wdev;
 	wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID;
+#ifdef CONFIG_PM
+	wdev->wiphy->wowlan = &wowlan_support;
+#endif
 	wdev->wiphy->max_num_pmkids = WILC_MAX_NUM_PMKIDS;
-	PRINT_INFO(CFG80211_DBG, "Max number of PMKIDs = %d\n", wdev->wiphy->max_num_pmkids);
-
 	wdev->wiphy->max_scan_ie_len = 1000;
 	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 	wdev->wiphy->cipher_suites = cipher_suites;
@@ -2807,20 +2299,11 @@
 	wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 	wdev->iftype = NL80211_IFTYPE_STATION;
 
-
-
-	PRINT_INFO(CFG80211_DBG, "Max scan ids = %d,Max scan IE len = %d,Signal Type = %d,Interface Modes = %d,Interface Type = %d\n",
-		   wdev->wiphy->max_scan_ssids, wdev->wiphy->max_scan_ie_len, wdev->wiphy->signal_type,
-		   wdev->wiphy->interface_modes, wdev->iftype);
-
 	set_wiphy_dev(wdev->wiphy, dev);
 
 	s32Error = wiphy_register(wdev->wiphy);
-	if (s32Error) {
-		PRINT_ER("Cannot register wiphy device\n");
-	} else {
-		PRINT_D(CFG80211_DBG, "Successful Registering\n");
-	}
+	if (s32Error)
+		netdev_err(net, "Cannot register wiphy device\n");
 
 	priv->dev = net;
 	return wdev;
@@ -2832,26 +2315,21 @@
 
 	struct wilc_priv *priv;
 
-	PRINT_D(INIT_DBG, "Host[%p][%p]\n", net, net->ieee80211_ptr);
 	priv = wdev_priv(net->ieee80211_ptr);
 	if (op_ifcs == 0) {
 		setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
 		setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
 	}
 	op_ifcs++;
-	if (s32Error < 0) {
-		PRINT_ER("Failed to creat refresh Timer\n");
-		return s32Error;
-	}
 
 	priv->gbAutoRateAdjusted = false;
 
 	priv->bInP2PlistenState = false;
 
 	sema_init(&(priv->hSemScanReq), 1);
-	s32Error = wilc_init(net, &priv->hWILCWFIDrv);
+	s32Error = wilc_init(net, &priv->hif_drv);
 	if (s32Error)
-		PRINT_ER("Error while initializing hostinterface\n");
+		netdev_err(net, "Error while initializing hostinterface\n");
 
 	return s32Error;
 }
@@ -2874,39 +2352,28 @@
 	s32Error = wilc_deinit(vif);
 
 	clear_shadow_scan();
-	if (op_ifcs == 0) {
-		PRINT_D(CORECONFIG_DBG, "destroy during ip\n");
+	if (op_ifcs == 0)
 		del_timer_sync(&wilc_during_ip_timer);
-	}
 
 	if (s32Error)
-		PRINT_ER("Error while deintializing host interface\n");
+		netdev_err(net, "Error while deintializing host interface\n");
 
 	return s32Error;
 }
 
 void wilc_free_wiphy(struct net_device *net)
 {
-	PRINT_D(CFG80211_DBG, "Unregistering wiphy\n");
-
-	if (!net) {
-		PRINT_D(INIT_DBG, "net_device is NULL\n");
+	if (!net)
 		return;
-	}
 
-	if (!net->ieee80211_ptr) {
-		PRINT_D(INIT_DBG, "ieee80211_ptr is NULL\n");
+	if (!net->ieee80211_ptr)
 		return;
-	}
 
-	if (!net->ieee80211_ptr->wiphy) {
-		PRINT_D(INIT_DBG, "wiphy is NULL\n");
+	if (!net->ieee80211_ptr->wiphy)
 		return;
-	}
 
 	wiphy_unregister(net->ieee80211_ptr->wiphy);
 
-	PRINT_D(INIT_DBG, "Freeing wiphy\n");
 	wiphy_free(net->ieee80211_ptr->wiphy);
 	kfree(net->ieee80211_ptr);
 }
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index ab53d9d..85a3810 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -12,7 +12,6 @@
 
 struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *dev);
 void wilc_free_wiphy(struct net_device *net);
-int WILC_WFI_update_stats(struct wiphy *wiphy, u32 pktlen, u8 changed);
 int wilc_deinit_host_int(struct net_device *net);
 int wilc_init_host_int(struct net_device *net);
 void WILC_WFI_monitor_rx(u8 *buff, u32 size);
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 98ac8ed..4123cff 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -35,8 +35,6 @@
 #include <linux/skbuff.h>
 #include <linux/ieee80211.h>
 #include <net/cfg80211.h>
-#include <linux/ieee80211.h>
-#include <net/cfg80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <linux/if_arp.h>
 #include <linux/in6.h>
@@ -121,10 +119,9 @@
 	spinlock_t lock;
 	struct net_device *dev;
 	struct napi_struct napi;
-	struct host_if_drv *hWILCWFIDrv;
+	struct host_if_drv *hif_drv;
 	struct host_if_pmkid_attr pmkid_list;
 	struct WILC_WFI_stats netstats;
-	u8 WILC_WFI_wep_default;
 	u8 WILC_WFI_wep_key[4][WLAN_KEY_LEN_WEP104];
 	u8 WILC_WFI_wep_key_len[4];
 	/* The real interface that the monitor is on */
@@ -149,7 +146,7 @@
 } struct_frame_reg;
 
 struct wilc_vif {
-	u8 u8IfIdx;
+	u8 idx;
 	u8 iftype;
 	int monitor_flag;
 	int mac_opened;
@@ -160,6 +157,7 @@
 	u8 bssid[ETH_ALEN];
 	struct host_if_drv *hif_drv;
 	struct net_device *ndev;
+	u8 mode;
 };
 
 struct wilc {
@@ -215,6 +213,9 @@
 	const struct firmware *firmware;
 
 	struct device *dev;
+	bool suspend_event;
+
+	struct rf_info dummy_statistics;
 };
 
 struct WILC_WFI_mon_priv {
@@ -225,17 +226,13 @@
 
 void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
 void wilc_mac_indicate(struct wilc *wilc, int flag);
-void wilc_rx_complete(struct wilc *wilc);
-void wilc_dbg(u8 *buff);
-
 int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout);
 void wilc_netdev_cleanup(struct wilc *wilc);
 int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
 		     const struct wilc_hif_func *ops);
 void wilc1000_wlan_deinit(struct net_device *dev);
 void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
-u16 wilc_set_machw_change_vir_if(struct net_device *dev, bool value);
 int wilc_wlan_get_firmware(struct net_device *dev);
-int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid);
+int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode);
 
 #endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 83af51b..0020620 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -3,54 +3,24 @@
 #include "wilc_wfi_netdevice.h"
 #include "wilc_wlan_cfg.h"
 
-#ifdef WILC_OPTIMIZE_SLEEP_INT
-static inline void chip_allow_sleep(struct wilc *wilc);
-#endif
-static inline void chip_wakeup(struct wilc *wilc);
-static u32 dbgflag = N_INIT | N_ERR | N_INTR | N_TXQ | N_RXQ;
-
-/* FIXME: replace with dev_debug() */
-static void wilc_debug(u32 flag, char *fmt, ...)
-{
-	char buf[256];
-	va_list args;
-
-	if (flag & dbgflag) {
-		va_start(args, fmt);
-		vsprintf(buf, fmt, args);
-		va_end(args);
-
-		wilc_dbg(buf);
-	}
-}
-
 static CHIP_PS_STATE_T chip_ps_state = CHIP_WAKEDUP;
 
 static inline void acquire_bus(struct wilc *wilc, BUS_ACQUIRE_T acquire)
 {
 	mutex_lock(&wilc->hif_cs);
-	#ifndef WILC_OPTIMIZE_SLEEP_INT
-	if (chip_ps_state != CHIP_WAKEDUP)
-	#endif
-	{
-		if (acquire == ACQUIRE_AND_WAKEUP)
-			chip_wakeup(wilc);
-	}
+	if (acquire == ACQUIRE_AND_WAKEUP)
+		chip_wakeup(wilc);
 }
 
 static inline void release_bus(struct wilc *wilc, BUS_RELEASE_T release)
 {
-	#ifdef WILC_OPTIMIZE_SLEEP_INT
 	if (release == RELEASE_ALLOW_SLEEP)
 		chip_allow_sleep(wilc);
-	#endif
 	mutex_unlock(&wilc->hif_cs);
 }
 
-#ifdef TCP_ACK_FILTER
-static void wilc_wlan_txq_remove(struct txq_entry_t *tqe)
+static void wilc_wlan_txq_remove(struct wilc *wilc, struct txq_entry_t *tqe)
 {
-
 	if (tqe == wilc->txq_head) {
 		wilc->txq_head = tqe->next;
 		if (wilc->txq_head)
@@ -65,7 +35,6 @@
 	}
 	wilc->txq_entries -= 1;
 }
-#endif
 
 static struct txq_entry_t *
 wilc_wlan_txq_remove_from_head(struct net_device *dev)
@@ -117,18 +86,18 @@
 		wilc->txq_tail = tqe;
 	}
 	wilc->txq_entries += 1;
-	PRINT_D(TX_DBG, "Number of entries in TxQ = %d\n", wilc->txq_entries);
 
 	spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
 
-	PRINT_D(TX_DBG, "Wake the txq_handling\n");
-
 	up(&wilc->txq_event);
 }
 
-static int wilc_wlan_txq_add_to_head(struct wilc *wilc, struct txq_entry_t *tqe)
+static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
+				     struct txq_entry_t *tqe)
 {
 	unsigned long flags;
+	struct wilc *wilc = vif->wilc;
+
 	if (wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
 				    CFG_PKTS_TIMEOUT))
 		return -1;
@@ -147,17 +116,14 @@
 		wilc->txq_head = tqe;
 	}
 	wilc->txq_entries += 1;
-	PRINT_D(TX_DBG, "Number of entries in TxQ = %d\n", wilc->txq_entries);
 
 	spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
 	up(&wilc->txq_add_to_head_cs);
 	up(&wilc->txq_event);
-	PRINT_D(TX_DBG, "Wake up the txq_handler\n");
 
 	return 0;
 }
 
-#ifdef	TCP_ACK_FILTER
 struct ack_session_info;
 struct ack_session_info {
 	u32 seq_num;
@@ -173,7 +139,6 @@
 	struct txq_entry_t  *txqe;
 };
 
-
 #define NOT_TCP_ACK			(-1)
 
 #define MAX_TCP_SESSION		25
@@ -192,19 +157,20 @@
 
 static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq)
 {
-	ack_session_info[tcp_session].seq_num = seq;
-	ack_session_info[tcp_session].bigger_ack_num = 0;
-	ack_session_info[tcp_session].src_port = src_prt;
-	ack_session_info[tcp_session].dst_port = dst_prt;
-	tcp_session++;
-
-	PRINT_D(TCP_ENH, "TCP Session %d to Ack %d\n", tcp_session, seq);
+	if (tcp_session < 2 * MAX_TCP_SESSION) {
+		ack_session_info[tcp_session].seq_num = seq;
+		ack_session_info[tcp_session].bigger_ack_num = 0;
+		ack_session_info[tcp_session].src_port = src_prt;
+		ack_session_info[tcp_session].dst_port = dst_prt;
+		tcp_session++;
+	}
 	return 0;
 }
 
 static inline int update_tcp_session(u32 index, u32 ack)
 {
-	if (ack > ack_session_info[index].bigger_ack_num)
+	if (index < 2 * MAX_TCP_SESSION &&
+	    ack > ack_session_info[index].bigger_ack_num)
 		ack_session_info[index].bigger_ack_num = ack;
 	return 0;
 }
@@ -212,7 +178,7 @@
 static inline int add_tcp_pending_ack(u32 ack, u32 session_index,
 				      struct txq_entry_t *txqe)
 {
-	if (pending_acks < MAX_PENDING_ACKS) {
+	if (pending_base + pending_acks < MAX_PENDING_ACKS) {
 		pending_acks_info[pending_base + pending_acks].ack_num = ack;
 		pending_acks_info[pending_base + pending_acks].txqe = txqe;
 		pending_acks_info[pending_base + pending_acks].session_index = session_index;
@@ -221,19 +187,9 @@
 	}
 	return 0;
 }
-static inline int remove_TCP_related(struct wilc *wilc)
+
+static inline void tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&wilc->txq_spinlock, flags);
-
-	spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
-	return 0;
-}
-
-static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
-{
-	int ret;
 	u8 *eth_hdr_ptr;
 	u8 *buffer = tqe->buffer;
 	unsigned short h_proto;
@@ -245,10 +201,11 @@
 	vif = netdev_priv(dev);
 	wilc = vif->wilc;
 
+	spin_lock_irqsave(&wilc->txq_spinlock, flags);
 
 	eth_hdr_ptr = &buffer[0];
 	h_proto = ntohs(*((unsigned short *)&eth_hdr_ptr[12]));
-	if (h_proto == 0x0800) {
+	if (h_proto == ETH_P_IP) {
 		u8 *ip_hdr_ptr;
 		u8 protocol;
 
@@ -278,7 +235,8 @@
 					 (u32)tcp_hdr_ptr[11];
 
 				for (i = 0; i < tcp_session; i++) {
-					if (ack_session_info[i].seq_num == seq_no) {
+					if (i < 2 * MAX_TCP_SESSION &&
+					    ack_session_info[i].seq_num == seq_no) {
 						update_tcp_session(i, ack_no);
 						break;
 					}
@@ -288,15 +246,9 @@
 
 				add_tcp_pending_ack(ack_no, i, tqe);
 			}
-
-		} else {
-			ret = 0;
 		}
-	} else {
-		ret = 0;
 	}
 	spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
-	return ret;
 }
 
 static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
@@ -311,14 +263,15 @@
 
 	spin_lock_irqsave(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
 	for (i = pending_base; i < (pending_base + pending_acks); i++) {
+		if (i >= MAX_PENDING_ACKS ||
+		    pending_acks_info[i].session_index >= 2 * MAX_TCP_SESSION)
+			break;
 		if (pending_acks_info[i].ack_num < ack_session_info[pending_acks_info[i].session_index].bigger_ack_num) {
 			struct txq_entry_t *tqe;
 
-			PRINT_D(TCP_ENH, "DROP ACK: %u\n",
-				pending_acks_info[i].ack_num);
 			tqe = pending_acks_info[i].txqe;
 			if (tqe) {
-				wilc_wlan_txq_remove(tqe);
+				wilc_wlan_txq_remove(wilc, tqe);
 				tqe->status = 1;
 				if (tqe->tx_complete_func)
 					tqe->tx_complete_func(tqe->priv,
@@ -345,36 +298,35 @@
 
 	return 1;
 }
-#endif
 
-static bool enabled = false;
+static bool enabled;
 
 void wilc_enable_tcp_ack_filter(bool value)
 {
 	enabled = value;
 }
 
-#ifdef TCP_ACK_FILTER
 static bool is_tcp_ack_filter_enabled(void)
 {
 	return enabled;
 }
-#endif
 
-static int wilc_wlan_txq_add_cfg_pkt(struct wilc *wilc, u8 *buffer, u32 buffer_size)
+static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
+				     u32 buffer_size)
 {
 	struct txq_entry_t *tqe;
+	struct wilc *wilc = vif->wilc;
 
-	PRINT_D(TX_DBG, "Adding config packet ...\n");
+	netdev_dbg(vif->ndev, "Adding config packet ...\n");
 	if (wilc->quit) {
-		PRINT_D(TX_DBG, "Return due to clear function\n");
+		netdev_dbg(vif->ndev, "Return due to clear function\n");
 		up(&wilc->cfg_event);
 		return 0;
 	}
 
 	tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
 	if (!tqe) {
-		PRINT_ER("Failed to allocate memory\n");
+		netdev_err(vif->ndev, "Failed to allocate memory\n");
 		return 0;
 	}
 
@@ -383,12 +335,9 @@
 	tqe->buffer_size = buffer_size;
 	tqe->tx_complete_func = NULL;
 	tqe->priv = NULL;
-#ifdef TCP_ACK_FILTER
 	tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
-#endif
-	PRINT_D(TX_DBG, "Adding the config packet at the Queue tail\n");
 
-	if (wilc_wlan_txq_add_to_head(wilc, tqe))
+	if (wilc_wlan_txq_add_to_head(vif, tqe))
 		return 0;
 	return 1;
 }
@@ -415,12 +364,9 @@
 	tqe->tx_complete_func = func;
 	tqe->priv = priv;
 
-	PRINT_D(TX_DBG, "Adding mgmt packet at the Queue tail\n");
-#ifdef TCP_ACK_FILTER
 	tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
 	if (is_tcp_ack_filter_enabled())
 		tcp_process(dev, tqe);
-#endif
 	wilc_wlan_txq_add_to_tail(dev, tqe);
 	return wilc->txq_entries;
 }
@@ -446,10 +392,7 @@
 	tqe->buffer_size = buffer_size;
 	tqe->tx_complete_func = func;
 	tqe->priv = priv;
-#ifdef TCP_ACK_FILTER
 	tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
-#endif
-	PRINT_D(TX_DBG, "Adding Network packet at the Queue tail\n");
 	wilc_wlan_txq_add_to_tail(dev, tqe);
 	return 1;
 }
@@ -483,32 +426,26 @@
 
 static int wilc_wlan_rxq_add(struct wilc *wilc, struct rxq_entry_t *rqe)
 {
-
 	if (wilc->quit)
 		return 0;
 
 	mutex_lock(&wilc->rxq_cs);
 	if (!wilc->rxq_head) {
-		PRINT_D(RX_DBG, "Add to Queue head\n");
 		rqe->next = NULL;
 		wilc->rxq_head = rqe;
 		wilc->rxq_tail = rqe;
 	} else {
-		PRINT_D(RX_DBG, "Add to Queue tail\n");
 		wilc->rxq_tail->next = rqe;
 		rqe->next = NULL;
 		wilc->rxq_tail = rqe;
 	}
 	wilc->rxq_entries += 1;
-	PRINT_D(RX_DBG, "Number of queue entries: %d\n", wilc->rxq_entries);
 	mutex_unlock(&wilc->rxq_cs);
 	return wilc->rxq_entries;
 }
 
 static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc)
 {
-
-	PRINT_D(RX_DBG, "Getting rxQ element\n");
 	if (wilc->rxq_head) {
 		struct rxq_entry_t *rqe;
 
@@ -516,29 +453,26 @@
 		rqe = wilc->rxq_head;
 		wilc->rxq_head = wilc->rxq_head->next;
 		wilc->rxq_entries -= 1;
-		PRINT_D(RX_DBG, "RXQ entries decreased\n");
 		mutex_unlock(&wilc->rxq_cs);
 		return rqe;
 	}
-	PRINT_D(RX_DBG, "Nothing to get from Q\n");
 	return NULL;
 }
 
-#ifdef WILC_OPTIMIZE_SLEEP_INT
-
-static inline void chip_allow_sleep(struct wilc *wilc)
+void chip_allow_sleep(struct wilc *wilc)
 {
 	u32 reg = 0;
 
 	wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
 
 	wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
+	wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
 }
+EXPORT_SYMBOL_GPL(chip_allow_sleep);
 
-static inline void chip_wakeup(struct wilc *wilc)
+void chip_wakeup(struct wilc *wilc)
 {
-	u32 reg, clk_status_reg, trials = 0;
-	u32 sleep_time;
+	u32 reg, clk_status_reg;
 
 	if ((wilc->io_type & 0x1) == HIF_SPI) {
 		do {
@@ -548,13 +482,12 @@
 
 			do {
 				usleep_range(2 * 1000, 2 * 1000);
-				if ((wilc_get_chipid(wilc, true) == 0))
-					wilc_debug(N_ERR, "Couldn't read chip id. Wake up failed\n");
-
-			} while ((wilc_get_chipid(wilc, true) == 0) && ((++trials % 3) == 0));
-
+				wilc_get_chipid(wilc, true);
+			} while (wilc_get_chipid(wilc, true) == 0);
 		} while (wilc_get_chipid(wilc, true) == 0);
 	} else if ((wilc->io_type & 0x1) == HIF_SDIO)	 {
+		wilc->hif_func->hif_write_reg(wilc, 0xfa, 1);
+		udelay(200);
 		wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
 		do {
 			wilc->hif_func->hif_write_reg(wilc, 0xf0,
@@ -562,14 +495,11 @@
 			wilc->hif_func->hif_read_reg(wilc, 0xf1,
 						     &clk_status_reg);
 
-			while (((clk_status_reg & 0x1) == 0) && (((++trials) % 3) == 0)) {
+			while ((clk_status_reg & 0x1) == 0) {
 				usleep_range(2 * 1000, 2 * 1000);
 
 				wilc->hif_func->hif_read_reg(wilc, 0xf1,
 							     &clk_status_reg);
-
-				if ((clk_status_reg & 0x1) == 0)
-					wilc_debug(N_ERR, "clocks still OFF. Wake up failed\n");
 			}
 			if ((clk_status_reg & 0x1) == 0) {
 				wilc->hif_func->hif_write_reg(wilc, 0xf0,
@@ -579,11 +509,7 @@
 	}
 
 	if (chip_ps_state == CHIP_SLEEPING_MANUAL) {
-		wilc->hif_func->hif_read_reg(wilc, 0x1C0C, &reg);
-		reg &= ~BIT(0);
-		wilc->hif_func->hif_write_reg(wilc, 0x1C0C, reg);
-
-		if (wilc_get_chipid(wilc, false) >= 0x1002b0) {
+		if (wilc_get_chipid(wilc, false) < 0x1002b0) {
 			u32 val32;
 
 			wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32);
@@ -597,71 +523,37 @@
 	}
 	chip_ps_state = CHIP_WAKEDUP;
 }
-#else
-static inline void chip_wakeup(struct wilc *wilc)
-{
-	u32 reg, trials = 0;
+EXPORT_SYMBOL_GPL(chip_wakeup);
 
-	do {
-		if ((wilc->io_type & 0x1) == HIF_SPI) {
-			wilc->hif_func->hif_read_reg(wilc, 1, &reg);
-			wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1));
-			wilc->hif_func->hif_write_reg(wilc, 1, reg | BIT(1));
-			wilc->hif_func->hif_write_reg(wilc, 1, reg  & ~BIT(1));
-		} else if ((wilc->io_type & 0x1) == HIF_SDIO)	 {
-			wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
-			wilc->hif_func->hif_write_reg(wilc, 0xf0,
-						      reg & ~BIT(0));
-			wilc->hif_func->hif_write_reg(wilc, 0xf0,
-						      reg | BIT(0));
-			wilc->hif_func->hif_write_reg(wilc, 0xf0,
-						      reg  & ~BIT(0));
-		}
-
-		do {
-			mdelay(3);
-
-			if ((wilc_get_chipid(wilc, true) == 0))
-				wilc_debug(N_ERR, "Couldn't read chip id. Wake up failed\n");
-
-		} while ((wilc_get_chipid(wilc, true) == 0) && ((++trials % 3) == 0));
-
-	} while (wilc_get_chipid(wilc, true) == 0);
-
-	if (chip_ps_state == CHIP_SLEEPING_MANUAL) {
-		wilc->hif_func->hif_read_reg(wilc, 0x1C0C, &reg);
-		reg &= ~BIT(0);
-		wilc->hif_func->hif_write_reg(wilc, 0x1C0C, reg);
-
-		if (wilc_get_chipid(wilc, false) >= 0x1002b0) {
-			u32 val32;
-
-			wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32);
-			val32 |= BIT(6);
-			wilc->hif_func->hif_write_reg(wilc, 0x1e1c, val32);
-
-			wilc->hif_func->hif_read_reg(wilc, 0x1e9c, &val32);
-			val32 |= BIT(6);
-			wilc->hif_func->hif_write_reg(wilc, 0x1e9c, val32);
-		}
-	}
-	chip_ps_state = CHIP_WAKEDUP;
-}
-#endif
 void wilc_chip_sleep_manually(struct wilc *wilc)
 {
 	if (chip_ps_state != CHIP_WAKEDUP)
 		return;
 	acquire_bus(wilc, ACQUIRE_ONLY);
 
-#ifdef WILC_OPTIMIZE_SLEEP_INT
 	chip_allow_sleep(wilc);
-#endif
 	wilc->hif_func->hif_write_reg(wilc, 0x10a8, 1);
 
 	chip_ps_state = CHIP_SLEEPING_MANUAL;
 	release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
+
+void host_wakeup_notify(struct wilc *wilc)
+{
+	acquire_bus(wilc, ACQUIRE_ONLY);
+	wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
+	release_bus(wilc, RELEASE_ONLY);
+}
+EXPORT_SYMBOL_GPL(host_wakeup_notify);
+
+void host_sleep_notify(struct wilc *wilc)
+{
+	acquire_bus(wilc, ACQUIRE_ONLY);
+	wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
+	release_bus(wilc, RELEASE_ONLY);
+}
+EXPORT_SYMBOL_GPL(host_sleep_notify);
 
 int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
 {
@@ -690,10 +582,7 @@
 
 		wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
 					CFG_PKTS_TIMEOUT);
-#ifdef	TCP_ACK_FILTER
 		wilc_wlan_txq_filter_dup_tcp_ack(dev);
-#endif
-		PRINT_D(TX_DBG, "Getting the head of the TxQ\n");
 		tqe = wilc_wlan_txq_get_first(wilc);
 		i = 0;
 		sum = 0;
@@ -709,65 +598,49 @@
 					vmm_sz = HOST_HDR_OFFSET;
 
 				vmm_sz += tqe->buffer_size;
-				PRINT_D(TX_DBG, "VMM Size before alignment = %d\n", vmm_sz);
+
 				if (vmm_sz & 0x3)
 					vmm_sz = (vmm_sz + 4) & ~0x3;
 
 				if ((sum + vmm_sz) > LINUX_TX_SIZE)
 					break;
 
-				PRINT_D(TX_DBG, "VMM Size AFTER alignment = %d\n", vmm_sz);
 				vmm_table[i] = vmm_sz / 4;
-				PRINT_D(TX_DBG, "VMMTable entry size = %d\n",
-					vmm_table[i]);
-
-				if (tqe->type == WILC_CFG_PKT) {
+				if (tqe->type == WILC_CFG_PKT)
 					vmm_table[i] |= BIT(10);
-					PRINT_D(TX_DBG, "VMMTable entry changed for CFG packet = %d\n", vmm_table[i]);
-				}
 				vmm_table[i] = cpu_to_le32(vmm_table[i]);
 
 				i++;
 				sum += vmm_sz;
-				PRINT_D(TX_DBG, "sum = %d\n", sum);
 				tqe = wilc_wlan_txq_get_next(wilc, tqe);
 			} else {
 				break;
 			}
 		} while (1);
 
-		if (i == 0) {
-			PRINT_D(TX_DBG, "Nothing in TX-Q\n");
+		if (i == 0)
 			break;
-		} else {
-			PRINT_D(TX_DBG, "Mark the last entry in VMM table - number of previous entries = %d\n", i);
+		else
 			vmm_table[i] = 0x0;
-		}
+
 		acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
 		counter = 0;
 		do {
-			ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_TX_CTRL,
-						       &reg);
-			if (!ret) {
-				wilc_debug(N_ERR, "[wilc txq]: fail can't read reg vmm_tbl_entry..\n");
+			ret = wilc->hif_func->hif_read_reg(wilc,
+							   WILC_HOST_TX_CTRL,
+							   &reg);
+			if (!ret)
 				break;
-			}
 
 			if ((reg & 0x1) == 0) {
-				PRINT_D(TX_DBG, "Writing VMM table ... with Size = %d\n", ((i + 1) * 4));
 				break;
 			} else {
 				counter++;
 				if (counter > 200) {
 					counter = 0;
-					PRINT_D(TX_DBG, "Looping in tx ctrl , forcce quit\n");
 					ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
 					break;
 				}
-				PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait...\n");
-				release_bus(wilc, RELEASE_ALLOW_SLEEP);
-				usleep_range(3000, 3000);
-				acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
 			}
 		} while (!wilc->quit);
 
@@ -777,32 +650,24 @@
 		timeout = 200;
 		do {
 			ret = wilc->hif_func->hif_block_tx(wilc, WILC_VMM_TBL_RX_SHADOW_BASE, (u8 *)vmm_table, ((i + 1) * 4));
-			if (!ret) {
-				wilc_debug(N_ERR, "ERR block TX of VMM table.\n");
+			if (!ret)
 				break;
-			}
 
-			ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL,
-							0x2);
-			if (!ret) {
-				wilc_debug(N_ERR, "[wilc txq]: fail can't write reg host_vmm_ctl..\n");
+			ret = wilc->hif_func->hif_write_reg(wilc,
+							    WILC_HOST_VMM_CTL,
+							    0x2);
+			if (!ret)
 				break;
-			}
 
 			do {
 				ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg);
-				if (!ret) {
-					wilc_debug(N_ERR, "[wilc txq]: fail can't read reg host_vmm_ctl..\n");
+				if (!ret)
 					break;
-				}
 				if ((reg >> 2) & 0x1) {
 					entries = ((reg >> 3) & 0x3f);
 					break;
 				} else {
 					release_bus(wilc, RELEASE_ALLOW_SLEEP);
-					usleep_range(3000, 3000);
-					acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
-					PRINT_WRN(GENERIC_DBG, "Can't get VMM entery - reg = %2x\n", reg);
 				}
 			} while (--timeout);
 			if (timeout <= 0) {
@@ -814,19 +679,13 @@
 				break;
 
 			if (entries == 0) {
-				PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]]\n", reg, i, vmm_table[i - 1]);
-
 				ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
-				if (!ret) {
-					wilc_debug(N_ERR, "[wilc txq]: fail can't read reg WILC_HOST_TX_CTRL..\n");
+				if (!ret)
 					break;
-				}
 				reg &= ~BIT(0);
 				ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg);
-				if (!ret) {
-					wilc_debug(N_ERR, "[wilc txq]: fail can't write reg WILC_HOST_TX_CTRL..\n");
+				if (!ret)
 					break;
-				}
 				break;
 			} else {
 				break;
@@ -866,7 +725,7 @@
 				if (tqe->type == WILC_CFG_PKT) {
 					buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET;
 				} else if (tqe->type == WILC_NET_PKT) {
-					char *bssid = ((struct tx_complete_data *)(tqe->priv))->pBssid;
+					char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid;
 
 					buffer_offset = ETH_ETHERNET_HDR_OFFSET;
 					memcpy(&txb[offset + 4], bssid, 6);
@@ -882,10 +741,9 @@
 				if (tqe->tx_complete_func)
 					tqe->tx_complete_func(tqe->priv,
 							      tqe->status);
-				#ifdef TCP_ACK_FILTER
-				if (tqe->tcp_pending_ack_idx != NOT_TCP_ACK)
+				if (tqe->tcp_pending_ack_idx != NOT_TCP_ACK &&
+				    tqe->tcp_pending_ack_idx < MAX_PENDING_ACKS)
 					pending_acks_info[tqe->tcp_pending_ack_idx].txqe = NULL;
-				#endif
 				kfree(tqe);
 			} else {
 				break;
@@ -895,16 +753,12 @@
 		acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
 
 		ret = wilc->hif_func->hif_clear_int_ext(wilc, ENABLE_TX_VMM);
-		if (!ret) {
-			wilc_debug(N_ERR, "[wilc txq]: fail can't start tx VMM ...\n");
+		if (!ret)
 			goto _end_;
-		}
 
 		ret = wilc->hif_func->hif_block_tx_ext(wilc, 0, txb, offset);
-		if (!ret) {
-			wilc_debug(N_ERR, "[wilc txq]: fail can't block tx ext...\n");
+		if (!ret)
 			goto _end_;
-		}
 
 _end_:
 
@@ -915,14 +769,13 @@
 	up(&wilc->txq_add_to_head_cs);
 
 	wilc->txq_exit = 1;
-	PRINT_D(TX_DBG, "THREAD: Exiting txq\n");
 	*txq_count = wilc->txq_entries;
 	return ret;
 }
 
 static void wilc_wlan_handle_rxq(struct wilc *wilc)
 {
-	int offset = 0, size, has_packet = 0;
+	int offset = 0, size;
 	u8 *buffer;
 	struct rxq_entry_t *rqe;
 
@@ -930,19 +783,15 @@
 
 	do {
 		if (wilc->quit) {
-			PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function\n");
 			up(&wilc->cfg_event);
 			break;
 		}
 		rqe = wilc_wlan_rxq_remove(wilc);
-		if (!rqe) {
-			PRINT_D(RX_DBG, "nothing in the queue - exit 1st do-while\n");
+		if (!rqe)
 			break;
-		}
+
 		buffer = rqe->buffer;
 		size = rqe->buffer_size;
-		PRINT_D(RX_DBG, "rxQ entery Size = %d - Address = %p\n",
-			size, buffer);
 		offset = 0;
 
 		do {
@@ -950,21 +799,16 @@
 			u32 pkt_len, pkt_offset, tp_len;
 			int is_cfg_packet;
 
-			PRINT_D(RX_DBG, "In the 2nd do-while\n");
 			memcpy(&header, &buffer[offset], 4);
 			header = cpu_to_le32(header);
-			PRINT_D(RX_DBG, "Header = %04x - Offset = %d\n",
-				header, offset);
 
 			is_cfg_packet = (header >> 31) & 0x1;
 			pkt_offset = (header >> 22) & 0x1ff;
 			tp_len = (header >> 11) & 0x7ff;
 			pkt_len = header & 0x7ff;
 
-			if (pkt_len == 0 || tp_len == 0) {
-				wilc_debug(N_RXQ, "[wilc rxq]: data corrupt, packet len or tp_len is 0 [%d][%d]\n", pkt_len, tp_len);
+			if (pkt_len == 0 || tp_len == 0)
 				break;
-			}
 
 			#define IS_MANAGMEMENT				0x100
 			#define IS_MANAGMEMENT_CALLBACK			0x080
@@ -983,14 +827,12 @@
 							      &buffer[offset],
 							      pkt_len,
 							      pkt_offset);
-						has_packet = 1;
 					}
 				} else {
 					struct wilc_cfg_rsp rsp;
 
 					wilc_wlan_cfg_indicate_rx(wilc, &buffer[pkt_offset + offset], pkt_len, &rsp);
 					if (rsp.type == WILC_CFG_RSP) {
-						PRINT_D(RX_DBG, "wilc->cfg_seq_no = %d - rsp.seq_no = %d\n", wilc->cfg_seq_no, rsp.seq_no);
 						if (wilc->cfg_seq_no == rsp.seq_no)
 							up(&wilc->cfg_event);
 					} else if (rsp.type == WILC_CFG_RSP_STATUS) {
@@ -1006,14 +848,9 @@
 				break;
 		} while (1);
 		kfree(rqe);
-
-		if (has_packet)
-			wilc_rx_complete(wilc);
-
 	} while (1);
 
 	wilc->rxq_exit = 1;
-	PRINT_D(RX_DBG, "THREAD: Exiting RX thread\n");
 }
 
 static void wilc_unknown_isr_ext(struct wilc *wilc)
@@ -1032,18 +869,13 @@
 	else
 		mdelay(WILC_PLL_TO_SPI);
 
-	while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials)) {
-		PRINT_D(TX_DBG, "PLL update retrying\n");
+	while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials))
 		mdelay(1);
-	}
 }
 
 static void wilc_sleeptimer_isr_ext(struct wilc *wilc, u32 int_stats1)
 {
 	wilc->hif_func->hif_clear_int_ext(wilc, SLEEP_INT_CLR);
-#ifndef WILC_OPTIMIZE_SLEEP_INT
-	chip_ps_state = CHIP_SLEEPING_AUTO;
-#endif
 }
 
 static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
@@ -1055,14 +887,11 @@
 	int ret = 0;
 	struct rxq_entry_t *rqe;
 
-	size = ((int_status & 0x7fff) << 2);
+	size = (int_status & 0x7fff) << 2;
 
 	while (!size && retries < 10) {
-		u32 time = 0;
-
-		wilc_debug(N_ERR, "RX Size equal zero ... Trying to read it again for %d time\n", time++);
 		wilc->hif_func->hif_read_size(wilc, &size);
-		size = ((size & 0x7fff) << 2);
+		size = (size & 0x7fff) << 2;
 		retries++;
 	}
 
@@ -1070,21 +899,17 @@
 		if (LINUX_RX_SIZE - offset < size)
 			offset = 0;
 
-		if (wilc->rx_buffer) {
+		if (wilc->rx_buffer)
 			buffer = &wilc->rx_buffer[offset];
-		} else {
-			wilc_debug(N_ERR, "[wilc isr]: fail Rx Buffer is NULL...drop the packets (%d)\n", size);
+		else
 			goto _end_;
-		}
 
 		wilc->hif_func->hif_clear_int_ext(wilc,
 					      DATA_INT_CLR | ENABLE_RX_VMM);
 		ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
 
-		if (!ret) {
-			wilc_debug(N_ERR, "[wilc isr]: fail block rx...\n");
+		if (!ret)
 			goto _end_;
-		}
 _end_:
 		if (ret) {
 			offset += size;
@@ -1093,7 +918,6 @@
 			if (rqe) {
 				rqe->buffer = buffer;
 				rqe->buffer_size = size;
-				PRINT_D(RX_DBG, "rxq entery Size= %d - Address = %p\n", rqe->buffer_size, rqe->buffer);
 				wilc_wlan_rxq_add(wilc, rqe);
 			}
 		}
@@ -1111,23 +935,21 @@
 	if (int_status & PLL_INT_EXT)
 		wilc_pllupdate_isr_ext(wilc, int_status);
 
-	if (int_status & DATA_INT_EXT) {
+	if (int_status & DATA_INT_EXT)
 		wilc_wlan_handle_isr_ext(wilc, int_status);
-	#ifndef WILC_OPTIMIZE_SLEEP_INT
-		chip_ps_state = CHIP_WAKEDUP;
-	#endif
-	}
+
 	if (int_status & SLEEP_INT_EXT)
 		wilc_sleeptimer_isr_ext(wilc, int_status);
 
-	if (!(int_status & (ALL_INT_EXT))) {
+	if (!(int_status & (ALL_INT_EXT)))
 		wilc_unknown_isr_ext(wilc);
-	}
+
 	release_bus(wilc, RELEASE_ALLOW_SLEEP);
 }
 EXPORT_SYMBOL_GPL(wilc_handle_isr);
 
-int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size)
+int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
+				u32 buffer_size)
 {
 	u32 offset;
 	u32 addr, size, size2, blksz;
@@ -1139,12 +961,9 @@
 	dma_buffer = kmalloc(blksz, GFP_KERNEL);
 	if (!dma_buffer) {
 		ret = -EIO;
-		PRINT_ER("Can't allocate buffer for firmware download IO error\n ");
 		goto _fail_1;
 	}
 
-	PRINT_D(INIT_DBG, "Downloading firmware size = %d ...\n", buffer_size);
-
 	offset = 0;
 	do {
 		memcpy(&addr, &buffer[offset], 4);
@@ -1160,8 +979,8 @@
 				size2 = blksz;
 
 			memcpy(dma_buffer, &buffer[offset], size2);
-			ret = wilc->hif_func->hif_block_tx(wilc, addr, dma_buffer,
-						       size2);
+			ret = wilc->hif_func->hif_block_tx(wilc, addr,
+							   dma_buffer, size2);
 			if (!ret)
 				break;
 
@@ -1173,10 +992,8 @@
 
 		if (!ret) {
 			ret = -EIO;
-			PRINT_ER("Can't download firmware IO error\n ");
 			goto _fail_;
 		}
-		PRINT_D(INIT_DBG, "Offset = %d\n", offset);
 	} while (offset < buffer_size);
 
 _fail_:
@@ -1203,7 +1020,6 @@
 	acquire_bus(wilc, ACQUIRE_ONLY);
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg);
 	if (!ret) {
-		wilc_debug(N_ERR, "[wilc start]: fail write reg vmm_core_cfg...\n");
 		release_bus(wilc, RELEASE_ONLY);
 		ret = -EIO;
 		return ret;
@@ -1226,7 +1042,7 @@
 #ifdef WILC_EXT_PA_INV_TX_RX
 	reg |= WILC_HAVE_EXT_PA_INV_TX_RX;
 #endif
-
+	reg |= WILC_HAVE_USE_IRQ_AS_HOST_WAKE;
 	reg |= WILC_HAVE_LEGACY_RF_SETTINGS;
 #ifdef XTAL_24
 	reg |= WILC_HAVE_XTAL_24;
@@ -1237,7 +1053,6 @@
 
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg);
 	if (!ret) {
-		wilc_debug(N_ERR, "[wilc start]: fail write WILC_GP_REG_1 ...\n");
 		release_bus(wilc, RELEASE_ONLY);
 		ret = -EIO;
 		return ret;
@@ -1247,7 +1062,6 @@
 
 	ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
 	if (!ret) {
-		wilc_debug(N_ERR, "[wilc start]: fail read reg 0x1000 ...\n");
 		release_bus(wilc, RELEASE_ONLY);
 		ret = -EIO;
 		return ret;
@@ -1268,22 +1082,16 @@
 	return (ret < 0) ? ret : 0;
 }
 
-void wilc_wlan_global_reset(struct wilc *wilc)
-{
-	acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
-	wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, 0x0);
-	release_bus(wilc, RELEASE_ONLY);
-}
 int wilc_wlan_stop(struct wilc *wilc)
 {
 	u32 reg = 0;
 	int ret;
 	u8 timeout = 10;
+
 	acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
 
 	ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
 	if (!ret) {
-		PRINT_ER("Error while reading reg\n");
 		release_bus(wilc, RELEASE_ALLOW_SLEEP);
 		return ret;
 	}
@@ -1291,40 +1099,32 @@
 	reg &= ~BIT(10);
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
 	if (!ret) {
-		PRINT_ER("Error while writing reg\n");
 		release_bus(wilc, RELEASE_ALLOW_SLEEP);
 		return ret;
 	}
 
 	do {
-		ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+		ret = wilc->hif_func->hif_read_reg(wilc,
+						   WILC_GLB_RESET_0, &reg);
 		if (!ret) {
-			PRINT_ER("Error while reading reg\n");
 			release_bus(wilc, RELEASE_ALLOW_SLEEP);
 			return ret;
 		}
-		PRINT_D(GENERIC_DBG, "Read RESET Reg %x : Retry%d\n",
-			reg, timeout);
 
 		if ((reg & BIT(10))) {
-			PRINT_D(GENERIC_DBG, "Bit 10 not reset : Retry %d\n",
-				timeout);
 			reg &= ~BIT(10);
-			ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0,
-							reg);
+			ret = wilc->hif_func->hif_write_reg(wilc,
+							    WILC_GLB_RESET_0,
+							    reg);
 			timeout--;
 		} else {
-			PRINT_D(GENERIC_DBG, "Bit 10 reset after : Retry %d\n",
-				timeout);
-			ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0,
-						       &reg);
+			ret = wilc->hif_func->hif_read_reg(wilc,
+							   WILC_GLB_RESET_0,
+							   &reg);
 			if (!ret) {
-				PRINT_ER("Error while reading reg\n");
 				release_bus(wilc, RELEASE_ALLOW_SLEEP);
 				return ret;
 			}
-			PRINT_D(GENERIC_DBG, "Read RESET Reg %x : Retry%d\n",
-				reg, timeout);
 			break;
 		}
 
@@ -1379,23 +1179,22 @@
 	acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
 
 	ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
-	if (!ret) {
-		PRINT_ER("Error while reading reg\n");
+	if (!ret)
 		release_bus(wilc, RELEASE_ALLOW_SLEEP);
-	}
-	PRINT_ER("Writing ABORT reg\n");
+
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
 					(reg | ABORT_INT));
-	if (!ret) {
-		PRINT_ER("Error while writing reg\n");
+	if (!ret)
 		release_bus(wilc, RELEASE_ALLOW_SLEEP);
-	}
+
 	release_bus(wilc, RELEASE_ALLOW_SLEEP);
 	wilc->hif_func->hif_deinit(NULL);
 }
 
-static int wilc_wlan_cfg_commit(struct wilc *wilc, int type, u32 drv_handler)
+static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
+				u32 drv_handler)
 {
+	struct wilc *wilc = vif->wilc;
 	struct wilc_cfg_frame *cfg = &wilc->cfg_frame;
 	int total_len = wilc->cfg_frame_offset + 4 + DRIVER_HANDLER_SIZE;
 	int seq_no = wilc->cfg_seq_no % 256;
@@ -1414,17 +1213,18 @@
 	cfg->wid_header[7] = (u8)(driver_handler >> 24);
 	wilc->cfg_seq_no = seq_no;
 
-	if (!wilc_wlan_txq_add_cfg_pkt(wilc, &cfg->wid_header[0], total_len))
+	if (!wilc_wlan_txq_add_cfg_pkt(vif, &cfg->wid_header[0], total_len))
 		return -1;
 
 	return 0;
 }
 
-int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
 		      u32 buffer_size, int commit, u32 drv_handler)
 {
 	u32 offset;
 	int ret_size;
+	struct wilc *wilc = vif->wilc;
 
 	if (wilc->cfg_frame_in_use)
 		return 0;
@@ -1439,17 +1239,18 @@
 	wilc->cfg_frame_offset = offset;
 
 	if (commit) {
-		PRINT_D(TX_DBG, "[WILC]PACKET Commit with sequence number %d\n",
-			wilc->cfg_seq_no);
-		PRINT_D(RX_DBG, "Processing cfg_set()\n");
+		netdev_dbg(vif->ndev,
+			   "[WILC]PACKET Commit with sequence number %d\n",
+			   wilc->cfg_seq_no);
+		netdev_dbg(vif->ndev, "Processing cfg_set()\n");
 		wilc->cfg_frame_in_use = 1;
 
-		if (wilc_wlan_cfg_commit(wilc, WILC_CFG_SET, drv_handler))
+		if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler))
 			ret_size = 0;
 
 		if (wilc_lock_timeout(wilc, &wilc->cfg_event,
 					    CFG_PKTS_TIMEOUT)) {
-			PRINT_D(TX_DBG, "Set Timed Out\n");
+			netdev_dbg(vif->ndev, "Set Timed Out\n");
 			ret_size = 0;
 		}
 		wilc->cfg_frame_in_use = 0;
@@ -1460,11 +1261,12 @@
 	return ret_size;
 }
 
-int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
 		      u32 drv_handler)
 {
 	u32 offset;
 	int ret_size;
+	struct wilc *wilc = vif->wilc;
 
 	if (wilc->cfg_frame_in_use)
 		return 0;
@@ -1481,15 +1283,14 @@
 	if (commit) {
 		wilc->cfg_frame_in_use = 1;
 
-		if (wilc_wlan_cfg_commit(wilc, WILC_CFG_QUERY, drv_handler))
+		if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler))
 			ret_size = 0;
 
 		if (wilc_lock_timeout(wilc, &wilc->cfg_event,
 					    CFG_PKTS_TIMEOUT)) {
-			PRINT_D(TX_DBG, "Get Timed Out\n");
+			netdev_dbg(vif->ndev, "Get Timed Out\n");
 			ret_size = 0;
 		}
-		PRINT_D(GENERIC_DBG, "[WILC]Get Response received\n");
 		wilc->cfg_frame_in_use = 0;
 		wilc->cfg_frame_offset = 0;
 		wilc->cfg_seq_no += 1;
@@ -1500,9 +1301,43 @@
 
 int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size)
 {
-	int ret;
+	return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+}
 
-	ret = wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
+			 u32 count, u32 drv)
+{
+	int i;
+	int ret = 0;
+
+	if (mode == GET_CFG) {
+		for (i = 0; i < count; i++) {
+			if (!wilc_wlan_cfg_get(vif, !i,
+					       wids[i].id,
+					       (i == count - 1),
+					       drv)) {
+				ret = -ETIMEDOUT;
+				break;
+			}
+		}
+		for (i = 0; i < count; i++) {
+			wids[i].size = wilc_wlan_cfg_get_val(wids[i].id,
+							     wids[i].val,
+							     wids[i].size);
+		}
+	} else if (mode == SET_CFG) {
+		for (i = 0; i < count; i++) {
+			if (!wilc_wlan_cfg_set(vif, !i,
+					       wids[i].id,
+					       wids[i].val,
+					       wids[i].size,
+					       (i == count - 1),
+					       drv)) {
+				ret = -ETIMEDOUT;
+				break;
+			}
+		}
+	}
 
 	return ret;
 }
@@ -1524,18 +1359,18 @@
 	if ((chipid & 0xfff) != 0xa0) {
 		ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
 		if (!ret) {
-			wilc_debug(N_ERR, "[wilc start]: fail read reg 0x1118 ...\n");
+			netdev_err(dev, "fail read reg 0x1118\n");
 			return ret;
 		}
 		reg |= BIT(0);
 		ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
 		if (!ret) {
-			wilc_debug(N_ERR, "[wilc start]: fail write reg 0x1118 ...\n");
+			netdev_err(dev, "fail write reg 0x1118\n");
 			return ret;
 		}
 		ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
 		if (!ret) {
-			wilc_debug(N_ERR, "[wilc start]: fail write reg 0xc0000 ...\n");
+			netdev_err(dev, "fail write reg 0xc0000\n");
 			return ret;
 		}
 	}
@@ -1545,36 +1380,31 @@
 	return ret;
 }
 
-u32 wilc_get_chipid(struct wilc *wilc, u8 update)
+u32 wilc_get_chipid(struct wilc *wilc, bool update)
 {
 	static u32 chipid;
 	u32 tempchipid = 0;
-	u32 rfrevid;
+	u32 rfrevid = 0;
 
-	if (chipid == 0 || update != 0) {
+	if (chipid == 0 || update) {
 		wilc->hif_func->hif_read_reg(wilc, 0x1000, &tempchipid);
 		wilc->hif_func->hif_read_reg(wilc, 0x13f4, &rfrevid);
 		if (!ISWILC1000(tempchipid)) {
 			chipid = 0;
-			goto _fail_;
+			return chipid;
 		}
 		if (tempchipid == 0x1002a0) {
-			if (rfrevid == 0x1) {
-			} else {
+			if (rfrevid != 0x1)
 				tempchipid = 0x1002a1;
-			}
 		} else if (tempchipid == 0x1002b0) {
-			if (rfrevid == 3) {
-			} else if (rfrevid == 4) {
+			if (rfrevid == 0x4)
 				tempchipid = 0x1002b1;
-			} else {
+			else if (rfrevid != 0x3)
 				tempchipid = 0x1002b2;
-			}
 		}
 
 		chipid = tempchipid;
 	}
-_fail_:
 	return chipid;
 }
 
@@ -1586,34 +1416,31 @@
 
 	wilc = vif->wilc;
 
-	PRINT_D(INIT_DBG, "Initializing WILC_Wlan ...\n");
+	wilc->quit = 0;
 
-	if (!wilc->hif_func->hif_init(wilc)) {
+	if (!wilc->hif_func->hif_init(wilc, false)) {
 		ret = -EIO;
 		goto _fail_;
 	}
 
-	if (!wilc_wlan_cfg_init(wilc_debug)) {
+	if (!wilc_wlan_cfg_init()) {
 		ret = -ENOBUFS;
 		goto _fail_;
 	}
 
 	if (!wilc->tx_buffer)
 		wilc->tx_buffer = kmalloc(LINUX_TX_SIZE, GFP_KERNEL);
-	PRINT_D(TX_DBG, "wilc->tx_buffer = %p\n", wilc->tx_buffer);
 
 	if (!wilc->tx_buffer) {
 		ret = -ENOBUFS;
-		PRINT_ER("Can't allocate Tx Buffer");
 		goto _fail_;
 	}
 
 	if (!wilc->rx_buffer)
 		wilc->rx_buffer = kmalloc(LINUX_RX_SIZE, GFP_KERNEL);
-	PRINT_D(TX_DBG, "wilc->rx_buffer =%p\n", wilc->rx_buffer);
+
 	if (!wilc->rx_buffer) {
 		ret = -ENOBUFS;
-		PRINT_ER("Can't allocate Rx Buffer");
 		goto _fail_;
 	}
 
@@ -1621,9 +1448,7 @@
 		ret = -EIO;
 		goto _fail_;
 	}
-#ifdef	TCP_ACK_FILTER
 	init_tcp_tracking();
-#endif
 
 	return 1;
 
@@ -1636,35 +1461,3 @@
 
 	return ret;
 }
-
-u16 wilc_set_machw_change_vir_if(struct net_device *dev, bool value)
-{
-	u16 ret;
-	u32 reg;
-	struct wilc_vif *vif;
-	struct wilc *wilc;
-
-	vif = netdev_priv(dev);
-	wilc = vif->wilc;
-
-	mutex_lock(&wilc->hif_cs);
-	ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHANGING_VIR_IF,
-					       &reg);
-	if (!ret)
-		PRINT_ER("Error while Reading reg WILC_CHANGING_VIR_IF\n");
-
-	if (value)
-		reg |= BIT(31);
-	else
-		reg &= ~BIT(31);
-
-	ret = wilc->hif_func->hif_write_reg(wilc, WILC_CHANGING_VIR_IF,
-						reg);
-
-	if (!ret)
-		PRINT_ER("Error while writing reg WILC_CHANGING_VIR_IF\n");
-
-	mutex_unlock(&wilc->hif_cs);
-
-	return ret;
-}
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 2edd744..bcd4bfa 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -106,6 +106,7 @@
 #define WILC_HAVE_LEGACY_RF_SETTINGS	BIT(5)
 #define WILC_HAVE_XTAL_24		BIT(6)
 #define WILC_HAVE_DISABLE_WILC_UART	BIT(7)
+#define WILC_HAVE_USE_IRQ_AS_HOST_WAKE	BIT(8)
 
 /********************************************
  *
@@ -127,6 +128,11 @@
 #define WILC_PLL_TO_SPI		2
 #define ABORT_INT		BIT(31)
 
+#define LINUX_RX_SIZE		(96 * 1024)
+#define LINUX_TX_SIZE		(64 * 1024)
+
+#define MODALIAS		"WILC_SPI"
+#define GPIO_NUM		0x44
 /*******************************************/
 /*        E0 and later Interrupt flags.    */
 /*******************************************/
@@ -226,7 +232,7 @@
  ********************************************/
 struct wilc;
 struct wilc_hif_func {
-	int (*hif_init)(struct wilc *);
+	int (*hif_init)(struct wilc *, bool resume);
 	int (*hif_deinit)(struct wilc *);
 	int (*hif_read_reg)(struct wilc *, u32, u32 *);
 	int (*hif_write_reg)(struct wilc *, u32, u32);
@@ -267,8 +273,10 @@
 };
 
 struct wilc;
+struct wilc_vif;
 
-int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size);
+int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
+				u32 buffer_size);
 int wilc_wlan_start(struct wilc *);
 int wilc_wlan_stop(struct wilc *);
 int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
@@ -276,9 +284,9 @@
 int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
 void wilc_handle_isr(struct wilc *wilc);
 void wilc_wlan_cleanup(struct net_device *dev);
-int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
 		      u32 buffer_size, int commit, u32 drv_handler);
-int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
 		      u32 drv_handler);
 int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size);
 int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
@@ -292,9 +300,12 @@
 int wilc_mac_open(struct net_device *ndev);
 int wilc_mac_close(struct net_device *ndev);
 
-int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *pBSSID);
 void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
-
+void host_wakeup_notify(struct wilc *wilc);
+void host_sleep_notify(struct wilc *wilc);
 extern bool wilc_enable_ps;
-
+void chip_allow_sleep(struct wilc *wilc);
+void chip_wakeup(struct wilc *wilc);
+int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
+			 u32 count, u32 drv);
 #endif
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index b72c77b..b992243 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -19,9 +19,7 @@
  *
  ********************************************/
 
-typedef struct {
-	wilc_debug_func dPrint;
-
+struct wilc_mac_cfg {
 	int mac_status;
 	u8 mac_address[7];
 	u8 ip_address[5];
@@ -40,11 +38,11 @@
 	u8 firmware_info[8];
 	u8 scan_result[256];
 	u8 scan_result1[256];
-} wilc_mac_cfg_t;
+};
 
-static wilc_mac_cfg_t g_mac;
+static struct wilc_mac_cfg g_mac;
 
-static wilc_cfg_byte_t g_cfg_byte[] = {
+static struct wilc_cfg_byte g_cfg_byte[] = {
 	{WID_BSS_TYPE, 0},
 	{WID_CURRENT_TX_RATE, 0},
 	{WID_CURRENT_CHANNEL, 0},
@@ -87,7 +85,7 @@
 	{WID_NIL, 0}
 };
 
-static wilc_cfg_hword_t g_cfg_hword[] = {
+static struct wilc_cfg_hword g_cfg_hword[] = {
 	{WID_LINK_LOSS_THRESHOLD, 0},
 	{WID_RTS_THRESHOLD, 0},
 	{WID_FRAG_THRESHOLD, 0},
@@ -108,7 +106,7 @@
 	{WID_NIL, 0}
 };
 
-static wilc_cfg_word_t g_cfg_word[] = {
+static struct wilc_cfg_word g_cfg_word[] = {
 	{WID_FAILED_COUNT, 0},
 	{WID_RETRY_COUNT, 0},
 	{WID_MULTIPLE_RETRY_COUNT, 0},
@@ -131,7 +129,7 @@
 
 };
 
-static wilc_cfg_str_t g_cfg_str[] = {
+static struct wilc_cfg_str g_cfg_str[] = {
 	{WID_SSID, g_mac.ssid},	/* 33 + 1 bytes */
 	{WID_FIRMWARE_VERSION, g_mac.firmware_version},
 	{WID_OPERATIONAL_RATE_SET, g_mac.supp_rate},
@@ -270,13 +268,12 @@
 static void wilc_wlan_parse_response_frame(u8 *info, int size)
 {
 	u32 wid, len = 0, i = 0;
-	static int seq;
 
 	while (size > 0) {
 		i = 0;
 		wid = info[0] | (info[1] << 8);
 		wid = cpu_to_le32(wid);
-		PRINT_INFO(GENERIC_DBG, "Processing response for %d seq %d\n", wid, seq++);
+
 		switch ((wid >> 12) & 0x7) {
 		case WID_CHAR:
 			do {
@@ -329,10 +326,6 @@
 					if (wid == WID_SITE_SURVEY_RESULTS) {
 						static int toggle;
 
-						PRINT_INFO(GENERIC_DBG, "Site survey results received[%d]\n",
-							   size);
-
-						PRINT_INFO(GENERIC_DBG, "Site survey results value[%d]toggle[%d]\n", size, toggle);
 						i += toggle;
 						toggle ^= 1;
 					}
@@ -354,14 +347,14 @@
 
 static int wilc_wlan_parse_info_frame(u8 *info, int size)
 {
-	wilc_mac_cfg_t *pd = &g_mac;
+	struct wilc_mac_cfg *pd = &g_mac;
 	u32 wid, len;
 	int type = WILC_CFG_RSP_STATUS;
 
 	wid = info[0] | (info[1] << 8);
 
 	len = info[2];
-	PRINT_INFO(GENERIC_DBG, "Status Len = %d Id= %d\n", len, wid);
+
 	if ((len == 1) && (wid == WID_STATUS)) {
 		pd->mac_status = info[3];
 		type = WILC_CFG_RSP_STATUS;
@@ -394,8 +387,6 @@
 		ret = wilc_wlan_cfg_set_str(frame, offset, id, buf, size);
 	} else if (type == 4) {                 /* binary command */
 		ret = wilc_wlan_cfg_set_bin(frame, offset, id, buf, size);
-	} else {
-		g_mac.dPrint(N_ERR, "illegal id\n");
 	}
 
 	return ret;
@@ -475,8 +466,6 @@
 					if (g_cfg_str[i].id == WID_SITE_SURVEY_RESULTS)	{
 						static int toggle;
 
-						PRINT_INFO(GENERIC_DBG, "Site survey results value[%d]\n",
-							   size);
 						i += toggle;
 						toggle ^= 1;
 
@@ -488,8 +477,6 @@
 			}
 			i++;
 		} while (1);
-	} else {
-		g_mac.dPrint(N_ERR, "[CFG]: illegal type (%08x)\n", wid);
 	}
 
 	return ret;
@@ -522,7 +509,6 @@
 		rsp->type = wilc_wlan_parse_info_frame(frame, size);
 		rsp->seq_no = msg_id;
 		/*call host interface info parse as well*/
-		PRINT_INFO(RX_DBG, "Info message received\n");
 		wilc_gnrl_async_info_received(wilc, frame - 4, size + 4);
 		break;
 
@@ -532,14 +518,10 @@
 		break;
 
 	case 'S':
-		PRINT_INFO(RX_DBG, "Scan Notification Received\n");
 		wilc_scan_complete_received(wilc, frame - 4, size + 4);
 		break;
 
 	default:
-		PRINT_INFO(RX_DBG, "Receive unknown message type[%d-%d-%d-%d-%d-%d-%d-%d]\n",
-			   frame[0], frame[1], frame[2], frame[3], frame[4],
-			   frame[5], frame[6], frame[7]);
 		rsp->type = 0;
 		rsp->seq_no = msg_id;
 		ret = 0;
@@ -549,9 +531,8 @@
 	return ret;
 }
 
-int wilc_wlan_cfg_init(wilc_debug_func func)
+int wilc_wlan_cfg_init(void)
 {
-	memset((void *)&g_mac, 0, sizeof(wilc_mac_cfg_t));
-	g_mac.dPrint = func;
+	memset((void *)&g_mac, 0, sizeof(struct wilc_mac_cfg));
 	return 1;
 }
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wilc_wlan_cfg.h
index 5f74eb8..b8641a2 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.h
@@ -10,25 +10,25 @@
 #ifndef WILC_WLAN_CFG_H
 #define WILC_WLAN_CFG_H
 
-typedef struct {
+struct wilc_cfg_byte {
 	u16 id;
 	u16 val;
-} wilc_cfg_byte_t;
+};
 
-typedef struct {
+struct wilc_cfg_hword {
 	u16 id;
 	u16 val;
-} wilc_cfg_hword_t;
+};
 
-typedef struct {
+struct wilc_cfg_word {
 	u32 id;
 	u32 val;
-} wilc_cfg_word_t;
+};
 
-typedef struct {
+struct wilc_cfg_str {
 	u32 id;
 	u8 *str;
-} wilc_cfg_str_t;
+};
 
 struct wilc;
 int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size);
@@ -36,6 +36,6 @@
 int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size);
 int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
 			      struct wilc_cfg_rsp *rsp);
-int wilc_wlan_cfg_init(wilc_debug_func func);
+int wilc_wlan_cfg_init(void);
 
 #endif
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 618903c..fbe34eb 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -11,7 +11,6 @@
 #define WILC_WLAN_IF_H
 
 #include <linux/semaphore.h>
-#include "linux_wlan_common.h"
 #include <linux/netdevice.h>
 
 /********************************************
@@ -82,7 +81,7 @@
 struct tx_complete_data {
 	int size;
 	void *buff;
-	u8 *pBssid;
+	u8 *bssid;
 	struct sk_buff *skb;
 };
 
@@ -95,7 +94,7 @@
  *      Wlan Configuration ID
  *
  ********************************************/
-
+#define WILC_MULTICAST_TABLE_SIZE	8
 #define MAX_SSID_LEN            33
 #define MAX_RATES_SUPPORTED     12
 
@@ -300,6 +299,13 @@
 	WID_TYPE_FORCE_32BIT	= 0xFFFFFFFF
 };
 
+struct wid {
+	u16 id;
+	enum wid_type type;
+	s32 size;
+	s8 *val;
+};
+
 typedef enum {
 	WID_NIL				= 0xffff,
 
@@ -761,6 +767,7 @@
 	WID_DEL_BEACON			= 0x00CA,
 
 	WID_LOGTerminal_Switch		= 0x00CD,
+	WID_TX_POWER			= 0x00CE,
 	/*  EMAC Short WID list */
 	/*  RTS Threshold */
 	/*
@@ -832,7 +839,6 @@
 
 	/* Custom Integer WID list */
 	WID_GET_INACTIVE_TIME		= 0x2084,
-	WID_SET_DRV_HANDLER		= 0X2085,
 	WID_SET_OPERATION_MODE		= 0X2086,
 	/* EMAC String WID list */
 	WID_SSID			= 0x3000,
@@ -865,6 +871,7 @@
 	WID_MODEL_NAME			= 0x3027, /*Added for CAPI tool */
 	WID_MODEL_NUM			= 0x3028, /*Added for CAPI tool */
 	WID_DEVICE_NAME			= 0x3029, /*Added for CAPI tool */
+	WID_SET_DRV_HANDLER		= 0x3030,
 
 	/* NMAC String WID list */
 	WID_11N_P_ACTION_REQ		= 0x3080,
@@ -913,6 +920,6 @@
 int wilc_wlan_init(struct net_device *dev);
 void wilc_bus_set_max_speed(void);
 void wilc_bus_set_default_speed(void);
-u32 wilc_get_chipid(struct wilc *wilc, u8 update);
+u32 wilc_get_chipid(struct wilc *wilc, bool update);
 
 #endif
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 8c1e3f0..8bad018 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -73,13 +73,13 @@
 {
 	struct p80211msg_dot11req_mibset msg;
 	p80211item_uint32_t *mibitem =
-			(p80211item_uint32_t *) &msg.mibattribute.data;
+			(p80211item_uint32_t *)&msg.mibattribute.data;
 
 	msg.msgcode = DIDmsg_dot11req_mibset;
 	mibitem->did = did;
 	mibitem->data = data;
 
-	return p80211req_dorequest(wlandev, (u8 *) &msg);
+	return p80211req_dorequest(wlandev, (u8 *)&msg);
 }
 
 static int prism2_domibset_pstr32(wlandevice_t *wlandev,
@@ -87,14 +87,14 @@
 {
 	struct p80211msg_dot11req_mibset msg;
 	p80211item_pstr32_t *mibitem =
-			(p80211item_pstr32_t *) &msg.mibattribute.data;
+			(p80211item_pstr32_t *)&msg.mibattribute.data;
 
 	msg.msgcode = DIDmsg_dot11req_mibset;
 	mibitem->did = did;
 	mibitem->data.len = len;
 	memcpy(mibitem->data.data, data, len);
 
-	return p80211req_dorequest(wlandev, (u8 *) &msg);
+	return p80211req_dorequest(wlandev, (u8 *)&msg);
 }
 
 /* The interface functions, called by the cfg80211 layer */
@@ -239,7 +239,9 @@
 	int result = 0;
 
 	/* There is no direct way in the hardware (AFAIK) of removing
-	   a key, so we will cheat by setting the key to a bogus value */
+	 * a key, so we will cheat by setting the key to a bogus value
+	 */
+
 	/* send key to driver */
 	switch (key_index) {
 	case 0:
@@ -315,7 +317,7 @@
 	if (wlandev->mlmerequest == NULL)
 		return -EOPNOTSUPP;
 
-	result = wlandev->mlmerequest(wlandev, (struct p80211msg *) &quality);
+	result = wlandev->mlmerequest(wlandev, (struct p80211msg *)&quality);
 
 	if (result == 0) {
 		sinfo->txrate.legacy = quality.txrate.data;
@@ -387,7 +389,7 @@
 	msg1.maxchanneltime.data = 250;
 	msg1.minchanneltime.data = 200;
 
-	result = p80211req_dorequest(wlandev, (u8 *) &msg1);
+	result = p80211req_dorequest(wlandev, (u8 *)&msg1);
 	if (result) {
 		err = prism2_result2err(msg1.resultcode.data);
 		goto exit;
@@ -402,7 +404,7 @@
 		msg2.msgcode = DIDmsg_dot11req_scan_results;
 		msg2.bssindex.data = i;
 
-		result = p80211req_dorequest(wlandev, (u8 *) &msg2);
+		result = p80211req_dorequest(wlandev, (u8 *)&msg2);
 		if ((result != 0) ||
 		    (msg2.resultcode.data != P80211ENUM_resultcode_success)) {
 			break;
@@ -417,7 +419,7 @@
 		bss = cfg80211_inform_bss(wiphy,
 			ieee80211_get_channel(wiphy, freq),
 			CFG80211_BSS_FTYPE_UNKNOWN,
-			(const u8 *) &(msg2.bssid.data.data),
+			(const u8 *)&(msg2.bssid.data.data),
 			msg2.timestamp.data, msg2.capinfo.data,
 			msg2.beaconperiod.data,
 			ie_buf,
@@ -558,12 +560,12 @@
 							(u8 *)sme->key);
 			if (result)
 				goto exit;
-
 		}
 
 		/* Assume we should set privacy invoked and exclude unencrypted
-		   We could possibly use sme->privacy here, but the assumption
-		   seems reasonable anyway */
+		 * We could possibly use sme->privacy here, but the assumption
+		 * seems reasonable anyway
+		 */
 		result = prism2_domibset_uint32(wlandev,
 						DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
 						P80211ENUM_truth_true);
@@ -578,7 +580,8 @@
 
 	} else {
 		/* Assume we should unset privacy invoked
-		   and exclude unencrypted */
+		 * and exclude unencrypted
+		 */
 		result = prism2_domibset_uint32(wlandev,
 						DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
 						P80211ENUM_truth_false);
@@ -590,17 +593,17 @@
 						P80211ENUM_truth_false);
 		if (result)
 			goto exit;
-
 	}
 
 	/* Now do the actual join. Note there is no way that I can
-	   see to request a specific bssid */
+	 * see to request a specific bssid
+	 */
 	msg_join.msgcode = DIDmsg_lnxreq_autojoin;
 
 	memcpy(msg_join.ssid.data.data, sme->ssid, length);
 	msg_join.ssid.data.len = length;
 
-	result = p80211req_dorequest(wlandev, (u8 *) &msg_join);
+	result = p80211req_dorequest(wlandev, (u8 *)&msg_join);
 
 exit:
 	if (result)
@@ -623,7 +626,7 @@
 	memcpy(msg_join.ssid.data.data, "---", 3);
 	msg_join.ssid.data.len = 3;
 
-	result = p80211req_dorequest(wlandev, (u8 *) &msg_join);
+	result = p80211req_dorequest(wlandev, (u8 *)&msg_join);
 
 	if (result)
 		err = -EFAULT;
@@ -679,12 +682,12 @@
 	int result;
 	int err = 0;
 
-	mibitem = (p80211item_uint32_t *) &msg.mibattribute.data;
+	mibitem = (p80211item_uint32_t *)&msg.mibattribute.data;
 	msg.msgcode = DIDmsg_dot11req_mibget;
 	mibitem->did =
 	    DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel;
 
-	result = p80211req_dorequest(wlandev, (u8 *) &msg);
+	result = p80211req_dorequest(wlandev, (u8 *)&msg);
 
 	if (result) {
 		err = -EFAULT;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 8dfe438..cec6d0b 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1360,7 +1360,6 @@
 
 int
 hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis);
-int hfa384x_drvr_commtallies(hfa384x_t *hw);
 int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport);
 int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport);
 int hfa384x_drvr_flashdl_enable(hfa384x_t *hw);
@@ -1391,10 +1390,6 @@
 }
 
 int
-hfa384x_drvr_getconfig_async(hfa384x_t *hw,
-			     u16 rid, ctlx_usercb_t usercb, void *usercb_data);
-
-int
 hfa384x_drvr_setconfig_async(hfa384x_t *hw,
 			     u16 rid,
 			     void *buf,
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 7551ac2..09b4b2c 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -145,11 +145,11 @@
 	DOASYNC
 };
 
-#define THROTTLE_JIFFIES	(HZ/8)
+#define THROTTLE_JIFFIES	(HZ / 8)
 #define URB_ASYNC_UNLINK 0
 #define USB_QUEUE_BULK 0
 
-#define ROUNDUP64(a) (((a)+63)&~63)
+#define ROUNDUP64(a) (((a) + 63) & ~63)
 
 #ifdef DEBUG_USB
 static void dbprint_urb(struct urb *urb);
@@ -213,8 +213,6 @@
 
 static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx);
 
-static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx);
-
 static int
 usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp,
 		   hfa384x_cmdresult_t *result);
@@ -816,43 +814,6 @@
 	}
 }
 
-/*----------------------------------------------------------------
-* hfa384x_cb_rrid
-*
-* CTLX completion handler for async RRID type control exchanges.
-*
-* Note: If the handling is changed here, it should probably be
-*       changed in dorrid as well.
-*
-* Arguments:
-*	hw		hw struct
-*	ctlx		completed CTLX
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
-static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
-{
-	if (ctlx->usercb != NULL) {
-		hfa384x_rridresult_t rridresult;
-
-		if (ctlx->state != CTLX_COMPLETE) {
-			memset(&rridresult, 0, sizeof(rridresult));
-			rridresult.rid = le16_to_cpu(ctlx->outbuf.rridreq.rid);
-		} else {
-			usbctlx_get_rridresult(&ctlx->inbuf.rridresp,
-					       &rridresult);
-		}
-
-		ctlx->usercb(hw, &rridresult, ctlx->usercb_data);
-	}
-}
-
 static inline int hfa384x_docmd_wait(hfa384x_t *hw, hfa384x_metacmd_t *cmd)
 {
 	return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
@@ -1012,7 +973,6 @@
 ----------------------------------------------------------------*/
 int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
 {
-	int result = 0;
 	hfa384x_metacmd_t cmd;
 
 	cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DISABLE) |
@@ -1021,9 +981,7 @@
 	cmd.parm1 = 0;
 	cmd.parm2 = 0;
 
-	result = hfa384x_docmd_wait(hw, &cmd);
-
-	return result;
+	return hfa384x_docmd_wait(hw, &cmd);
 }
 
 /*----------------------------------------------------------------
@@ -1048,7 +1006,6 @@
 ----------------------------------------------------------------*/
 int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
 {
-	int result = 0;
 	hfa384x_metacmd_t cmd;
 
 	cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_ENABLE) |
@@ -1057,9 +1014,7 @@
 	cmd.parm1 = 0;
 	cmd.parm2 = 0;
 
-	result = hfa384x_docmd_wait(hw, &cmd);
-
-	return result;
+	return hfa384x_docmd_wait(hw, &cmd);
 }
 
 /*----------------------------------------------------------------
@@ -1093,7 +1048,6 @@
 ----------------------------------------------------------------*/
 int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
 {
-	int result = 0;
 	hfa384x_metacmd_t cmd;
 
 	cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_MONITOR) |
@@ -1102,9 +1056,7 @@
 	cmd.parm1 = 0;
 	cmd.parm2 = 0;
 
-	result = hfa384x_docmd_wait(hw, &cmd);
-
-	return result;
+	return hfa384x_docmd_wait(hw, &cmd);
 }
 
 /*----------------------------------------------------------------
@@ -1148,7 +1100,6 @@
 int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr,
 			 u16 highaddr, u16 codelen)
 {
-	int result = 0;
 	hfa384x_metacmd_t cmd;
 
 	pr_debug("mode=%d, lowaddr=0x%04x, highaddr=0x%04x, codelen=%d\n",
@@ -1161,9 +1112,7 @@
 	cmd.parm1 = highaddr;
 	cmd.parm2 = codelen;
 
-	result = hfa384x_docmd_wait(hw, &cmd);
-
-	return result;
+	return hfa384x_docmd_wait(hw, &cmd);
 }
 
 /*----------------------------------------------------------------
@@ -1747,37 +1696,6 @@
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_commtallies
-*
-* Send a commtallies inquiry to the MAC.  Note that this is an async
-* call that will result in an info frame arriving sometime later.
-*
-* Arguments:
-*	hw		device structure
-*
-* Returns:
-*	zero		success.
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
-int hfa384x_drvr_commtallies(hfa384x_t *hw)
-{
-	hfa384x_metacmd_t cmd;
-
-	cmd.cmd = HFA384x_CMDCODE_INQ;
-	cmd.parm0 = HFA384x_IT_COMMTALLIES;
-	cmd.parm1 = 0;
-	cmd.parm2 = 0;
-
-	hfa384x_docmd_async(hw, &cmd, NULL, NULL, NULL);
-
-	return 0;
-}
-
-/*----------------------------------------------------------------
 * hfa384x_drvr_disable
 *
 * Issues the disable command to stop communications on one of
@@ -2122,41 +2040,6 @@
 }
 
 /*----------------------------------------------------------------
- * hfa384x_drvr_getconfig_async
- *
- * Performs the sequence necessary to perform an async read of
- * of a config/info item.
- *
- * Arguments:
- *       hw              device structure
- *       rid             config/info record id (host order)
- *       buf             host side record buffer.  Upon return it will
- *                       contain the body portion of the record (minus the
- *                       RID and len).
- *       len             buffer length (in bytes, should match record length)
- *       cbfn            caller supplied callback, called when the command
- *                       is done (successful or not).
- *       cbfndata        pointer to some caller supplied data that will be
- *                       passed in as an argument to the cbfn.
- *
- * Returns:
- *       nothing         the cbfn gets a status argument identifying if
- *                       any errors occur.
- * Side effects:
- *       Queues an hfa384x_usbcmd_t for subsequent execution.
- *
- * Call context:
- *       Any
- ----------------------------------------------------------------*/
-int
-hfa384x_drvr_getconfig_async(hfa384x_t *hw,
-			     u16 rid, ctlx_usercb_t usercb, void *usercb_data)
-{
-	return hfa384x_dorrid_async(hw, rid, NULL, 0,
-				    hfa384x_cb_rrid, usercb, usercb_data);
-}
-
-/*----------------------------------------------------------------
  * hfa384x_drvr_setconfig_async
  *
  * Performs the sequence necessary to write a config/info item.
@@ -2810,8 +2693,7 @@
 static void hfa384x_usbctlx_reaper_task(unsigned long data)
 {
 	hfa384x_t *hw = (hfa384x_t *)data;
-	struct list_head *entry;
-	struct list_head *temp;
+	hfa384x_usbctlx_t *ctlx, *temp;
 	unsigned long flags;
 
 	spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -2819,10 +2701,7 @@
 	/* This list is guaranteed to be empty if someone
 	 * has unplugged the adapter.
 	 */
-	list_for_each_safe(entry, temp, &hw->ctlxq.reapable) {
-		hfa384x_usbctlx_t *ctlx;
-
-		ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
+	list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.reapable, list) {
 		list_del(&ctlx->list);
 		kfree(ctlx);
 	}
@@ -2847,8 +2726,7 @@
 static void hfa384x_usbctlx_completion_task(unsigned long data)
 {
 	hfa384x_t *hw = (hfa384x_t *)data;
-	struct list_head *entry;
-	struct list_head *temp;
+	hfa384x_usbctlx_t *ctlx, *temp;
 	unsigned long flags;
 
 	int reap = 0;
@@ -2858,11 +2736,7 @@
 	/* This list is guaranteed to be empty if someone
 	 * has unplugged the adapter ...
 	 */
-	list_for_each_safe(entry, temp, &hw->ctlxq.completing) {
-		hfa384x_usbctlx_t *ctlx;
-
-		ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
-
+	list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.completing, list) {
 		/* Call the completion function that this
 		 * command was assigned, assuming it has one.
 		 */
@@ -3985,8 +3859,7 @@
 	pr_debug("flags=0x%lx\n", hw->usb_flags);
 	if (!hw->wlandev->hwremoved &&
 	    ((test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) &&
-	      !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags))
-	     |
+	      !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) |
 	     (test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) &&
 	      !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags))
 	    )) {
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 1b02cdf..0a8f396 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -49,7 +49,8 @@
 *
 * --------------------------------------------------------------------
 *
-*================================================================ */
+*================================================================
+*/
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -101,12 +102,12 @@
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
 int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
 			struct sk_buff *skb, union p80211_hdr *p80211_hdr,
 			struct p80211_metawep *p80211_wep)
 {
-
 	__le16 fc;
 	u16 proto;
 	struct wlan_ethhdr e_hdr;
@@ -148,11 +149,11 @@
 
 			/* tack on SNAP */
 			e_snap =
-			    (struct wlan_snap *) skb_push(skb,
+			    (struct wlan_snap *)skb_push(skb,
 				sizeof(struct wlan_snap));
 			e_snap->type = htons(proto);
-			if (ethconv == WLAN_ETHCONV_8021h
-			    && p80211_stt_findproto(proto)) {
+			if (ethconv == WLAN_ETHCONV_8021h &&
+			    p80211_stt_findproto(proto)) {
 				memcpy(e_snap->oui, oui_8021h,
 				       WLAN_IEEE_OUI_LEN);
 			} else {
@@ -162,12 +163,11 @@
 
 			/* tack on llc */
 			e_llc =
-			    (struct wlan_llc *) skb_push(skb,
+			    (struct wlan_llc *)skb_push(skb,
 				sizeof(struct wlan_llc));
 			e_llc->dsap = 0xAA;	/* SNAP, see IEEE 802 */
 			e_llc->ssap = 0xAA;
 			e_llc->ctl = 0x03;
-
 		}
 	}
 
@@ -202,8 +202,8 @@
 
 	p80211_wep->data = NULL;
 
-	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED)
-	    && (wlandev->hostwep & HOSTWEP_ENCRYPT)) {
+	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
+	    (wlandev->hostwep & HOSTWEP_ENCRYPT)) {
 		/* XXXX need to pick keynum other than default? */
 
 		p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC);
@@ -215,8 +215,8 @@
 				  p80211_wep->iv, p80211_wep->icv);
 		if (foo) {
 			netdev_warn(wlandev->netdev,
-			       "Host en-WEP failed, dropping frame (%d).\n",
-			       foo);
+				    "Host en-WEP failed, dropping frame (%d).\n",
+				    foo);
 			return 2;
 		}
 		fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1));
@@ -238,10 +238,10 @@
 	int i;
 
 	/* Gather wireless spy statistics: for each packet, compare the
-	 * source address with out list, and if match, get the stats... */
+	 * source address with our list, and if it matches, get the stats...
+	 */
 
 	for (i = 0; i < wlandev->spy_number; i++) {
-
 		if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
 			memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
 			wlandev->spy_stat[i].level = rxmeta->signal;
@@ -273,7 +273,8 @@
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
 int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
 			struct sk_buff *skb)
 {
@@ -293,19 +294,19 @@
 	payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
 	payload_offset = WLAN_HDR_A3_LEN;
 
-	w_hdr = (union p80211_hdr *) skb->data;
+	w_hdr = (union p80211_hdr *)skb->data;
 
 	/* setup some vars for convenience */
 	fc = le16_to_cpu(w_hdr->a3.fc);
 	if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
 		ether_addr_copy(daddr, w_hdr->a3.a1);
 		ether_addr_copy(saddr, w_hdr->a3.a2);
-	} else if ((WLAN_GET_FC_TODS(fc) == 0)
-			&& (WLAN_GET_FC_FROMDS(fc) == 1)) {
+	} else if ((WLAN_GET_FC_TODS(fc) == 0) &&
+		   (WLAN_GET_FC_FROMDS(fc) == 1)) {
 		ether_addr_copy(daddr, w_hdr->a3.a1);
 		ether_addr_copy(saddr, w_hdr->a3.a3);
-	} else if ((WLAN_GET_FC_TODS(fc) == 1)
-			&& (WLAN_GET_FC_FROMDS(fc) == 0)) {
+	} else if ((WLAN_GET_FC_TODS(fc) == 1) &&
+		   (WLAN_GET_FC_FROMDS(fc) == 0)) {
 		ether_addr_copy(daddr, w_hdr->a3.a3);
 		ether_addr_copy(saddr, w_hdr->a3.a2);
 	} else {
@@ -320,18 +321,19 @@
 	}
 
 	/* perform de-wep if necessary.. */
-	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && WLAN_GET_FC_ISWEP(fc)
-	    && (wlandev->hostwep & HOSTWEP_DECRYPT)) {
+	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
+	    WLAN_GET_FC_ISWEP(fc) &&
+	    (wlandev->hostwep & HOSTWEP_DECRYPT)) {
 		if (payload_length <= 8) {
 			netdev_err(netdev,
 				   "WEP frame too short (%u).\n", skb->len);
 			return 1;
 		}
 		foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
-				       payload_length - 8, -1,
-				       skb->data + payload_offset,
-				       skb->data + payload_offset +
-				       payload_length - 4);
+				  payload_length - 8, -1,
+				  skb->data + payload_offset,
+				  skb->data + payload_offset +
+				  payload_length - 4);
 		if (foo) {
 			/* de-wep failed, drop skb. */
 			pr_debug("Host de-WEP failed, dropping frame (%d).\n",
@@ -350,11 +352,11 @@
 		wlandev->rx.decrypt++;
 	}
 
-	e_hdr = (struct wlan_ethhdr *) (skb->data + payload_offset);
+	e_hdr = (struct wlan_ethhdr *)(skb->data + payload_offset);
 
-	e_llc = (struct wlan_llc *) (skb->data + payload_offset);
+	e_llc = (struct wlan_llc *)(skb->data + payload_offset);
 	e_snap =
-	    (struct wlan_snap *) (skb->data + payload_offset +
+	    (struct wlan_snap *)(skb->data + payload_offset +
 		sizeof(struct wlan_llc));
 
 	/* Test for the various encodings */
@@ -369,7 +371,7 @@
 			/* A bogus length ethfrm has been encap'd. */
 			/* Is someone trying an oflow attack? */
 			netdev_err(netdev, "ENCAP frame too large (%d > %d)\n",
-			       payload_length, netdev->mtu + ETH_HLEN);
+				   payload_length, netdev->mtu + ETH_HLEN);
 			return 1;
 		}
 
@@ -379,15 +381,15 @@
 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
 
 	} else if ((payload_length >= sizeof(struct wlan_llc) +
-		sizeof(struct wlan_snap))
-		&& (e_llc->dsap == 0xaa)
-		&& (e_llc->ssap == 0xaa)
-		&& (e_llc->ctl == 0x03)
-		   &&
-		   (((memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) == 0)
-		     && (ethconv == WLAN_ETHCONV_8021h)
-		     && (p80211_stt_findproto(le16_to_cpu(e_snap->type))))
-		    || (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
+		sizeof(struct wlan_snap)) &&
+		(e_llc->dsap == 0xaa) &&
+		(e_llc->ssap == 0xaa) &&
+		(e_llc->ctl == 0x03) &&
+		   (((memcmp(e_snap->oui, oui_rfc1042,
+		   WLAN_IEEE_OUI_LEN) == 0) &&
+		   (ethconv == WLAN_ETHCONV_8021h) &&
+		   (p80211_stt_findproto(le16_to_cpu(e_snap->type)))) ||
+		   (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
 			0))) {
 		pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
 		/* it's a SNAP + RFC1042 frame && protocol is in STT */
@@ -398,7 +400,7 @@
 			/* A bogus length ethfrm has been sent. */
 			/* Is someone trying an oflow attack? */
 			netdev_err(netdev, "SNAP frame too large (%d > %d)\n",
-			       payload_length, netdev->mtu);
+				   payload_length, netdev->mtu);
 			return 1;
 		}
 
@@ -415,13 +417,14 @@
 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
 
 	} else if ((payload_length >= sizeof(struct wlan_llc) +
-		sizeof(struct wlan_snap))
-		&& (e_llc->dsap == 0xaa)
-		&& (e_llc->ssap == 0xaa)
-		&& (e_llc->ctl == 0x03)) {
+		sizeof(struct wlan_snap)) &&
+		(e_llc->dsap == 0xaa) &&
+		(e_llc->ssap == 0xaa) &&
+		(e_llc->ctl == 0x03)) {
 		pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
 		/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
-		   build a DIXII + RFC894 */
+		 * build a DIXII + RFC894
+		 */
 
 		/* Test for an overlength frame */
 		if ((payload_length - sizeof(struct wlan_llc) -
@@ -430,9 +433,9 @@
 			/* A bogus length ethfrm has been sent. */
 			/* Is someone trying an oflow attack? */
 			netdev_err(netdev, "DIXII frame too large (%ld > %d)\n",
-			       (long int)(payload_length -
-					sizeof(struct wlan_llc) -
-					sizeof(struct wlan_snap)), netdev->mtu);
+				   (long int)(payload_length -
+				   sizeof(struct wlan_llc) -
+				   sizeof(struct wlan_snap)), netdev->mtu);
 			return 1;
 		}
 
@@ -465,7 +468,7 @@
 			/* A bogus length ethfrm has been sent. */
 			/* Is someone trying an oflow attack? */
 			netdev_err(netdev, "OTHER frame too large (%d > %d)\n",
-			       payload_length, netdev->mtu);
+				   payload_length, netdev->mtu);
 			return 1;
 		}
 
@@ -480,7 +483,6 @@
 
 		/* chop off the 802.11 CRC */
 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
-
 	}
 
 	/*
@@ -521,14 +523,15 @@
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
 int p80211_stt_findproto(u16 proto)
 {
 	/* Always return found for now.  This is the behavior used by the */
-	/*  Zoom Win95 driver when 802.1h mode is selected */
+	/* Zoom Win95 driver when 802.1h mode is selected */
 	/* TODO: If necessary, add an actual search we'll probably
-	   need this to match the CMAC's way of doing things.
-	   Need to do some testing to confirm.
+	 * need this to match the CMAC's way of doing things.
+	 * Need to do some testing to confirm.
 	 */
 
 	if (proto == ETH_P_AARP)	/* APPLETALK */
@@ -551,24 +554,25 @@
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
 void p80211skb_rxmeta_detach(struct sk_buff *skb)
 {
 	struct p80211_rxmeta *rxmeta;
 	struct p80211_frmmeta *frmmeta;
 
 	/* Sanity checks */
-	if (skb == NULL) {	/* bad skb */
+	if (!skb) {	/* bad skb */
 		pr_debug("Called w/ null skb.\n");
 		return;
 	}
 	frmmeta = P80211SKB_FRMMETA(skb);
-	if (frmmeta == NULL) {	/* no magic */
+	if (!frmmeta) {	/* no magic */
 		pr_debug("Called w/ bad frmmeta magic.\n");
 		return;
 	}
 	rxmeta = frmmeta->rx;
-	if (rxmeta == NULL) {	/* bad meta ptr */
+	if (!rxmeta) {	/* bad meta ptr */
 		pr_debug("Called w/ bad rxmeta ptr.\n");
 		return;
 	}
@@ -595,7 +599,8 @@
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
 int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
 {
 	int result = 0;
@@ -603,7 +608,7 @@
 	struct p80211_frmmeta *frmmeta;
 
 	/* If these already have metadata, we error out! */
-	if (P80211SKB_RXMETA(skb) != NULL) {
+	if (P80211SKB_RXMETA(skb)) {
 		netdev_err(wlandev->netdev,
 			   "%s: RXmeta already attached!\n", wlandev->name);
 		result = 0;
@@ -613,7 +618,7 @@
 	/* Allocate the rxmeta */
 	rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
 
-	if (rxmeta == NULL) {
+	if (!rxmeta) {
 		netdev_err(wlandev->netdev,
 			   "%s: Failed to allocate rxmeta.\n", wlandev->name);
 		result = 1;
@@ -626,7 +631,7 @@
 
 	/* Overlay a frmmeta_t onto skb->cb */
 	memset(skb->cb, 0, sizeof(struct p80211_frmmeta));
-	frmmeta = (struct p80211_frmmeta *) (skb->cb);
+	frmmeta = (struct p80211_frmmeta *)(skb->cb);
 	frmmeta->magic = P80211_FRMMETA_MAGIC;
 	frmmeta->rx = rxmeta;
 exit:
@@ -648,7 +653,8 @@
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
 void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
 {
 	struct p80211_frmmeta *meta;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a9c1e0b..88255ce 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -328,7 +328,7 @@
 
 	p80211_wep.data = NULL;
 
-	if (skb == NULL)
+	if (!skb)
 		return NETDEV_TX_OK;
 
 	if (wlandev->state != WLAN_DEVICE_OPEN) {
@@ -388,7 +388,7 @@
 			goto failed;
 		}
 	}
-	if (wlandev->txframe == NULL) {
+	if (!wlandev->txframe) {
 		result = 1;
 		goto failed;
 	}
@@ -736,7 +736,7 @@
 
 	/* Allocate and initialize the wiphy struct */
 	wiphy = wlan_create_wiphy(physdev, wlandev);
-	if (wiphy == NULL) {
+	if (!wiphy) {
 		dev_err(physdev, "Failed to alloc wiphy.\n");
 		return 1;
 	}
@@ -744,7 +744,7 @@
 	/* Allocate and initialize the struct device */
 	netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
 			      NET_NAME_UNKNOWN, ether_setup);
-	if (netdev == NULL) {
+	if (!netdev) {
 		dev_err(physdev, "Failed to alloc netdev.\n");
 		wlan_free_wiphy(wiphy);
 		result = 1;
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index c363456..22c7970 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -140,8 +140,8 @@
 }
 
 /*
-  4-byte IV at start of buffer, 4-byte ICV at end of buffer.
-  if successful, buf start is payload begin, length -= 8;
+ * 4-byte IV at start of buffer, 4-byte ICV at end of buffer.
+ * if successful, buf start is payload begin, length -= 8;
  */
 int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
 		u8 *iv, u8 *icv)
@@ -188,7 +188,8 @@
 
 	/* Apply the RC4 to the data, update the CRC32 */
 	crc = ~0;
-	i = j = 0;
+	i = 0;
+	j = 0;
 	for (k = 0; k < len; k++) {
 		i = (i + 1) & 0xff;
 		j = (j + s[i]) & 0xff;
@@ -260,7 +261,8 @@
 
 	/* Update CRC32 then apply RC4 to the data */
 	crc = ~0;
-	i = j = 0;
+	i = 0;
+	j = 0;
 	for (k = 0; k < len; k++) {
 		crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
 		i = (i + 1) & 0xff;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 7a9f424..e647203 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -87,7 +87,6 @@
 * Prism2 data types
 ---------------------------------------------------------------*/
 /* byte area conversion functions*/
-void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr);
 void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len);
 
 /* byte string conversion functions*/
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index cdda07d..e1b2e47 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -388,7 +388,7 @@
 		prism2mgmt_bytearea2pstr(bytebuf, pstr, mib->parm2);
 	} else {
 		memset(bytebuf, 0, mib->parm2);
-		prism2mgmt_pstr2bytearea(bytebuf, pstr);
+		memcpy(bytebuf, pstr->data, pstr->len);
 		result =
 		    hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, mib->parm2);
 	}
@@ -543,7 +543,7 @@
 		len = (pstr->len > 5) ? HFA384x_RID_CNFWEP128DEFAULTKEY_LEN :
 		    HFA384x_RID_CNFWEPDEFAULTKEY_LEN;
 		memset(bytebuf, 0, len);
-		prism2mgmt_pstr2bytearea(bytebuf, pstr);
+		memcpy(bytebuf, pstr->data, pstr->len);
 		result = hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, len);
 	}
 
@@ -759,26 +759,6 @@
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_pstr2bytearea
-*
-* Convert the pstr data in the WLAN message structure into an hfa384x
-* byte area format.
-*
-* Arguments:
-*	bytearea	hfa384x byte area data type
-*	pstr		wlan message data
-*
-* Returns:
-*	Nothing
-*
-----------------------------------------------------------------*/
-
-void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr)
-{
-	memcpy(bytearea, pstr->data, pstr->len);
-}
-
-/*----------------------------------------------------------------
 * prism2mgmt_bytestr2pstr
 *
 * Convert the data in an hfa384x byte string format into a
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 8abf3f8..ae6a53c 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -139,8 +139,7 @@
 	wlandev = (wlandevice_t *)usb_get_intfdata(interface);
 	if (wlandev != NULL) {
 		LIST_HEAD(cleanlist);
-		struct list_head *entry;
-		struct list_head *temp;
+		hfa384x_usbctlx_t *ctlx, *temp;
 		unsigned long flags;
 
 		hfa384x_t *hw = wlandev->priv;
@@ -178,18 +177,15 @@
 		tasklet_kill(&hw->completion_bh);
 		tasklet_kill(&hw->reaper_bh);
 
-		flush_scheduled_work();
+		cancel_work_sync(&hw->link_bh);
+		cancel_work_sync(&hw->commsqual_bh);
 
 		/* Now we complete any outstanding commands
 		 * and tell everyone who is waiting for their
 		 * responses that we have shut down.
 		 */
-		list_for_each(entry, &cleanlist) {
-			hfa384x_usbctlx_t *ctlx;
-
-			ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
+		list_for_each_entry(ctlx, &cleanlist, list)
 			complete(&ctlx->done);
-		}
 
 		/* Give any outstanding synchronous commands
 		 * a chance to complete. All they need to do
@@ -199,12 +195,8 @@
 		msleep(100);
 
 		/* Now delete the CTLXs, because no-one else can now. */
-		list_for_each_safe(entry, temp, &cleanlist) {
-			hfa384x_usbctlx_t *ctlx;
-
-			ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
+		list_for_each_entry_safe(ctlx, temp, &cleanlist, list)
 			kfree(ctlx);
-		}
 
 		/* Unhook the wlandev */
 		unregister_wlandev(wlandev);
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 89f5b55..cc55a17 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -655,26 +655,26 @@
 
 	switch (xgifb_info->display2) {
 	case XGIFB_DISP_CRT:
-		cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
+		cr30 = SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE;
 		cr31 |= SIS_DRIVER_MODE;
 		break;
 	case XGIFB_DISP_LCD:
-		cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
+		cr30 = SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE;
 		cr31 |= SIS_DRIVER_MODE;
 		break;
 	case XGIFB_DISP_TV:
 		if (xgifb_info->TV_type == TVMODE_HIVISION)
-			cr30 = (SIS_VB_OUTPUT_HIVISION
-					| SIS_SIMULTANEOUS_VIEW_ENABLE);
+			cr30 = SIS_VB_OUTPUT_HIVISION
+					| SIS_SIMULTANEOUS_VIEW_ENABLE;
 		else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
-			cr30 = (SIS_VB_OUTPUT_SVIDEO
-					| SIS_SIMULTANEOUS_VIEW_ENABLE);
+			cr30 = SIS_VB_OUTPUT_SVIDEO
+					| SIS_SIMULTANEOUS_VIEW_ENABLE;
 		else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
-			cr30 = (SIS_VB_OUTPUT_COMPOSITE
-					| SIS_SIMULTANEOUS_VIEW_ENABLE);
+			cr30 = SIS_VB_OUTPUT_COMPOSITE
+					| SIS_SIMULTANEOUS_VIEW_ENABLE;
 		else if (xgifb_info->TV_plug == TVPLUG_SCART)
-			cr30 = (SIS_VB_OUTPUT_SCART
-					| SIS_SIMULTANEOUS_VIEW_ENABLE);
+			cr30 = SIS_VB_OUTPUT_SCART
+					| SIS_SIMULTANEOUS_VIEW_ENABLE;
 		cr31 |= SIS_DRIVER_MODE;
 
 		if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
@@ -2064,8 +2064,6 @@
 	.remove = xgifb_remove
 };
 
-
-
 /*****************************************************/
 /*                      MODULE                       */
 /*****************************************************/
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 879a7e6..26b539b 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -57,7 +57,8 @@
 		data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
 		/* HOTPLUG_SUPPORT */
 		/* for current XG20 & XG21, GPIOH is floating, driver will
-		 * fix DDR temporarily */
+		 * fix DDR temporarily
+		 */
 		/* DVI read GPIOH */
 		data &= 0x01; /* 1=DDRII, 0=DDR */
 		/* ~HOTPLUG_SUPPORT */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index c886dd2..2078d57 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -61,7 +61,6 @@
 		if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
 			pVBInfo->XGINew_CR97 = 0x80;
 	}
-
 }
 
 static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
@@ -155,7 +154,6 @@
 
 static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
 {
-
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x20);
 	xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[0].SR2B);
 	xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[0].SR2C);
@@ -274,12 +272,12 @@
 
 	for (i = 0x01; i <= 0x04; i++) {
 		data = pVBInfo->TimingH.data[i];
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 1), data);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 1), data);
 	}
 
 	for (i = 0x05; i <= 0x06; i++) {
 		data = pVBInfo->TimingH.data[i];
-		xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i + 6), data);
+		xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i + 6), data);
 	}
 
 	j = xgifb_reg_get(pVBInfo->P3c4, 0x0e);
@@ -325,17 +323,17 @@
 
 	for (i = 0x00; i <= 0x01; i++) {
 		data = pVBInfo->TimingV.data[i];
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 6), data);
 	}
 
 	for (i = 0x02; i <= 0x03; i++) {
 		data = pVBInfo->TimingV.data[i];
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x0e), data);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x0e), data);
 	}
 
 	for (i = 0x04; i <= 0x05; i++) {
 		data = pVBInfo->TimingV.data[i];
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x11), data);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x11), data);
 	}
 
 	j = xgifb_reg_get(pVBInfo->P3c4, 0x0a);
@@ -433,7 +431,7 @@
 		Temp2 |= 0x40; /* Temp2 + 0x40 */
 
 	Temp2 &= 0xFF;
-	Tempax = (unsigned char) Temp2; /* Tempax: HRE[7:0] */
+	Tempax = (unsigned char)Temp2; /* Tempax: HRE[7:0] */
 	Tempax <<= 2; /* Tempax[7:2]: HRE[5:0] */
 	Tempdx >>= 6; /* Tempdx[7:6]->[1:0] HRS[9:8] */
 	Tempax |= Tempdx; /* HRE[5:0]HRS[9:8] */
@@ -483,11 +481,11 @@
 		Temp2 |= 0x20; /* VRE + 0x20 */
 
 	Temp2 &= 0xFF;
-	Tempax = (unsigned char) Temp2; /* Tempax: VRE[7:0] */
+	Tempax = (unsigned char)Temp2; /* Tempax: VRE[7:0] */
 	Tempax <<= 2; /* Tempax[7:0]; VRE[5:0]00 */
 	Temp1 &= 0x600; /* Temp1[10:9]: VRS[10:9] */
 	Temp1 >>= 9; /* Temp1[1:0]: VRS[10:9] */
-	Tempbx = (unsigned char) Temp1;
+	Tempbx = (unsigned char)Temp1;
 	Tempax |= Tempbx; /* Tempax[7:0]: VRE[5:0]VRS[10:9] */
 	Tempax &= 0x7F;
 	/* SR3F D[7:2]->VRE D[1:0]->VRS */
@@ -592,7 +590,6 @@
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
 	/* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: 24bits */
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
 }
 
 static void xgifb_set_lcd(int chip_id,
@@ -716,10 +713,10 @@
 	data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
 	data &= 0x7F;
 	xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
-	xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short) (tempcx & 0xff));
+	xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
 	xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
-			(unsigned short) ((tempcx & 0x0ff00) >> 10));
-	xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short) (tempbx & 0xff));
+			(unsigned short)((tempcx & 0x0ff00) >> 10));
+	xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
 	tempax = 0;
 	tempbx >>= 8;
 
@@ -930,7 +927,6 @@
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
 	/* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: dual 12bits */
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
 }
 
 static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
@@ -990,7 +986,6 @@
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
 	if (HwDeviceExtension->jChipType >= XG27)
 		xgifb_reg_and_or(pVBInfo->P3c4, 0x40, 0xFC, data2 & 0x03);
-
 }
 
 static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
@@ -1072,7 +1067,6 @@
 			data = 0x6c;
 		xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
 	}
-
 }
 
 static void XGI_WriteDAC(unsigned short dl,
@@ -1905,8 +1899,8 @@
 	push <<= 8;
 	tempax = temp << 8;
 	tempbx = tempbx | tempax;
-	temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
-		| SetInSlaveMode | DisableCRT2Display);
+	temp = SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
+		| SetInSlaveMode | DisableCRT2Display;
 	temp = 0xFFFF ^ temp;
 	tempbx &= temp;
 
@@ -2887,7 +2881,7 @@
 	xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp);
 	temp = tempcx & 0x00FF;
 	xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp);
-	tempcx = (pVBInfo->VGAVT - 1);
+	tempcx = pVBInfo->VGAVT - 1;
 	temp = tempcx & 0x00FF;
 
 	xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);
@@ -2925,7 +2919,7 @@
 	temp = tempbx & 0x00FF;
 	xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
 	temp = ((tempbx & 0xFF00) >> 8) << 4;
-	temp = ((tempcx & 0x000F) | (temp));
+	temp = (tempcx & 0x000F) | (temp);
 	xgifb_reg_set(pVBInfo->Part1Port, 0x11, temp);
 	tempax = 0;
 
@@ -4080,7 +4074,7 @@
 	tempcx |= 0x04000;
 
 	if (tempeax <= tempebx) {
-		tempcx = (tempcx & (~0x4000));
+		tempcx = tempcx & (~0x4000);
 		tempeax = pVBInfo->VGAVDE;
 	} else {
 		tempeax -= tempebx;
@@ -4130,7 +4124,7 @@
 		temp = (tempax & 0xFF00) >> 8;
 		temp = (temp & 0x0003) << 4;
 		xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
-		temp = (tempax & 0x00FF);
+		temp = tempax & 0x00FF;
 		xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
 
 		if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
@@ -4932,7 +4926,7 @@
 			tempcl -= ModeVGA;
 			if (tempcl >= 0) {
 				/* BT Color */
-				tempah = (0x008 >> tempcl);
+				tempah = 0x008 >> tempcl;
 				if (tempah == 0)
 					tempah = 1;
 				tempah |= 0x040;
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 61fa10f..de80e5c 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -27,14 +27,16 @@
 					    /* of Linear VGA memory */
 
 	unsigned long ulVideoMemorySize; /* size, in bytes, of the
-					    memory on the board */
+					  * memory on the board
+					  */
 
 	unsigned char jChipType; /* Used to Identify Graphics Chip */
 				 /* defined in the data structure type  */
 				 /* "XGI_CHIP_TYPE" */
 
 	unsigned char jChipRevision; /* Used to Identify Graphics
-					Chip Revision */
+				      * Chip Revision
+				      */
 
 	unsigned char ujVBChipID; /* the ID of video bridge */
 				  /* defined in the data structure type */
@@ -46,4 +48,3 @@
 /* Additional IOCTL for communication xgifb <> X driver        */
 /* If changing this, xgifb.h must also be changed (for xgifb) */
 #endif
-
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index b79a74a..5fbeab3 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -202,7 +202,7 @@
 	bridge = ca91cx42_bridge->driver_priv;
 
 	/* Need pdev */
-	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+	pdev = to_pci_dev(ca91cx42_bridge->parent);
 
 	INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers);
 
@@ -293,8 +293,7 @@
 	iowrite32(tmp, bridge->base + LINT_EN);
 
 	if ((state == 0) && (sync != 0)) {
-		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
-			dev);
+		pdev = to_pci_dev(ca91cx42_bridge->parent);
 
 		synchronize_irq(pdev->irq);
 	}
@@ -518,7 +517,7 @@
 		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
 		return -EINVAL;
 	}
-	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+	pdev = to_pci_dev(ca91cx42_bridge->parent);
 
 	existing_size = (unsigned long long)(image->bus_resource.end -
 		image->bus_resource.start);
@@ -1519,7 +1518,7 @@
 	struct pci_dev *pdev;
 
 	/* Find pci_dev container of dev */
-	pdev = container_of(parent, struct pci_dev, dev);
+	pdev = to_pci_dev(parent);
 
 	return pci_alloc_consistent(pdev, size, dma);
 }
@@ -1530,7 +1529,7 @@
 	struct pci_dev *pdev;
 
 	/* Find pci_dev container of dev */
-	pdev = container_of(parent, struct pci_dev, dev);
+	pdev = to_pci_dev(parent);
 
 	pci_free_consistent(pdev, size, vaddr, dma);
 }
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 0e2f43b..a2eec97 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -618,7 +618,6 @@
 
 	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
 			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
-	hdq_data->hdq_usecount = 0;
 
 	/* Write followed by a read, release the module */
 	if (hdq_data->init_trans) {
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c9a7ff6..89a7847 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1147,7 +1147,6 @@
 			jremain = 1;
 		}
 
-		try_to_freeze();
 		__set_current_state(TASK_INTERRUPTIBLE);
 
 		/* hold list_mutex until after interruptible to prevent loosing
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0f6d851..86c2392 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1214,6 +1214,21 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called sbc_epx_c3.
 
+config INTEL_MEI_WDT
+	tristate "Intel MEI iAMT Watchdog"
+	depends on INTEL_MEI && X86
+	select WATCHDOG_CORE
+	---help---
+	  A device driver for the Intel MEI iAMT watchdog.
+
+	  The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
+	  Whenever the OS hangs or crashes, iAMT will send an event
+	  to any subscriber of this event. The watchdog doesn't reset the
+	  platform.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mei_wdt.
+
 # M32R Architecture
 
 # M68K Architecture
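
For context, the iAMT watchdog registered by this driver is driven through the standard watchdog chardev interface. A minimal user-space keepalive loop might look like the sketch below; the device node name and the 60-second ping interval are assumptions for illustration, not part of this patch.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog0", O_WRONLY);	/* node name is an assumption */
	int timeout = 120;				/* matches MEI_WDT_DEFAULT_TIMEOUT */

	if (fd < 0)
		return 1;

	/* program the firmware timeout, then ping well inside it */
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(60);
	}
}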
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f566753..efc4f78 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -126,6 +126,7 @@
 obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
 obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
 obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
+obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
 
 # M32R Architecture
 
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
new file mode 100644
index 0000000..630bd18
--- /dev/null
+++ b/drivers/watchdog/mei_wdt.c
@@ -0,0 +1,724 @@
+/*
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/watchdog.h>
+
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+
+/*
+ * iAMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "iamt_wdt"
+
+#define MEI_WDT_DEFAULT_TIMEOUT   120  /* seconds */
+#define MEI_WDT_MIN_TIMEOUT       120  /* seconds */
+#define MEI_WDT_MAX_TIMEOUT     65535  /* seconds */
+
+/* Commands */
+#define MEI_MANAGEMENT_CONTROL 0x02
+
+/* MEI Management Control version number */
+#define MEI_MC_VERSION_NUMBER  0x10
+
+/* Sub Commands */
+#define MEI_MC_START_WD_TIMER_REQ  0x13
+#define MEI_MC_START_WD_TIMER_RES  0x83
+#define   MEI_WDT_STATUS_SUCCESS 0
+#define   MEI_WDT_WDSTATE_NOT_REQUIRED 0x1
+#define MEI_MC_STOP_WD_TIMER_REQ   0x14
+
+/**
+ * enum mei_wdt_state - internal watchdog state
+ *
+ * @MEI_WDT_PROBE: wd in probing stage
+ * @MEI_WDT_IDLE: wd is idle and not opened
+ * @MEI_WDT_START: wd was opened, start was called
+ * @MEI_WDT_RUNNING: wd is expecting keep alive pings
+ * @MEI_WDT_STOPPING: wd is stopping and will move to IDLE
+ * @MEI_WDT_NOT_REQUIRED: wd device is not required
+ */
+enum mei_wdt_state {
+	MEI_WDT_PROBE,
+	MEI_WDT_IDLE,
+	MEI_WDT_START,
+	MEI_WDT_RUNNING,
+	MEI_WDT_STOPPING,
+	MEI_WDT_NOT_REQUIRED,
+};
+
+static const char *mei_wdt_state_str(enum mei_wdt_state state)
+{
+	switch (state) {
+	case MEI_WDT_PROBE:
+		return "PROBE";
+	case MEI_WDT_IDLE:
+		return "IDLE";
+	case MEI_WDT_START:
+		return "START";
+	case MEI_WDT_RUNNING:
+		return "RUNNING";
+	case MEI_WDT_STOPPING:
+		return "STOPPING";
+	case MEI_WDT_NOT_REQUIRED:
+		return "NOT_REQUIRED";
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * struct mei_wdt - mei watchdog driver
+ * @wdd: watchdog device
+ *
+ * @cldev: mei watchdog client device
+ * @state: watchdog internal state
+ * @resp_required: whether a ping requires a response
+ * @response: ping response completion
+ * @unregister: unregister worker
+ * @reg_lock: watchdog device registration lock
+ * @timeout: watchdog current timeout
+ *
+ * @dbgfs_dir: debugfs dir entry
+ */
+struct mei_wdt {
+	struct watchdog_device wdd;
+
+	struct mei_cl_device *cldev;
+	enum mei_wdt_state state;
+	bool resp_required;
+	struct completion response;
+	struct work_struct unregister;
+	struct mutex reg_lock;
+	u16 timeout;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+/*
+ * struct mei_mc_hdr - Management Control Command Header
+ *
+ * @command: Management Control (0x2)
+ * @bytecount: Number of bytes in the message beyond this byte
+ * @subcommand: Management Control Subcommand
+ * @versionnumber: Management Control Version (0x10)
+ */
+struct mei_mc_hdr {
+	u8 command;
+	u8 bytecount;
+	u8 subcommand;
+	u8 versionnumber;
+};
+
+/**
+ * struct mei_wdt_start_request - watchdog start/ping
+ *
+ * @hdr: Management Control Command Header
+ * @timeout: timeout value
+ * @reserved: reserved (legacy)
+ */
+struct mei_wdt_start_request {
+	struct mei_mc_hdr hdr;
+	u16 timeout;
+	u8 reserved[17];
+} __packed;
+
+/**
+ * struct mei_wdt_start_response - watchdog start/ping response
+ *
+ * @hdr: Management Control Command Header
+ * @status: operation status
+ * @wdstate: watchdog status bit mask
+ */
+struct mei_wdt_start_response {
+	struct mei_mc_hdr hdr;
+	u8 status;
+	u8 wdstate;
+} __packed;
+
+/**
+ * struct mei_wdt_stop_request - watchdog stop
+ *
+ * @hdr: Management Control Command Header
+ */
+struct mei_wdt_stop_request {
+	struct mei_mc_hdr hdr;
+} __packed;
+
+/**
+ * mei_wdt_ping - send wd start/ping command
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success,
+ *         negative errno code on failure
+ */
+static int mei_wdt_ping(struct mei_wdt *wdt)
+{
+	struct mei_wdt_start_request req;
+	const size_t req_len = sizeof(req);
+	int ret;
+
+	memset(&req, 0, req_len);
+	req.hdr.command = MEI_MANAGEMENT_CONTROL;
+	req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
+	req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ;
+	req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
+	req.timeout = wdt->timeout;
+
+	ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * mei_wdt_stop - send wd stop command
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success,
+ *         negative errno code on failure
+ */
+static int mei_wdt_stop(struct mei_wdt *wdt)
+{
+	struct mei_wdt_stop_request req;
+	const size_t req_len = sizeof(req);
+	int ret;
+
+	memset(&req, 0, req_len);
+	req.hdr.command = MEI_MANAGEMENT_CONTROL;
+	req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
+	req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ;
+	req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
+
+	ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * mei_wdt_ops_start - wd start command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 on success or -ENODEV
+ */
+static int mei_wdt_ops_start(struct watchdog_device *wdd)
+{
+	struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	wdt->state = MEI_WDT_START;
+	wdd->timeout = wdt->timeout;
+	return 0;
+}
+
+/**
+ * mei_wdt_ops_stop - wd stop command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 if success, negative errno code for failure
+ */
+static int mei_wdt_ops_stop(struct watchdog_device *wdd)
+{
+	struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+	int ret;
+
+	if (wdt->state != MEI_WDT_RUNNING)
+		return 0;
+
+	wdt->state = MEI_WDT_STOPPING;
+
+	ret = mei_wdt_stop(wdt);
+	if (ret)
+		return ret;
+
+	wdt->state = MEI_WDT_IDLE;
+
+	return 0;
+}
+
+/**
+ * mei_wdt_ops_ping - wd ping command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 if success, negative errno code on failure
+ */
+static int mei_wdt_ops_ping(struct watchdog_device *wdd)
+{
+	struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+	int ret;
+
+	if (wdt->state != MEI_WDT_START && wdt->state != MEI_WDT_RUNNING)
+		return 0;
+
+	if (wdt->resp_required)
+		init_completion(&wdt->response);
+
+	wdt->state = MEI_WDT_RUNNING;
+	ret = mei_wdt_ping(wdt);
+	if (ret)
+		return ret;
+
+	if (wdt->resp_required)
+		ret = wait_for_completion_killable(&wdt->response);
+
+	return ret;
+}
+
+/**
+ * mei_wdt_ops_set_timeout - wd set timeout command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ * @timeout: timeout value to set
+ *
+ * Return: 0 if success, negative errno code for failure
+ */
+static int mei_wdt_ops_set_timeout(struct watchdog_device *wdd,
+				   unsigned int timeout)
+{
+	struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	/* valid value is already checked by the caller */
+	wdt->timeout = timeout;
+	wdd->timeout = timeout;
+
+	return 0;
+}
+
+static const struct watchdog_ops wd_ops = {
+	.owner       = THIS_MODULE,
+	.start       = mei_wdt_ops_start,
+	.stop        = mei_wdt_ops_stop,
+	.ping        = mei_wdt_ops_ping,
+	.set_timeout = mei_wdt_ops_set_timeout,
+};
+
+/* not const as the firmware_version field needs to be retrieved */
+static struct watchdog_info wd_info = {
+	.identity = INTEL_AMT_WATCHDOG_ID,
+	.options  = WDIOF_KEEPALIVEPING |
+		    WDIOF_SETTIMEOUT |
+		    WDIOF_ALARMONLY,
+};
+
+/**
+ * __mei_wdt_is_registered - check if wdt is registered
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: true if the wdt is registered with the watchdog subsystem
+ * Locking: should be called under wdt->reg_lock
+ */
+static inline bool __mei_wdt_is_registered(struct mei_wdt *wdt)
+{
+	return !!watchdog_get_drvdata(&wdt->wdd);
+}
+
+/**
+ * mei_wdt_unregister - unregister from the watchdog subsystem
+ *
+ * @wdt: mei watchdog device
+ */
+static void mei_wdt_unregister(struct mei_wdt *wdt)
+{
+	mutex_lock(&wdt->reg_lock);
+
+	if (__mei_wdt_is_registered(wdt)) {
+		watchdog_unregister_device(&wdt->wdd);
+		watchdog_set_drvdata(&wdt->wdd, NULL);
+		memset(&wdt->wdd, 0, sizeof(wdt->wdd));
+	}
+
+	mutex_unlock(&wdt->reg_lock);
+}
+
+/**
+ * mei_wdt_register - register with the watchdog subsystem
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 if success, negative errno code for failure
+ */
+static int mei_wdt_register(struct mei_wdt *wdt)
+{
+	struct device *dev;
+	int ret;
+
+	if (!wdt || !wdt->cldev)
+		return -EINVAL;
+
+	dev = &wdt->cldev->dev;
+
+	mutex_lock(&wdt->reg_lock);
+
+	if (__mei_wdt_is_registered(wdt)) {
+		ret = 0;
+		goto out;
+	}
+
+	wdt->wdd.info = &wd_info;
+	wdt->wdd.ops = &wd_ops;
+	wdt->wdd.parent = dev;
+	wdt->wdd.timeout = MEI_WDT_DEFAULT_TIMEOUT;
+	wdt->wdd.min_timeout = MEI_WDT_MIN_TIMEOUT;
+	wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;
+
+	watchdog_set_drvdata(&wdt->wdd, wdt);
+	ret = watchdog_register_device(&wdt->wdd);
+	if (ret) {
+		dev_err(dev, "unable to register watchdog device = %d.\n", ret);
+		watchdog_set_drvdata(&wdt->wdd, NULL);
+	}
+
+	wdt->state = MEI_WDT_IDLE;
+
+out:
+	mutex_unlock(&wdt->reg_lock);
+	return ret;
+}
+
+static void mei_wdt_unregister_work(struct work_struct *work)
+{
+	struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister);
+
+	mei_wdt_unregister(wdt);
+}
+
+/**
+ * mei_wdt_event_rx - callback for data receive
+ *
+ * @cldev: bus device
+ */
+static void mei_wdt_event_rx(struct mei_cl_device *cldev)
+{
+	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+	struct mei_wdt_start_response res;
+	const size_t res_len = sizeof(res);
+	int ret;
+
+	ret = mei_cldev_recv(wdt->cldev, (u8 *)&res, res_len);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "failure in recv %d\n", ret);
+		return;
+	}
+
+	/* Empty response can be sent on stop */
+	if (ret == 0)
+		return;
+
+	if (ret < sizeof(struct mei_mc_hdr)) {
+		dev_err(&cldev->dev, "recv small data %d\n", ret);
+		return;
+	}
+
+	if (res.hdr.command != MEI_MANAGEMENT_CONTROL ||
+	    res.hdr.versionnumber != MEI_MC_VERSION_NUMBER) {
+		dev_err(&cldev->dev, "wrong command received\n");
+		return;
+	}
+
+	if (res.hdr.subcommand != MEI_MC_START_WD_TIMER_RES) {
+		dev_warn(&cldev->dev, "unsupported command %d :%s[%d]\n",
+			 res.hdr.subcommand,
+			 mei_wdt_state_str(wdt->state),
+			 wdt->state);
+		return;
+	}
+
+	/* Run the unregistration in a worker as this can be
+	 * run only after ping completion, otherwise the flow will
+	 * deadlock on the watchdog core mutex.
+	 */
+	if (wdt->state == MEI_WDT_RUNNING) {
+		if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
+			wdt->state = MEI_WDT_NOT_REQUIRED;
+			schedule_work(&wdt->unregister);
+		}
+		goto out;
+	}
+
+	if (wdt->state == MEI_WDT_PROBE) {
+		if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
+			wdt->state = MEI_WDT_NOT_REQUIRED;
+		} else {
+			/* stop the watchdog and register watchdog device */
+			mei_wdt_stop(wdt);
+			mei_wdt_register(wdt);
+		}
+		return;
+	}
+
+	dev_warn(&cldev->dev, "not in correct state %s[%d]\n",
+			 mei_wdt_state_str(wdt->state), wdt->state);
+
+out:
+	if (!completion_done(&wdt->response))
+		complete(&wdt->response);
+}
+
+/**
+ * mei_wdt_notify_event - callback for event notification
+ *
+ * @cldev: bus device
+ */
+static void mei_wdt_notify_event(struct mei_cl_device *cldev)
+{
+	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+
+	if (wdt->state != MEI_WDT_NOT_REQUIRED)
+		return;
+
+	mei_wdt_register(wdt);
+}
+
+/**
+ * mei_wdt_event - callback for event receive
+ *
+ * @cldev: bus device
+ * @events: event mask
+ * @context: callback context
+ */
+static void mei_wdt_event(struct mei_cl_device *cldev,
+			  u32 events, void *context)
+{
+	if (events & BIT(MEI_CL_EVENT_RX))
+		mei_wdt_event_rx(cldev);
+
+	if (events & BIT(MEI_CL_EVENT_NOTIF))
+		mei_wdt_notify_event(cldev);
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct mei_wdt *wdt = file->private_data;
+	const size_t bufsz = 32;
+	char buf[32];
+	ssize_t pos;
+
+	mutex_lock(&wdt->reg_lock);
+	pos = scnprintf(buf, bufsz, "%s\n",
+		__mei_wdt_is_registered(wdt) ? "activated" : "deactivated");
+	mutex_unlock(&wdt->reg_lock);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+}
+
+static const struct file_operations dbgfs_fops_activation = {
+	.open    = simple_open,
+	.read    = mei_dbgfs_read_activation,
+	.llseek  = generic_file_llseek,
+};
+
+static ssize_t mei_dbgfs_read_state(struct file *file, char __user *ubuf,
+				    size_t cnt, loff_t *ppos)
+{
+	struct mei_wdt *wdt = file->private_data;
+	const size_t bufsz = 32;
+	char buf[32];
+	ssize_t pos;
+
+	pos = scnprintf(buf, bufsz, "state: %s\n",
+			 mei_wdt_state_str(wdt->state));
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+}
+
+static const struct file_operations dbgfs_fops_state = {
+	.open = simple_open,
+	.read = mei_dbgfs_read_state,
+	.llseek = generic_file_llseek,
+};
+
+static void dbgfs_unregister(struct mei_wdt *wdt)
+{
+	debugfs_remove_recursive(wdt->dbgfs_dir);
+	wdt->dbgfs_dir = NULL;
+}
+
+static int dbgfs_register(struct mei_wdt *wdt)
+{
+	struct dentry *dir, *f;
+
+	dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	wdt->dbgfs_dir = dir;
+	f = debugfs_create_file("state", S_IRUSR, dir, wdt, &dbgfs_fops_state);
+	if (!f)
+		goto err;
+
+	f = debugfs_create_file("activation",  S_IRUSR,
+				dir, wdt, &dbgfs_fops_activation);
+	if (!f)
+		goto err;
+
+	return 0;
+err:
+	dbgfs_unregister(wdt);
+	return -ENODEV;
+}
+
+#else
+
+static inline void dbgfs_unregister(struct mei_wdt *wdt) {}
+
+static inline int dbgfs_register(struct mei_wdt *wdt)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int mei_wdt_probe(struct mei_cl_device *cldev,
+			 const struct mei_cl_device_id *id)
+{
+	struct mei_wdt *wdt;
+	int ret;
+
+	wdt = kzalloc(sizeof(struct mei_wdt), GFP_KERNEL);
+	if (!wdt)
+		return -ENOMEM;
+
+	wdt->timeout = MEI_WDT_DEFAULT_TIMEOUT;
+	wdt->state = MEI_WDT_PROBE;
+	wdt->cldev = cldev;
+	wdt->resp_required = mei_cldev_ver(cldev) > 0x1;
+	mutex_init(&wdt->reg_lock);
+	init_completion(&wdt->response);
+	INIT_WORK(&wdt->unregister, mei_wdt_unregister_work);
+
+	mei_cldev_set_drvdata(cldev, wdt);
+
+	ret = mei_cldev_enable(cldev);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "Could not enable cl device\n");
+		goto err_out;
+	}
+
+	ret = mei_cldev_register_event_cb(wdt->cldev,
+					  BIT(MEI_CL_EVENT_RX) |
+					  BIT(MEI_CL_EVENT_NOTIF),
+					  mei_wdt_event, NULL);
+
+	/* on legacy devices notification is not supported;
+	 * this doesn't fail the registration for the RX event
+	 */
+	if (ret && ret != -EOPNOTSUPP) {
+		dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
+		goto err_disable;
+	}
+
+	wd_info.firmware_version = mei_cldev_ver(cldev);
+
+	if (wdt->resp_required)
+		ret = mei_wdt_ping(wdt);
+	else
+		ret = mei_wdt_register(wdt);
+
+	if (ret)
+		goto err_disable;
+
+	if (dbgfs_register(wdt))
+		dev_warn(&cldev->dev, "cannot register debugfs\n");
+
+	return 0;
+
+err_disable:
+	mei_cldev_disable(cldev);
+
+err_out:
+	kfree(wdt);
+
+	return ret;
+}
+
+static int mei_wdt_remove(struct mei_cl_device *cldev)
+{
+	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+
+	/* Unblock any waiting caller in case of FW-initiated or unexpected reset */
+	if (!completion_done(&wdt->response))
+		complete(&wdt->response);
+
+	cancel_work_sync(&wdt->unregister);
+
+	mei_wdt_unregister(wdt);
+
+	mei_cldev_disable(cldev);
+
+	dbgfs_unregister(wdt);
+
+	kfree(wdt);
+
+	return 0;
+}
+
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+			    0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
+static struct mei_cl_device_id mei_wdt_tbl[] = {
+	{ .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY },
+	/* required last entry */
+	{ }
+};
+MODULE_DEVICE_TABLE(mei, mei_wdt_tbl);
+
+static struct mei_cl_driver mei_wdt_driver = {
+	.id_table = mei_wdt_tbl,
+	.name = KBUILD_MODNAME,
+
+	.probe = mei_wdt_probe,
+	.remove = mei_wdt_remove,
+};
+
+static int __init mei_wdt_init(void)
+{
+	int ret;
+
+	ret = mei_cldev_driver_register(&mei_wdt_driver);
+	if (ret) {
+		pr_err(KBUILD_MODNAME ": module registration failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void __exit mei_wdt_exit(void)
+{
+	mei_cldev_driver_unregister(&mei_wdt_driver);
+}
+
+module_init(mei_wdt_init);
+module_exit(mei_wdt_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog");
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6402eaf..bd01b92 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1040,28 +1040,6 @@
 /* PPPOX */
 COMPATIBLE_IOCTL(PPPOEIOCSFWD)
 COMPATIBLE_IOCTL(PPPOEIOCDFWD)
-/* ppdev */
-COMPATIBLE_IOCTL(PPSETMODE)
-COMPATIBLE_IOCTL(PPRSTATUS)
-COMPATIBLE_IOCTL(PPRCONTROL)
-COMPATIBLE_IOCTL(PPWCONTROL)
-COMPATIBLE_IOCTL(PPFCONTROL)
-COMPATIBLE_IOCTL(PPRDATA)
-COMPATIBLE_IOCTL(PPWDATA)
-COMPATIBLE_IOCTL(PPCLAIM)
-COMPATIBLE_IOCTL(PPRELEASE)
-COMPATIBLE_IOCTL(PPYIELD)
-COMPATIBLE_IOCTL(PPEXCL)
-COMPATIBLE_IOCTL(PPDATADIR)
-COMPATIBLE_IOCTL(PPNEGOT)
-COMPATIBLE_IOCTL(PPWCTLONIRQ)
-COMPATIBLE_IOCTL(PPCLRIRQ)
-COMPATIBLE_IOCTL(PPSETPHASE)
-COMPATIBLE_IOCTL(PPGETMODES)
-COMPATIBLE_IOCTL(PPGETMODE)
-COMPATIBLE_IOCTL(PPGETPHASE)
-COMPATIBLE_IOCTL(PPGETFLAGS)
-COMPATIBLE_IOCTL(PPSETFLAGS)
 /* Big A */
 /* sparc only */
 /* Big Q for sound/OSS */
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 9006c4e..3d8dcdd 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -163,4 +163,13 @@
 #define module_amba_driver(__amba_drv) \
 	module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
 
+/*
+ * builtin_amba_driver() - Helper macro for drivers that don't do anything
+ * special in driver initcall.  This eliminates a lot of boilerplate.  Each
+ * driver may only use this macro once, and calling it replaces the instance
+ * device_initcall().
+ */
+#define builtin_amba_driver(__amba_drv) \
+	builtin_driver(__amba_drv, amba_driver_register)
+
 #endif
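
A hedged usage sketch for the new helper; the driver name, ID values and probe body below are hypothetical and not taken from any in-tree driver:

#include <linux/amba/bus.h>

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* claim resources, map registers, etc. */
	return 0;
}

static const struct amba_id foo_ids[] = {
	{ .id = 0x000bb000, .mask = 0x000fffff },
	{ 0, 0 },
};

static struct amba_driver foo_driver = {
	.drv		= { .name = "foo" },
	.probe		= foo_probe,
	.id_table	= foo_ids,
};

/*
 * Replaces the open-coded initcall:
 *	static int __init foo_init(void)
 *	{
 *		return amba_driver_register(&foo_driver);
 *	}
 *	device_initcall(foo_init);
 */
builtin_amba_driver(foo_driver);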
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
new file mode 100644
index 0000000..7d41026
--- /dev/null
+++ b/include/linux/coresight-pmu.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_CORESIGHT_PMU_H
+#define _LINUX_CORESIGHT_PMU_H
+
+#define CORESIGHT_ETM_PMU_NAME "cs_etm"
+#define CORESIGHT_ETM_PMU_SEED  0x10
+
+/* ETMv3.5/PTM's ETMCR config bit */
+#define ETM_OPT_CYCACC  12
+#define ETM_OPT_TS      28
+
+static inline int coresight_get_trace_id(int cpu)
+{
+	/*
+	 * A trace ID of value 0 is invalid, so let's start at some
+	 * random value that fits in 7 bits and go from there.  Since
+	 * the common convention is to have data trace IDs be I(N) + 1,
+	 * set instruction trace IDs as a function of the CPU number.
+	 */
+	return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
+}
+
+#endif
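
As an illustration of the convention documented above, instruction trace IDs come out as 0x10, 0x12, 0x14, ... for CPUs 0, 1, 2, with the data trace ID at I(N) + 1; a hypothetical helper (not part of this patch) would be:

#include <linux/coresight-pmu.h>

/* data trace ID is the instruction trace ID + 1 by convention */
static inline int coresight_get_data_trace_id(int cpu)
{
	return coresight_get_trace_id(cpu) + 1;
}
/* cpu 0 -> 0x10/0x11, cpu 1 -> 0x12/0x13, cpu 2 -> 0x14/0x15, ... */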
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a7cabfa2..385d62e 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
 #define _LINUX_CORESIGHT_H
 
 #include <linux/device.h>
+#include <linux/perf_event.h>
 #include <linux/sched.h>
 
 /* Peripheral id registers (0xFD0-0xFEC) */
@@ -152,7 +153,6 @@
 		by @coresight_ops.
  * @dev:	The device entity associated to this component.
  * @refcnt:	keep track of what is in use.
- * @path_link:	link of current component into the path being enabled.
  * @orphan:	true if the component has connections that haven't been linked.
  * @enable:	'true' if component is currently part of an active path.
  * @activated:	'true' only if a _sink_ has been activated.  A sink can be
@@ -168,7 +168,6 @@
 	const struct coresight_ops *ops;
 	struct device dev;
 	atomic_t *refcnt;
-	struct list_head path_link;
 	bool orphan;
 	bool enable;	/* true only if configured as part of a path */
 	bool activated;	/* true only if a sink is part of a path */
@@ -183,12 +182,29 @@
 /**
  * struct coresight_ops_sink - basic operations for a sink
  * Operations available for sinks
- * @enable:	enables the sink.
- * @disable:	disables the sink.
+ * @enable:		enables the sink.
+ * @disable:		disables the sink.
+ * @alloc_buffer:	initialises perf's ring buffer for trace collection.
+ * @free_buffer:	release memory allocated in @alloc_buffer.
+ * @set_buffer:		initialises buffer mechanics before a trace session.
+ * @reset_buffer:	finalises buffer mechanics after a trace session.
+ * @update_buffer:	update buffer pointers after a trace session.
  */
 struct coresight_ops_sink {
-	int (*enable)(struct coresight_device *csdev);
+	int (*enable)(struct coresight_device *csdev, u32 mode);
 	void (*disable)(struct coresight_device *csdev);
+	void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
+			      void **pages, int nr_pages, bool overwrite);
+	void (*free_buffer)(void *config);
+	int (*set_buffer)(struct coresight_device *csdev,
+			  struct perf_output_handle *handle,
+			  void *sink_config);
+	unsigned long (*reset_buffer)(struct coresight_device *csdev,
+				      struct perf_output_handle *handle,
+				      void *sink_config, bool *lost);
+	void (*update_buffer)(struct coresight_device *csdev,
+			      struct perf_output_handle *handle,
+			      void *sink_config);
 };
 
 /**
@@ -205,14 +221,18 @@
 /**
  * struct coresight_ops_source - basic operations for a source
  * Operations available for sources.
+ * @cpu_id:	returns the value of the CPU number this component
+ *		is associated with.
  * @trace_id:	returns the value of the component's trace ID as known
-		to the HW.
+ *		to the HW.
  * @enable:	enables tracing for a source.
  * @disable:	disables tracing for a source.
  */
 struct coresight_ops_source {
+	int (*cpu_id)(struct coresight_device *csdev);
 	int (*trace_id)(struct coresight_device *csdev);
-	int (*enable)(struct coresight_device *csdev);
+	int (*enable)(struct coresight_device *csdev,
+		      struct perf_event_attr *attr,  u32 mode);
 	void (*disable)(struct coresight_device *csdev);
 };
 
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 0679181..885f587 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -3,16 +3,25 @@
  * platform description for 93xx46 EEPROMs.
  */
 
+struct gpio_desc;
+
 struct eeprom_93xx46_platform_data {
 	unsigned char	flags;
 #define EE_ADDR8	0x01		/*  8 bit addr. cfg */
 #define EE_ADDR16	0x02		/* 16 bit addr. cfg */
 #define EE_READONLY	0x08		/* forbid writing */
 
+	unsigned int	quirks;
+/* Single word read transfers only; no sequential read. */
+#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ		(1 << 0)
+/* Instructions such as EWEN are (addrlen + 2) in length. */
+#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH		(1 << 1)
+
 	/*
 	 * optional hooks to control additional logic
 	 * before and after spi transfer.
 	 */
 	void (*prepare)(void *);
 	void (*finish)(void *);
+	struct gpio_desc *select;
 };
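
A hedged board-code sketch using the new fields; the device names and hook bodies are illustrative only:

#include <linux/eeprom_93xx46.h>

static void myboard_ee_prepare(void *priv)
{
	/* e.g. drive an external chip-select mux before the SPI transfer */
}

static void myboard_ee_finish(void *priv)
{
}

static struct eeprom_93xx46_platform_data myboard_ee_pdata = {
	.flags	= EE_ADDR8,
	.quirks	= EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
	.prepare = myboard_ee_prepare,
	.finish	= myboard_ee_finish,
	/* .select would be set to a gpio_desc obtained via gpiod_get() */
};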
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index fa05e04..d8414502 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,6 +97,12 @@
 	FILEID_FAT_WITH_PARENT = 0x72,
 
 	/*
+	 * 128 bit child FID (struct lu_fid)
+	 * 128 bit parent FID (struct lu_fid)
+	 */
+	FILEID_LUSTRE = 0x97,
+
+	/*
 	 * Filesystems must not use 0xff file ID.
 	 */
 	FILEID_INVALID = 0xff,
diff --git a/include/linux/fence.h b/include/linux/fence.h
index bb52201..605bd88 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -79,6 +79,8 @@
 	unsigned long flags;
 	ktime_t timestamp;
 	int status;
+	struct list_head child_list;
+	struct list_head active_list;
 };
 
 enum fence_flag_bits {
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 753dbad..d23dab0 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -235,6 +235,7 @@
 #define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
 #define VMBUS_CHANNEL_PARENT_OFFER			0x200
 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
 
 struct vmpacket_descriptor {
 	u16 type;
@@ -391,6 +392,10 @@
 	CHANNELMSG_VERSION_RESPONSE		= 15,
 	CHANNELMSG_UNLOAD			= 16,
 	CHANNELMSG_UNLOAD_RESPONSE		= 17,
+	CHANNELMSG_18				= 18,
+	CHANNELMSG_19				= 19,
+	CHANNELMSG_20				= 20,
+	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
 	CHANNELMSG_COUNT
 };
 
@@ -561,6 +566,13 @@
 	u64 monitor_page2;
 } __packed;
 
+/* Hyper-V socket: the guest is connect()-ing to the host */
+struct vmbus_channel_tl_connect_request {
+	struct vmbus_channel_message_header header;
+	uuid_le guest_endpoint_id;
+	uuid_le host_service_id;
+} __packed;
+
 struct vmbus_channel_version_response {
 	struct vmbus_channel_message_header header;
 	u8 version_supported;
@@ -633,6 +645,32 @@
 	HV_SIGNAL_POLICY_EXPLICIT,
 };
 
+enum vmbus_device_type {
+	HV_IDE = 0,
+	HV_SCSI,
+	HV_FC,
+	HV_NIC,
+	HV_ND,
+	HV_PCIE,
+	HV_FB,
+	HV_KBD,
+	HV_MOUSE,
+	HV_KVP,
+	HV_TS,
+	HV_HB,
+	HV_SHUTDOWN,
+	HV_FCOPY,
+	HV_BACKUP,
+	HV_DM,
+	HV_UNKOWN,
+};
+
+struct vmbus_device {
+	u16  dev_type;
+	uuid_le guid;
+	bool perf_device;
+};
+
 struct vmbus_channel {
 	/* Unique channel id */
 	int id;
@@ -728,6 +766,12 @@
 	void (*sc_creation_callback)(struct vmbus_channel *new_sc);
 
 	/*
+	 * Channel rescind callback. Some channels (the hvsock ones) need to
+	 * register a callback which is invoked in vmbus_onoffer_rescind().
+	 */
+	void (*chn_rescind_callback)(struct vmbus_channel *channel);
+
+	/*
 	 * The spinlock to protect the structure. It is being used to protect
 	 * test-and-set access to various attributes of the structure as well
 	 * as all sc_list operations.
@@ -767,8 +811,30 @@
 	 * signaling control.
 	 */
 	enum hv_signal_policy  signal_policy;
+	/*
+	 * On the channel send side, many of the VMBUS
+	 * device drivers explicitly serialize access to the
+	 * outgoing ring buffer. Give more control to the
+	 * VMBUS device drivers in terms of how to serialize
+	 * access to the outgoing ring buffer.
+	 * The default behavior will be to acquire the
+	 * ring lock to preserve the current behavior.
+	 */
+	bool acquire_ring_lock;
+
 };
 
+static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
+{
+	c->acquire_ring_lock = state;
+}
+
+static inline bool is_hvsock_channel(const struct vmbus_channel *c)
+{
+	return !!(c->offermsg.offer.chn_flags &
+		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
 static inline void set_channel_signal_state(struct vmbus_channel *c,
 					    enum hv_signal_policy policy)
 {
@@ -790,6 +856,12 @@
 	return c->per_channel_state;
 }
 
+static inline void set_channel_pending_send_size(struct vmbus_channel *c,
+						 u32 size)
+{
+	c->outbound.ring_buffer->pending_send_sz = size;
+}
+
 void vmbus_onmessage(void *context);
 
 int vmbus_request_offers(void);
@@ -801,6 +873,9 @@
 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
 			void (*sc_cr_cb)(struct vmbus_channel *new_sc));
 
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+		void (*chn_rescind_cb)(struct vmbus_channel *));
+
 /*
  * Retrieve the (sub) channel on which to send an outgoing request.
  * When a primary channel has multiple sub-channels, we choose a
@@ -940,6 +1015,20 @@
 struct hv_driver {
 	const char *name;
 
+	/*
+	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
+	 * channel flag, actually doesn't mean a synthetic device because the
+	 * offer's if_type/if_instance can change for every new hvsock
+	 * connection.
+	 *
+	 * However, to facilitate the notification of new-offer/rescind-offer
+	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
+	 * a special vmbus device, and hence we need the below flag to
+	 * indicate if the driver is the hvsock driver or not: we need to
+	 * specially treat the hvsock offer & driver in vmbus_match().
+	 */
+	bool hvsock;
+
 	/* the device type supported by this driver */
 	uuid_le dev_type;
 	const struct hv_vmbus_device_id *id_table;
@@ -959,6 +1048,8 @@
 
 	/* the device instance id of this device */
 	uuid_le dev_instance;
+	u16 vendor_id;
+	u16 device_id;
 
 	struct device device;
 
@@ -994,6 +1085,8 @@
 					 const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
+
 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
 			resource_size_t min, resource_size_t max,
 			resource_size_t size, resource_size_t align,
@@ -1242,4 +1335,6 @@
 
 extern __u32 vmbus_proto_version;
 
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+				  const uuid_le *shv_host_servie_id);
 #endif /* _HYPERV_H */
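
To show how the new hvsock pieces fit together, here is a minimal sketch (not part of this patch) of a hypothetical driver that opts in to hvsock offers via the new hvsock flag and registers the per-channel rescind callback; the names and probe body are illustrative only:

#include <linux/hyperv.h>

/* Called from vmbus_onoffer_rescind() when the host tears the channel down. */
static void sample_chn_rescind(struct vmbus_channel *chan)
{
	vmbus_hvsock_device_unregister(chan);
}

static int sample_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct vmbus_channel *chan = dev->channel;

	if (!is_hvsock_channel(chan))
		return -ENODEV;

	vmbus_set_chn_rescind_callback(chan, sample_chn_rescind);
	return 0;
}

static struct hv_driver sample_hvsock_drv = {
	.name   = "sample_hvsock",
	.hvsock = true,		/* matched specially in vmbus_match() */
	.probe  = sample_probe,
};

Such a driver would then be registered with vmbus_driver_register() in the usual way.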
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2fe939c..6670c3d 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -119,6 +119,8 @@
  * @addr: address of the register.
  * @mask_int1: mask to enable/disable IRQ on INT1 pin.
  * @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * @addr_ihl: address to enable/disable active low on the INT lines.
+ * @mask_ihl: mask to enable/disable active low on the INT lines.
  * struct ig1 - represents the Interrupt Generator 1 of sensors.
  * @en_addr: address of the enable ig1 register.
  * @en_mask: mask to write the on/off value for enable.
@@ -127,6 +129,8 @@
 	u8 addr;
 	u8 mask_int1;
 	u8 mask_int2;
+	u8 addr_ihl;
+	u8 mask_ihl;
 	struct {
 		u8 en_addr;
 		u8 en_mask;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b589411..ce9e9c1 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -448,7 +448,7 @@
  * @buffer:		[DRIVER] any buffer present
  * @buffer_list:	[INTERN] list of all buffers currently attached
  * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
- * @mlock:		[INTERN] lock used to prevent simultaneous device state
+ * @mlock:		[DRIVER] lock used to prevent simultaneous device state
  *			changes
  * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
  * @masklength:		[INTERN] the length of the mask established from
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 04579d9..0934d06 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -74,6 +74,7 @@
 	DOMAIN_BUS_PCI_MSI,
 	DOMAIN_BUS_PLATFORM_MSI,
 	DOMAIN_BUS_NEXUS,
+	DOMAIN_BUS_FSL_MC_MSI,
 };
 
 /**
diff --git a/include/linux/msi.h b/include/linux/msi.h
index a2a0068..8b425c6 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -33,6 +33,14 @@
 };
 
 /**
+ * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
+ * @msi_index:		The index of the MSI descriptor
+ */
+struct fsl_mc_msi_desc {
+	u16				msi_index;
+};
+
+/**
  * struct msi_desc - Descriptor structure for MSI based interrupts
  * @list:	List head for management
  * @irq:	The base interrupt number
@@ -87,6 +95,7 @@
 		 * tree wide cleanup.
 		 */
 		struct platform_msi_desc platform;
+		struct fsl_mc_msi_desc fsl_mc;
 	};
 };
 
diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h
new file mode 100644
index 0000000..7bd8ed7
--- /dev/null
+++ b/include/linux/platform_data/ad5761.h
@@ -0,0 +1,44 @@
+/*
+ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
+ *
+ * Copyright 2016 Qtechnology A/S
+ * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD5761_H__
+#define __LINUX_PLATFORM_DATA_AD5761_H__
+
+/**
+ * enum ad5761_voltage_range - Voltage range the AD5761 is configured for.
+ * @AD5761_VOLTAGE_RANGE_M10V_10V:  -10V to  10V
+ * @AD5761_VOLTAGE_RANGE_0V_10V:      0V to  10V
+ * @AD5761_VOLTAGE_RANGE_M5V_5V:     -5V to   5V
+ * @AD5761_VOLTAGE_RANGE_0V_5V:       0V to   5V
+ * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V
+ * @AD5761_VOLTAGE_RANGE_M3V_3V:     -3V to   3V
+ * @AD5761_VOLTAGE_RANGE_0V_16V:      0V to  16V
+ * @AD5761_VOLTAGE_RANGE_0V_20V:      0V to  20V
+ */
+
+enum ad5761_voltage_range {
+	AD5761_VOLTAGE_RANGE_M10V_10V,
+	AD5761_VOLTAGE_RANGE_0V_10V,
+	AD5761_VOLTAGE_RANGE_M5V_5V,
+	AD5761_VOLTAGE_RANGE_0V_5V,
+	AD5761_VOLTAGE_RANGE_M2V5_7V5,
+	AD5761_VOLTAGE_RANGE_M3V_3V,
+	AD5761_VOLTAGE_RANGE_0V_16V,
+	AD5761_VOLTAGE_RANGE_0V_20V,
+};
+
+/**
+ * struct ad5761_platform_data - AD5761 DAC driver platform data
+ * @voltage_range: Voltage range the AD5761 is configured for
+ */
+
+struct ad5761_platform_data {
+	enum ad5761_voltage_range voltage_range;
+};
+
+#endif
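
A short sketch of how board code might hand this platform data to the SPI core; the bus number, chip select, clock rate and "ad5761" modalias below are assumptions for illustration, not taken from this patch:

#include <linux/spi/spi.h>
#include <linux/platform_data/ad5761.h>

static const struct ad5761_platform_data sample_ad5761_pdata = {
	.voltage_range = AD5761_VOLTAGE_RANGE_0V_10V,
};

static struct spi_board_info sample_spi_devs[] __initdata = {
	{
		.modalias	= "ad5761",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.platform_data	= &sample_ad5761_pdata,
	},
};

Passing sample_spi_devs to spi_register_board_info() during board init would then make the device available to the driver.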
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 9d0083d..1a79ed8 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -67,6 +67,16 @@
  * description. That is, the lowest master that can be allocated to software
  * writers is @sw_start and data from this writer will appear as @sw_start
  * master in the STP stream.
+ *
+ * The @packet callback should adhere to the following rules:
+ *   1) it must return the number of bytes it consumed from the payload;
+ *   2) therefore, if it sent a packet that does not have payload (like FLAG),
+ *      it must return zero;
+ *   3) if it does not support the requested packet type/flag combination,
+ *      it must return -ENOTSUPP.
+ *
+ * The @unlink callback is called when there are no more active writers so
+ * that the master/channel can be quiesced.
  */
 struct stm_data {
 	const char		*name;
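
A minimal sketch of a ->packet() callback that follows the three rules above, using the callback signature and packet-type constants declared in this header; the sample_hw_write_*() helpers are hypothetical stand-ins for the real trace-hardware accessors:

static ssize_t sample_stm_packet(struct stm_data *stm_data,
				 unsigned int master, unsigned int channel,
				 unsigned int packet, unsigned int flags,
				 unsigned int size,
				 const unsigned char *payload)
{
	switch (packet) {
	case STP_PACKET_DATA:
		sample_hw_write_data(master, channel, payload, size);
		return size;		/* rule 1: bytes consumed from payload */
	case STP_PACKET_FLAG:
		sample_hw_write_flag(master, channel);
		return 0;		/* rule 2: FLAG carries no payload */
	default:
		return -ENOTSUPP;	/* rule 3: unsupported type/flag combo */
	}
}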
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 65ac54c..1bd31a3 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -734,6 +734,41 @@
 }
 
 /*
+ * Helper to read a value from a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32bit value, since the pointer value
+ * never exceeds a 32bit value in this case. Also, doing an
+ * atomic64_read on X86_32 uniprocessor systems may be implemented
+ * as a non-locked cmpxchg8b, which may end up overwriting updates done
+ * by the VMCI device to the memory location. On 32bit SMP, the lock
+ * prefix will be used, so correctness isn't an issue, but using a
+ * 64bit operation still adds unnecessary overhead.
+ */
+static inline u64 vmci_q_read_pointer(atomic64_t *var)
+{
+#if defined(CONFIG_X86_32)
+	return atomic_read((atomic_t *)var);
+#else
+	return atomic64_read(var);
+#endif
+}
+
+/*
+ * Helper to set the value of a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32bit value, since the pointer value
+ * never exceeds a 32bit value in this case. On 32bit SMP, using a
+ * locked cmpxchg8b adds unnecessary overhead.
+ */
+static inline void vmci_q_set_pointer(atomic64_t *var,
+				      u64 new_val)
+{
+#if defined(CONFIG_X86_32)
+	return atomic_set((atomic_t *)var, (u32)new_val);
+#else
+	return atomic64_set(var, new_val);
+#endif
+}
+
+/*
  * Helper to add a given offset to a head or tail pointer. Wraps the
  * value of the pointer around the max size of the queue.
  */
@@ -741,14 +776,14 @@
 				       size_t add,
 				       u64 size)
 {
-	u64 new_val = atomic64_read(var);
+	u64 new_val = vmci_q_read_pointer(var);
 
 	if (new_val >= size - add)
 		new_val -= size;
 
 	new_val += add;
 
-	atomic64_set(var, new_val);
+	vmci_q_set_pointer(var, new_val);
 }
 
 /*
@@ -758,7 +793,7 @@
 vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
 {
 	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
-	return atomic64_read(&qh->producer_tail);
+	return vmci_q_read_pointer(&qh->producer_tail);
 }
 
 /*
@@ -768,7 +803,7 @@
 vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
 {
 	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
-	return atomic64_read(&qh->consumer_head);
+	return vmci_q_read_pointer(&qh->consumer_head);
 }
 
 /*
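
As a usage sketch, advancing a producer tail now goes through the new accessors rather than raw atomic64_read()/atomic64_set(); the function and local names below are illustrative:

static void sample_advance_producer(struct vmci_queue_header *qh,
				    u64 bytes, u64 queue_size)
{
	u64 tail = vmci_q_read_pointer(&qh->producer_tail);

	tail += bytes;
	if (tail >= queue_size)		/* wrap around the ring */
		tail -= queue_size;

	vmci_q_set_pointer(&qh->producer_tail, tail);
}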
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 7c63bd6..c077617 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -37,6 +37,7 @@
 	IIO_VELOCITY,
 	IIO_CONCENTRATION,
 	IIO_RESISTANCE,
+	IIO_PH,
 };
 
 enum iio_modifier {
diff --git a/lib/devres.c b/lib/devres.c
index 8c85672..cb1464c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -236,7 +236,7 @@
 
 static void pcim_iomap_release(struct device *gendev, void *res)
 {
-	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+	struct pci_dev *dev = to_pci_dev(gendev);
 	struct pcim_iomap_devres *this = res;
 	int i;
 
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 024a11a..0d8bd29 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -1,6 +1,6 @@
 #!/bin/sh
 # Before running this script please ensure that your PATH is
-# typical as you use for compilation/istallation. I use
+# typical as you use for compilation/installation. I use
 # /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may
 # differ on your system.
 #
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index a8ab795..a8c4644 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -5,6 +5,8 @@
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
 
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+
 all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^