Merge branch 'master' of git://gitorious.org/linux-can/linux-can-next
diff --git a/Documentation/devicetree/bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt
index 141087c..fd2bd56 100644
--- a/Documentation/devicetree/bindings/gpio/led.txt
+++ b/Documentation/devicetree/bindings/gpio/led.txt
@@ -7,9 +7,9 @@
node's name represents the name of the corresponding LED.
LED sub-node properties:
-- gpios : Should specify the LED's GPIO, see "Specifying GPIO information
- for devices" in Documentation/devicetree/booting-without-of.txt. Active
- low LEDs should be indicated using flags in the GPIO specifier.
+- gpios : Should specify the LED's GPIO, see "gpios property" in
+ Documentation/devicetree/gpio.txt. Active low LEDs should be
+ indicated using flags in the GPIO specifier.
- label : (optional) The label for this LED. If omitted, the label is
taken from the node name (excluding the unit address).
- linux,default-trigger : (optional) This parameter, if present, is a
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ecc6a6c..a20008a 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -30,6 +30,7 @@
nintendo Nintendo
nvidia NVIDIA
nxp NXP Semiconductors
+picochip Picochip Ltd
powervr Imagination Technologies
qcom Qualcomm, Inc.
ramtron Ramtron International
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index a22ecf4..52729a7 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -7,21 +7,29 @@
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf
- * IDT TSE2002B3, TS3000B3
- Prefix: 'tse2002b3', 'ts3000b3'
+ * Atmel AT30TS00
+ Prefix: 'at30ts00'
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
- http://www.idt.com/products/getdoc.cfm?docid=18715691
- http://www.idt.com/products/getdoc.cfm?docid=18715692
+ http://www.atmel.com/Images/doc8585.pdf
+ * IDT TSE2002B3, TSE2002GB2, TS3000B3, TS3000GB2
+ Prefix: 'tse2002', 'ts3000'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+ http://www.idt.com/sites/default/files/documents/IDT_TSE2002B3C_DST_20100512_120303152056.pdf
+ http://www.idt.com/sites/default/files/documents/IDT_TSE2002GB2A1_DST_20111107_120303145914.pdf
+ http://www.idt.com/sites/default/files/documents/IDT_TS3000B3A_DST_20101129_120303152013.pdf
+ http://www.idt.com/sites/default/files/documents/IDT_TS3000GB2A1_DST_20111104_120303151012.pdf
* Maxim MAX6604
Prefix: 'max6604'
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
- * Microchip MCP9805, MCP98242, MCP98243, MCP9843
- Prefixes: 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
+ * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843
+ Prefixes: 'mcp9804', 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
+ http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
@@ -48,6 +56,12 @@
Datasheets:
http://www.st.com/stonline/products/literature/ds/13447/stts424.pdf
http://www.st.com/stonline/products/literature/ds/13448/stts424e02.pdf
+ * ST Microelectronics STTS2002, STTS3000
+ Prefix: 'stts2002', 'stts3000'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+ http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATASHEET/CD00225278.pdf
+ http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATA_BRIEF/CD00270920.pdf
* JEDEC JC 42.4 compliant temperature sensor chips
Prefix: 'jc42'
Addresses scanned: I2C 0x18 - 0x1f
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index f274c28..2f95308 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -13,7 +13,8 @@
All ALPS touchpads should respond to the "E6 report" command sequence:
E8-E6-E6-E6-E9. An ALPS touchpad should respond with either 00-00-0A or
-00-00-64.
+00-00-64 if no buttons are pressed. The bits 0-2 of the first byte will be 1s
+if some buttons are pressed.
If the E6 report is successful, the touchpad model is identified using the "E7
report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
diff --git a/MAINTAINERS b/MAINTAINERS
index dd6decd..209a3862 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -962,7 +962,7 @@
F: drivers/platform/msm/
F: drivers/*/pm8???-*
F: include/linux/mfd/pm8xxx/
-T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
S: Maintained
ARM/TOSA MACHINE SUPPORT
@@ -5317,6 +5317,17 @@
S: Maintained
F: drivers/block/ps3vram.c
+PTP HARDWARE CLOCK SUPPORT
+M: Richard Cochran <richardcochran@gmail.com>
+S: Maintained
+W: http://linuxptp.sourceforge.net/
+F: Documentation/ABI/testing/sysfs-ptp
+F: Documentation/ptp/*
+F: drivers/net/gianfar_ptp.c
+F: drivers/net/phy/dp83640*
+F: drivers/ptp/*
+F: include/linux/ptp_cl*
+
PTRACE SUPPORT
M: Roland McGrath <roland@redhat.com>
M: Oleg Nesterov <oleg@redhat.com>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a48aecc..dfb0312 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1280,7 +1280,7 @@
depends on CPU_V7
help
This option enables the workaround for the 743622 Cortex-A9
- (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+ (r2p*) erratum. Under very rare conditions, a faulty
optimisation in the Cortex-A9 Store Buffer may lead to data
corruption. This workaround sets a specific bit in the diagnostic
register of the Cortex-A9 which disables the Store Buffer
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index ce1c5ff..3c79f85 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -3,3 +3,4 @@
xipImage
bootpImage
uImage
+*.dtb
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b5a5be2..90114fa 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -134,7 +134,7 @@
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow);
+ int idx);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 4dd0eda..1651d49 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+ vma.vm_flags = VM_EXEC;
vma.vm_mm = mm;
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf..b2abfa1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow)
+ int idx)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@
new_raw_count) != prev_raw_count)
goto again;
- new_raw_count &= armpmu->max_period;
- prev_raw_count &= armpmu->max_period;
-
- if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
- else
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
@@ -232,7 +226,7 @@
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -518,7 +512,13 @@
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
- hwc->sample_period = armpmu->max_period;
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -680,6 +680,28 @@
}
/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (cpu_pmu && cpu_pmu->reset)
+ cpu_pmu->reset(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+ .notifier_call = pmu_cpu_notify,
+};
+
+/*
* CPU PMU identification and registration.
*/
static int __init
@@ -730,6 +752,7 @@
pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
+ register_cpu_notifier(&pmu_cpu_notifier);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be99..b78af0c 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int counter_is_active(unsigned long pmcr, int idx)
-{
- unsigned long mask = 0;
- if (idx == ARMV6_CYCLE_COUNTER)
- mask = ARMV6_PMCR_CCOUNT_IEN;
- else if (idx == ARMV6_COUNTER0)
- mask = ARMV6_PMCR_COUNT0_IEN;
- else if (idx == ARMV6_COUNTER1)
- mask = ARMV6_PMCR_COUNT1_IEN;
-
- if (mask)
- return pmcr & mask;
-
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return 0;
-}
-
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
@@ -513,7 +496,8 @@
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!counter_is_active(pmcr, idx))
+ /* Ignore if we don't have an event. */
+ if (!event)
continue;
/*
@@ -524,7 +508,7 @@
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244..4d7095a 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ isb();
+
return idx;
}
@@ -955,6 +960,10 @@
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
@@ -963,7 +972,7 @@
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d82..71a21e6 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ if (!event)
+ continue;
+
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -592,11 +595,14 @@
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -663,7 +669,7 @@
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- unsigned long flags, ien, evtsel;
+ unsigned long flags, ien, evtsel, of_flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@
raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index b7582dd..96e2adc 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -38,10 +38,6 @@
#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
static u64 hdmac_dmamask = DMA_BIT_MASK(32);
-static struct at_dma_platform_data atdma_pdata = {
- .nr_channels = 8,
-};
-
static struct resource hdmac_resources[] = {
[0] = {
.start = AT91SAM9G45_BASE_DMA,
@@ -56,12 +52,11 @@
};
static struct platform_device at_hdmac_device = {
- .name = "at_hdmac",
+ .name = "at91sam9g45_dma",
.id = -1,
.dev = {
.dma_mask = &hdmac_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &atdma_pdata,
},
.resource = hdmac_resources,
.num_resources = ARRAY_SIZE(hdmac_resources),
@@ -69,9 +64,15 @@
void __init at91_add_device_hdmac(void)
{
- dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
- dma_cap_set(DMA_SLAVE, atdma_pdata.cap_mask);
- platform_device_register(&at_hdmac_device);
+#if defined(CONFIG_OF)
+ struct device_node *of_node =
+ of_find_node_by_name(NULL, "dma-controller");
+
+ if (of_node)
+ of_node_put(of_node);
+ else
+#endif
+ platform_device_register(&at_hdmac_device);
}
#else
void __init at91_add_device_hdmac(void) {}
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 61908dc..9be71c11 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -33,10 +33,6 @@
#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
static u64 hdmac_dmamask = DMA_BIT_MASK(32);
-static struct at_dma_platform_data atdma_pdata = {
- .nr_channels = 2,
-};
-
static struct resource hdmac_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_DMA,
@@ -51,12 +47,11 @@
};
static struct platform_device at_hdmac_device = {
- .name = "at_hdmac",
+ .name = "at91sam9rl_dma",
.id = -1,
.dev = {
.dma_mask = &hdmac_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &atdma_pdata,
},
.resource = hdmac_resources,
.num_resources = ARRAY_SIZE(hdmac_resources),
@@ -64,7 +59,6 @@
void __init at91_add_device_hdmac(void)
{
- dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
platform_device_register(&at_hdmac_device);
}
#else
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index d5fb44f..d67d0b4 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -34,6 +34,7 @@
#include <mach/ep93xx_spi.h>
#include <mach/gpio-ep93xx.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
@@ -361,6 +362,7 @@
.atag_offset = 0x100,
.map_io = vision_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = vision_init_machine,
.restart = ep93xx_restart,
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 0fc65ff..3893995 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/mfd/max8998.h>
#include <linux/regulator/machine.h>
@@ -595,6 +596,7 @@
.threshold = 0x28,
.voltage = 2800000, /* 2.8V */
.orient = MXT_DIAGONAL,
+ .irqflags = IRQF_TRIGGER_FALLING,
};
static struct i2c_board_info i2c3_devs[] __initdata = {
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 6c58266..719ee42 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -343,6 +343,7 @@
case 0xb944:
omap_revision = AM335X_REV_ES1_0;
*cpu_rev = "1.0";
+ break;
case 0xb8f2:
switch (rev) {
case 0:
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 2cc1aa0..415a6f1 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -420,8 +420,7 @@
platform_driver_unregister(&omap2_mbox_driver);
}
-/* must be ready before omap3isp is probed */
-subsys_initcall(omap2_mbox_init);
+module_init(omap2_mbox_init);
module_exit(omap2_mbox_exit);
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index b882204..ac49384 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -150,7 +150,8 @@
platform_device_put(omap_iommu_pdev[i]);
return err;
}
-module_init(omap_iommu_init);
+/* must be ready before omap3isp is probed */
+subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index ebc5950..70de277 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -31,6 +31,7 @@
#include "common.h"
#include "omap4-sar-layout.h"
+#include <linux/export.h>
#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
@@ -55,6 +56,7 @@
isb();
}
}
+EXPORT_SYMBOL(omap_bus_sync);
/* Steal one page physical memory for barrier implementation */
int __init omap_barrier_reserve_memblock(void)
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 10b20c6..4b57757 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -270,7 +270,6 @@
.constraints = {
.min_uV = 3300000,
.max_uV = 3300000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 0d729e6..42d5cca 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -49,7 +49,6 @@
#endif
extern struct syscore_ops pxa_irq_syscore_ops;
-extern struct syscore_ops pxa_gpio_syscore_ops;
extern struct syscore_ops pxa2xx_mfp_syscore_ops;
extern struct syscore_ops pxa3xx_mfp_syscore_ops;
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index f147755..29b62af 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -226,6 +226,12 @@
{
int i;
+ /* running before pxa_gpio_probe() */
+#ifdef CONFIG_CPU_PXA26x
+ pxa_last_gpio = 89;
+#else
+ pxa_last_gpio = 84;
+#endif
for (i = 0; i <= pxa_last_gpio; i++)
gpio_desc[i].valid = 1;
@@ -295,6 +301,7 @@
{
int i, gpio;
+ pxa_last_gpio = 120; /* running before pxa_gpio_probe() */
for (i = 0; i <= pxa_last_gpio; i++) {
/* skip GPIO2, 5, 6, 7, 8, they are not
* valid pins allow configuration
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 00d6eac..3352b37 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -208,6 +208,7 @@
INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"),
INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"),
INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL),
+ INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
};
static struct clk_lookup pxa25x_hwuart_clkreg =
@@ -367,7 +368,6 @@
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
- register_syscore_ops(&pxa_gpio_syscore_ops);
register_syscore_ops(&pxa2xx_clock_syscore_ops);
ret = platform_add_devices(pxa25x_devices,
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index c1673b3..6bce78e 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -229,6 +229,7 @@
INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
+ INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
};
#ifdef CONFIG_PM
@@ -455,7 +456,6 @@
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
- register_syscore_ops(&pxa_gpio_syscore_ops);
register_syscore_ops(&pxa2xx_clock_syscore_ops);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 4f402af..3918a67 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -462,7 +462,6 @@
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa3xx_mfp_syscore_ops);
- register_syscore_ops(&pxa_gpio_syscore_ops);
register_syscore_ops(&pxa3xx_clock_syscore_ops);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c
index d082a58..5ce434b 100644
--- a/arch/arm/mach-pxa/pxa95x.c
+++ b/arch/arm/mach-pxa/pxa95x.c
@@ -283,7 +283,6 @@
return ret;
register_syscore_ops(&pxa_irq_syscore_ops);
- register_syscore_ops(&pxa_gpio_syscore_ops);
register_syscore_ops(&pxa3xx_clock_syscore_ops);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-s3c2440/common.h b/arch/arm/mach-s3c2440/common.h
index db8a98a..0c1eb1d 100644
--- a/arch/arm/mach-s3c2440/common.h
+++ b/arch/arm/mach-s3c2440/common.h
@@ -12,6 +12,6 @@
#ifndef __ARCH_ARM_MACH_S3C2440_COMMON_H
#define __ARCH_ARM_MACH_S3C2440_COMMON_H
-void s3c2440_restart(char mode, const char *cmd);
+void s3c244x_restart(char mode, const char *cmd);
#endif /* __ARCH_ARM_MACH_S3C2440_COMMON_H */
diff --git a/arch/arm/mach-s3c2440/mach-anubis.c b/arch/arm/mach-s3c2440/mach-anubis.c
index 2456955..19b577b 100644
--- a/arch/arm/mach-s3c2440/mach-anubis.c
+++ b/arch/arm/mach-s3c2440/mach-anubis.c
@@ -487,5 +487,5 @@
.init_machine = anubis_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c
index d6a9763..d7ae49c 100644
--- a/arch/arm/mach-s3c2440/mach-at2440evb.c
+++ b/arch/arm/mach-s3c2440/mach-at2440evb.c
@@ -222,5 +222,5 @@
.init_machine = at2440evb_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 5859e60..9a4a5bc 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -601,5 +601,5 @@
.init_irq = s3c24xx_init_irq,
.init_machine = gta02_machine_init,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index adbbb85..5d66fb2 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -701,5 +701,5 @@
.init_machine = mini2440_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-nexcoder.c b/arch/arm/mach-s3c2440/mach-nexcoder.c
index 40eaf84..5198e3e 100644
--- a/arch/arm/mach-s3c2440/mach-nexcoder.c
+++ b/arch/arm/mach-s3c2440/mach-nexcoder.c
@@ -158,5 +158,5 @@
.init_machine = nexcoder_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index 4c480ef..c5daeb6 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -436,5 +436,5 @@
.init_irq = s3c24xx_init_irq,
.init_machine = osiris_init,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index 80077f6..6f68abf 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -822,5 +822,5 @@
.init_irq = s3c24xx_init_irq,
.init_machine = rx1950_init_machine,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx3715.c b/arch/arm/mach-s3c2440/mach-rx3715.c
index 20103ba..56af354 100644
--- a/arch/arm/mach-s3c2440/mach-rx3715.c
+++ b/arch/arm/mach-s3c2440/mach-rx3715.c
@@ -213,5 +213,5 @@
.init_irq = rx3715_init_irq,
.init_machine = rx3715_init_machine,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-smdk2440.c b/arch/arm/mach-s3c2440/mach-smdk2440.c
index 1deb60d..83a1036 100644
--- a/arch/arm/mach-s3c2440/mach-smdk2440.c
+++ b/arch/arm/mach-s3c2440/mach-smdk2440.c
@@ -183,5 +183,5 @@
.map_io = smdk2440_map_io,
.init_machine = smdk2440_machine_init,
.timer = &s3c24xx_timer,
- .restart = s3c2440_restart,
+ .restart = s3c244x_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index 517623a..2b3dddb 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -35,7 +35,6 @@
#include <plat/cpu.h>
#include <plat/s3c244x.h>
#include <plat/pm.h>
-#include <plat/watchdog-reset.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
@@ -74,15 +73,3 @@
s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up;
s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up;
}
-
-void s3c2440_restart(char mode, const char *cmd)
-{
- if (mode == 's') {
- soft_restart(0);
- }
-
- arch_wdt_reset();
-
- /* we'll take a jump through zero as a poor second */
- soft_restart(0);
-}
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c
index 36bc60f6..d15852f 100644
--- a/arch/arm/mach-s3c2440/s3c244x.c
+++ b/arch/arm/mach-s3c2440/s3c244x.c
@@ -46,6 +46,7 @@
#include <plat/pm.h>
#include <plat/pll.h>
#include <plat/nand-core.h>
+#include <plat/watchdog-reset.h>
static struct map_desc s3c244x_iodesc[] __initdata = {
IODESC_ENT(CLKPWR),
@@ -196,3 +197,14 @@
.suspend = s3c244x_suspend,
.resume = s3c244x_resume,
};
+
+void s3c244x_restart(char mode, const char *cmd)
+{
+ if (mode == 's')
+ soft_restart(0);
+
+ arch_wdt_reset();
+
+ /* we'll take a jump through zero as a poor second */
+ soft_restart(0);
+}
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 52af004..c59e8b8 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -5,7 +5,7 @@
default y
select ARM_GIC
select HAS_MTU
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_ERRATA_754322
select ARM_ERRATA_764369
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 9b3d0fb..88c3ba15 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -7,7 +7,7 @@
select ARM_GIC
select ARM_ERRATA_720789
select ARM_ERRATA_751472
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0404ccb..f1c8486 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -230,9 +230,7 @@
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
- teq r6, #0x20 @ present in r2p0
- teqne r6, #0x21 @ present in r2p1
- teqne r6, #0x22 @ present in r2p2
+ teq r5, #0x00200000 @ only present in r2p*
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 2efd645..37bbbbb 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -428,8 +428,16 @@
#define OMAP_GPMC_NR_IRQS 8
#define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS)
+/* PRCM IRQ handler */
+#ifdef CONFIG_ARCH_OMAP2PLUS
+#define OMAP_PRCM_IRQ_BASE (OMAP_GPMC_IRQ_END)
+#define OMAP_PRCM_NR_IRQS 64
+#define OMAP_PRCM_IRQ_END (OMAP_PRCM_IRQ_BASE + OMAP_PRCM_NR_IRQS)
+#else
+#define OMAP_PRCM_IRQ_END OMAP_GPMC_IRQ_END
+#endif
-#define NR_IRQS OMAP_GPMC_IRQ_END
+#define NR_IRQS OMAP_PRCM_IRQ_END
#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 9fe3534..2bab4c9 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1249,7 +1249,7 @@
struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
int channel;
- for (channel = dma_channels - 1; channel >= 0; cp++, channel--)
+ for (channel = dma_channels - 1; channel >= 0; cp--, channel--)
s3c2410_dma_resume_chan(cp);
}
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index f10768e..d21d744 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -1409,7 +1409,7 @@
#ifdef CONFIG_S3C_DEV_USB_HSOTG
static struct resource s3c_usb_hsotg_resources[] = {
- [0] = DEFINE_RES_MEM(S3C_PA_USB_HSOTG, SZ_16K),
+ [0] = DEFINE_RES_MEM(S3C_PA_USB_HSOTG, SZ_128K),
[1] = DEFINE_RES_IRQ(IRQ_OTG),
};
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index 0c77e42..abb5bde 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -145,11 +145,13 @@
static int clockevent_next_event(unsigned long cycles,
struct clock_event_device *clk_event_dev)
{
- u16 val;
+ u16 val = readw(gpt_base + CR(CLKEVT));
+
+ if (val & CTRL_ENABLE)
+ writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT));
writew(cycles, gpt_base + LOAD(CLKEVT));
- val = readw(gpt_base + CR(CLKEVT));
val |= CTRL_ENABLE | CTRL_INT_ENABLE;
writew(val, gpt_base + CR(CLKEVT));
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index 8154c4e..77ecbde 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -122,8 +122,8 @@
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (task_pt_regs(task)->pc)
-#define KSTK_ESP(tsk) (task_pt_regs(task)->sp)
+#define KSTK_EIP(task) (task_pt_regs(task)->pc)
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#define cpu_relax() do { } while (0)
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 4add173..4389de1 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,5 +3,5 @@
# under Linux.
#
-obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o
+obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index a84e3bb..d43ceff 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -107,8 +107,7 @@
value = eq + 1;
if ((eq - var) == strlen(name) &&
strncmp(var, name, (eq - var)) == 0) {
- snprintf(val, val_len, "%s", value);
- return 0;
+ return snprintf(val, val_len, "%s", value);
}
}
return NVRAM_ERR_ENVNOTFOUND;
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index aab6b0c..19780aa9 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -3,7 +3,7 @@
* Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2006 Michael Buesch <m@bues.ch>
* Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
- * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -85,156 +85,7 @@
}
#ifdef CONFIG_BCM47XX_SSB
-#define READ_FROM_NVRAM(_outvar, name, buf) \
- if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\
- sprom->_outvar = simple_strtoul(buf, NULL, 0);
-
-#define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \
- if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \
- nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\
- sprom->_outvar = simple_strtoul(buf, NULL, 0);
-
-static inline int nvram_getprefix(const char *prefix, char *name,
- char *buf, int len)
-{
- if (prefix) {
- char key[100];
-
- snprintf(key, sizeof(key), "%s%s", prefix, name);
- return nvram_getenv(key, buf, len);
- }
-
- return nvram_getenv(name, buf, len);
-}
-
-static u32 nvram_getu32(const char *name, char *buf, int len)
-{
- int rv;
- char key[100];
- u16 var0, var1;
-
- snprintf(key, sizeof(key), "%s0", name);
- rv = nvram_getenv(key, buf, len);
- /* return 0 here so this looks like unset */
- if (rv < 0)
- return 0;
- var0 = simple_strtoul(buf, NULL, 0);
-
- snprintf(key, sizeof(key), "%s1", name);
- rv = nvram_getenv(key, buf, len);
- if (rv < 0)
- return 0;
- var1 = simple_strtoul(buf, NULL, 0);
- return var1 << 16 | var0;
-}
-
-static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
-{
- char buf[100];
- u32 boardflags;
-
- memset(sprom, 0, sizeof(struct ssb_sprom));
-
- sprom->revision = 1; /* Fallback: Old hardware does not define this. */
- READ_FROM_NVRAM(revision, "sromrev", buf);
- if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 ||
- nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0)
- nvram_parse_macaddr(buf, sprom->il0mac);
- if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0)
- nvram_parse_macaddr(buf, sprom->et0mac);
- if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0)
- nvram_parse_macaddr(buf, sprom->et1mac);
- READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf);
- READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf);
- READ_FROM_NVRAM(et0mdcport, "et0mdcport", buf);
- READ_FROM_NVRAM(et1mdcport, "et1mdcport", buf);
- READ_FROM_NVRAM(board_rev, "boardrev", buf);
- READ_FROM_NVRAM(country_code, "ccode", buf);
- READ_FROM_NVRAM(ant_available_a, "aa5g", buf);
- READ_FROM_NVRAM(ant_available_bg, "aa2g", buf);
- READ_FROM_NVRAM(pa0b0, "pa0b0", buf);
- READ_FROM_NVRAM(pa0b1, "pa0b1", buf);
- READ_FROM_NVRAM(pa0b2, "pa0b2", buf);
- READ_FROM_NVRAM(pa1b0, "pa1b0", buf);
- READ_FROM_NVRAM(pa1b1, "pa1b1", buf);
- READ_FROM_NVRAM(pa1b2, "pa1b2", buf);
- READ_FROM_NVRAM(pa1lob0, "pa1lob0", buf);
- READ_FROM_NVRAM(pa1lob2, "pa1lob1", buf);
- READ_FROM_NVRAM(pa1lob1, "pa1lob2", buf);
- READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf);
- READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf);
- READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf);
- READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf);
- READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf);
- READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf);
- READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf);
- READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf);
- READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf);
- READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf);
- READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf);
- READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf);
- READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf);
- READ_FROM_NVRAM(tri2g, "tri2g", buf);
- READ_FROM_NVRAM(tri5gl, "tri5gl", buf);
- READ_FROM_NVRAM(tri5g, "tri5g", buf);
- READ_FROM_NVRAM(tri5gh, "tri5gh", buf);
- READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf);
- READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf);
- READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf);
- READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf);
- READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf);
- READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf);
- READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf);
- READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf);
- READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf);
- READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf);
- READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf);
- READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf);
- READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf);
- READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf);
- READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf);
- READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf);
- READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf);
- READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf);
- READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf);
- READ_FROM_NVRAM(rssismc2g, "rssismc2g", buf);
- READ_FROM_NVRAM(rssismf2g, "rssismf2g", buf);
- READ_FROM_NVRAM(bxa2g, "bxa2g", buf);
- READ_FROM_NVRAM(rssisav5g, "rssisav5g", buf);
- READ_FROM_NVRAM(rssismc5g, "rssismc5g", buf);
- READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf);
- READ_FROM_NVRAM(bxa5g, "bxa5g", buf);
- READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf);
-
- sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf));
- sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf));
- sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf));
- sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf));
-
- READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf);
- READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf);
- READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf);
- READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf);
- memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24,
- sizeof(sprom->antenna_gain.ghz5));
-
- if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) {
- boardflags = simple_strtoul(buf, NULL, 0);
- if (boardflags) {
- sprom->boardflags_lo = (boardflags & 0x0000FFFFU);
- sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16;
- }
- }
- if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) {
- boardflags = simple_strtoul(buf, NULL, 0);
- if (boardflags) {
- sprom->boardflags2_lo = (boardflags & 0x0000FFFFU);
- sprom->boardflags2_hi = (boardflags & 0xFFFF0000U) >> 16;
- }
- }
-}
-
-int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
{
char prefix[10];
@@ -251,7 +102,7 @@
}
static int bcm47xx_get_invariants(struct ssb_bus *bus,
- struct ssb_init_invariants *iv)
+ struct ssb_init_invariants *iv)
{
char buf[20];
@@ -281,7 +132,7 @@
char buf[100];
struct ssb_mipscore *mcore;
- err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom);
+ err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb);
if (err)
printk(KERN_WARNING "bcm47xx: someone else already registered"
" a ssb SPROM callback handler (err %d)\n", err);
@@ -308,10 +159,41 @@
#endif
#ifdef CONFIG_BCM47XX_BCMA
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+ char prefix[10];
+ struct bcma_device *core;
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_fill_sprom(out, prefix);
+ return 0;
+ case BCMA_HOSTTYPE_SOC:
+ bcm47xx_fill_sprom_ethernet(out, NULL);
+ core = bcma_find_core(bus, BCMA_CORE_80211);
+ if (core) {
+ snprintf(prefix, sizeof(prefix), "sb/%u/",
+ core->core_index);
+ bcm47xx_fill_sprom(out, prefix);
+ }
+ return 0;
+ default:
+ pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+}
+
static void __init bcm47xx_register_bcma(void)
{
int err;
+ err = bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma);
+ if (err)
+ pr_warn("bcm47xx: someone else already registered a bcma SPROM callback handler (err %d)\n", err);
+
err = bcma_host_soc_register(&bcm47xx_bus.bcma);
if (err)
panic("Failed to initialize BCMA bus (err %d)", err);
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
new file mode 100644
index 0000000..5c8dcd2
--- /dev/null
+++ b/arch/mips/bcm47xx/sprom.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <bcm47xx.h>
+#include <nvram.h>
+
+static void create_key(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len)
+{
+ if (prefix && postfix)
+ snprintf(buf, len, "%s%s%s", prefix, name, postfix);
+ else if (prefix)
+ snprintf(buf, len, "%s%s", prefix, name);
+ else if (postfix)
+ snprintf(buf, len, "%s%s", name, postfix);
+ else
+ snprintf(buf, len, "%s", name);
+}
+
+#define NVRAM_READ_VAL(type) \
+static void nvram_read_ ## type (const char *prefix, \
+ const char *postfix, const char *name, \
+ type *val, type allset) \
+{ \
+ char buf[100]; \
+ char key[40]; \
+ int err; \
+ type var; \
+ \
+ create_key(prefix, postfix, name, key, sizeof(key)); \
+ \
+ err = nvram_getenv(key, buf, sizeof(buf)); \
+ if (err < 0) \
+ return; \
+ err = kstrto ## type (buf, 0, &var); \
+ if (err) { \
+ pr_warn("can not parse nvram name %s with value %s" \
+ " got %i", key, buf, err); \
+ return; \
+ } \
+ if (allset && var == allset) \
+ return; \
+ *val = var; \
+}
+
+NVRAM_READ_VAL(u8)
+NVRAM_READ_VAL(s8)
+NVRAM_READ_VAL(u16)
+NVRAM_READ_VAL(u32)
+
+#undef NVRAM_READ_VAL
+
+static void nvram_read_u32_2(const char *prefix, const char *name,
+ u16 *val_lo, u16 *val_hi)
+{
+ char buf[100];
+ char key[40];
+ int err;
+ u32 val;
+
+ create_key(prefix, NULL, name, key, sizeof(key));
+
+ err = nvram_getenv(key, buf, sizeof(buf));
+ if (err < 0)
+ return;
+ err = kstrtou32(buf, 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s with value %s got %i",
+ key, buf, err);
+ return;
+ }
+ *val_lo = (val & 0x0000FFFFU);
+ *val_hi = (val & 0xFFFF0000U) >> 16;
+}
+
+static void nvram_read_leddc(const char *prefix, const char *name,
+ u8 *leddc_on_time, u8 *leddc_off_time)
+{
+ char buf[100];
+ char key[40];
+ int err;
+ u32 val;
+
+ create_key(prefix, NULL, name, key, sizeof(key));
+
+ err = nvram_getenv(key, buf, sizeof(buf));
+ if (err < 0)
+ return;
+ err = kstrtou32(buf, 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s with value %s got %i",
+ key, buf, err);
+ return;
+ }
+
+ if (val == 0xffff || val == 0xffffffff)
+ return;
+
+ *leddc_on_time = val & 0xff;
+ *leddc_off_time = (val >> 16) & 0xff;
+}
+
+static void nvram_read_macaddr(const char *prefix, const char *name,
+ u8 (*val)[6])
+{
+ char buf[100];
+ char key[40];
+ int err;
+
+ create_key(prefix, NULL, name, key, sizeof(key));
+
+ err = nvram_getenv(key, buf, sizeof(buf));
+ if (err < 0)
+ return;
+ nvram_parse_macaddr(buf, *val);
+}
+
+static void nvram_read_alpha2(const char *prefix, const char *name,
+ char (*val)[2])
+{
+ char buf[10];
+ char key[40];
+ int err;
+
+ create_key(prefix, NULL, name, key, sizeof(key));
+
+ err = nvram_getenv(key, buf, sizeof(buf));
+ if (err < 0)
+ return;
+ if (buf[0] == '0')
+ return;
+ if (strlen(buf) > 2) {
+ pr_warn("alpha2 is too long %s", buf);
+ return;
+ }
+ memcpy(val, buf, sizeof(val));
+}
+
+static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
+ const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0);
+ nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0);
+ nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff);
+ nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff);
+ nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff);
+ nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff);
+ nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0);
+ nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0);
+ nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0);
+ nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0);
+ nvram_read_alpha2(prefix, "ccode", &sprom->alpha2);
+}
+
+static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom,
+ const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0);
+ nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0);
+ nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0);
+ nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0);
+ nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0);
+ nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0);
+ nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0);
+ nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0);
+ nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0);
+ nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0);
+}
+
+static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0);
+ nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0);
+}
+
+static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom,
+ const char *prefix)
+{
+ nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0);
+ nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0);
+ nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0);
+ nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0);
+ nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0);
+ nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0);
+ nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0);
+ nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0);
+ nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0);
+}
+
+static void bcm47xx_fill_sprom_r2(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi);
+ nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
+}
+
+static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0);
+ nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0);
+ nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0);
+ nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0);
+ nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0);
+ nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0);
+ nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0);
+ nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0);
+ nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0);
+ nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0);
+ nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0);
+ nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0);
+ nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0);
+ nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0);
+}
+
+static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi);
+ nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
+ nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0);
+ nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+ &sprom->leddc_off_time);
+}
+
+static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom,
+ const char *prefix)
+{
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi);
+ nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+ &sprom->boardflags2_hi);
+ nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
+ nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0);
+ nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0);
+ nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0);
+ nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf);
+ nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf);
+ nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff);
+ nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+ &sprom->leddc_off_time);
+}
+
+static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0);
+ nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0);
+ nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0);
+ nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0);
+ nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0);
+ nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0);
+ nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0);
+ nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0);
+ nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0);
+ nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0);
+ nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0);
+ nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0);
+}
+
+static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0);
+ nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0);
+ nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0);
+ nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0);
+ nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0);
+ nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0);
+ nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0);
+ nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0);
+ nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0);
+}
+
+static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0);
+ nvram_read_u8(prefix, NULL, "extpagain2g",
+ &sprom->fem.ghz2.extpa_gain, 0);
+ nvram_read_u8(prefix, NULL, "pdetrange2g",
+ &sprom->fem.ghz2.pdet_range, 0);
+ nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0);
+ nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0);
+ nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0);
+ nvram_read_u8(prefix, NULL, "extpagain5g",
+ &sprom->fem.ghz5.extpa_gain, 0);
+ nvram_read_u8(prefix, NULL, "pdetrange5g",
+ &sprom->fem.ghz5.pdet_range, 0);
+ nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0);
+ nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0);
+ nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0);
+ nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0);
+ nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0);
+ nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0);
+ nvram_read_u8(prefix, NULL, "tempsense_slope",
+ &sprom->tempsense_slope, 0);
+ nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0);
+ nvram_read_u8(prefix, NULL, "tempsense_option",
+ &sprom->tempsense_option, 0);
+ nvram_read_u8(prefix, NULL, "freqoffset_corr",
+ &sprom->freqoffset_corr, 0);
+ nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0);
+ nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0);
+ nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0);
+ nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0);
+ nvram_read_u8(prefix, NULL, "phycal_tempdelta",
+ &sprom->phycal_tempdelta, 0);
+ nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0);
+ nvram_read_u8(prefix, NULL, "temps_hysteresis",
+ &sprom->temps_hysteresis, 0);
+ nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0);
+ nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr2ga0",
+ &sprom->rxgainerr2ga[0], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr2ga1",
+ &sprom->rxgainerr2ga[1], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr2ga2",
+ &sprom->rxgainerr2ga[2], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gla0",
+ &sprom->rxgainerr5gla[0], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gla1",
+ &sprom->rxgainerr5gla[1], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gla2",
+ &sprom->rxgainerr5gla[2], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gma0",
+ &sprom->rxgainerr5gma[0], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gma1",
+ &sprom->rxgainerr5gma[1], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gma2",
+ &sprom->rxgainerr5gma[2], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gha0",
+ &sprom->rxgainerr5gha[0], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gha1",
+ &sprom->rxgainerr5gha[1], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gha2",
+ &sprom->rxgainerr5gha[2], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gua0",
+ &sprom->rxgainerr5gua[0], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gua1",
+ &sprom->rxgainerr5gua[1], 0);
+ nvram_read_u8(prefix, NULL, "rxgainerr5gua2",
+ &sprom->rxgainerr5gua[2], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gla0",
+ &sprom->noiselvl5gla[0], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gla1",
+ &sprom->noiselvl5gla[1], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gla2",
+ &sprom->noiselvl5gla[2], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gma0",
+ &sprom->noiselvl5gma[0], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gma1",
+ &sprom->noiselvl5gma[1], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gma2",
+ &sprom->noiselvl5gma[2], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gha0",
+ &sprom->noiselvl5gha[0], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gha1",
+ &sprom->noiselvl5gha[1], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gha2",
+ &sprom->noiselvl5gha[2], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gua0",
+ &sprom->noiselvl5gua[0], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gua1",
+ &sprom->noiselvl5gua[1], 0);
+ nvram_read_u8(prefix, NULL, "noiselvl5gua2",
+ &sprom->noiselvl5gua[2], 0);
+ nvram_read_u8(prefix, NULL, "pcieingress_war",
+ &sprom->pcieingress_war, 0);
+}
+
+static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0);
+ nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw202gpo",
+ &sprom->legofdmbw202gpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo",
+ &sprom->legofdmbw20ul2gpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw205glpo",
+ &sprom->legofdmbw205glpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo",
+ &sprom->legofdmbw20ul5glpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw205gmpo",
+ &sprom->legofdmbw205gmpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo",
+ &sprom->legofdmbw20ul5gmpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw205ghpo",
+ &sprom->legofdmbw205ghpo, 0);
+ nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo",
+ &sprom->legofdmbw20ul5ghpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo",
+ &sprom->mcsbw20ul5glpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo",
+ &sprom->mcsbw20ul5gmpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo",
+ &sprom->mcsbw20ul5ghpo, 0);
+ nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0);
+ nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0);
+ nvram_read_u16(prefix, NULL, "legofdm40duppo",
+ &sprom->legofdm40duppo, 0);
+ nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0);
+ nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0);
+}
+
+static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
+ const char *prefix)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i];
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u8(prefix, postfix, "maxp2ga",
+ &pwr_info->maxpwr_2g, 0);
+ nvram_read_u8(prefix, postfix, "itt2ga",
+ &pwr_info->itssi_2g, 0);
+ nvram_read_u8(prefix, postfix, "itt5ga",
+ &pwr_info->itssi_5g, 0);
+ nvram_read_u16(prefix, postfix, "pa2gw0a",
+ &pwr_info->pa_2g[0], 0);
+ nvram_read_u16(prefix, postfix, "pa2gw1a",
+ &pwr_info->pa_2g[1], 0);
+ nvram_read_u16(prefix, postfix, "pa2gw2a",
+ &pwr_info->pa_2g[2], 0);
+ nvram_read_u8(prefix, postfix, "maxp5ga",
+ &pwr_info->maxpwr_5g, 0);
+ nvram_read_u8(prefix, postfix, "maxp5gha",
+ &pwr_info->maxpwr_5gh, 0);
+ nvram_read_u8(prefix, postfix, "maxp5gla",
+ &pwr_info->maxpwr_5gl, 0);
+ nvram_read_u16(prefix, postfix, "pa5gw0a",
+ &pwr_info->pa_5g[0], 0);
+ nvram_read_u16(prefix, postfix, "pa5gw1a",
+ &pwr_info->pa_5g[1], 0);
+ nvram_read_u16(prefix, postfix, "pa5gw2a",
+ &pwr_info->pa_5g[2], 0);
+ nvram_read_u16(prefix, postfix, "pa5glw0a",
+ &pwr_info->pa_5gl[0], 0);
+ nvram_read_u16(prefix, postfix, "pa5glw1a",
+ &pwr_info->pa_5gl[1], 0);
+ nvram_read_u16(prefix, postfix, "pa5glw2a",
+ &pwr_info->pa_5gl[2], 0);
+ nvram_read_u16(prefix, postfix, "pa5ghw0a",
+ &pwr_info->pa_5gh[0], 0);
+ nvram_read_u16(prefix, postfix, "pa5ghw1a",
+ &pwr_info->pa_5gh[1], 0);
+ nvram_read_u16(prefix, postfix, "pa5ghw2a",
+ &pwr_info->pa_5gh[2], 0);
+ }
+}
+
+static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
+ const char *prefix)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i];
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u16(prefix, postfix, "pa2gw3a",
+ &pwr_info->pa_2g[3], 0);
+ nvram_read_u16(prefix, postfix, "pa5gw3a",
+ &pwr_info->pa_5g[3], 0);
+ nvram_read_u16(prefix, postfix, "pa5glw3a",
+ &pwr_info->pa_5gl[3], 0);
+ nvram_read_u16(prefix, postfix, "pa5ghw3a",
+ &pwr_info->pa_5gh[3], 0);
+ }
+}
+
+void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix)
+{
+ nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac);
+ nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0);
+ nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0);
+
+ nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac);
+ nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0);
+ nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0);
+
+ nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac);
+ nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac);
+}
+
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
+{
+ memset(sprom, 0, sizeof(struct ssb_sprom));
+
+ bcm47xx_fill_sprom_ethernet(sprom, prefix);
+
+ nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0);
+
+ switch (sprom->revision) {
+ case 1:
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r12389(sprom, prefix);
+ bcm47xx_fill_sprom_r1(sprom, prefix);
+ break;
+ case 2:
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r12389(sprom, prefix);
+ bcm47xx_fill_sprom_r2389(sprom, prefix);
+ bcm47xx_fill_sprom_r2(sprom, prefix);
+ break;
+ case 3:
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r12389(sprom, prefix);
+ bcm47xx_fill_sprom_r2389(sprom, prefix);
+ bcm47xx_fill_sprom_r389(sprom, prefix);
+ bcm47xx_fill_sprom_r3(sprom, prefix);
+ break;
+ case 4:
+ case 5:
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r4589(sprom, prefix);
+ bcm47xx_fill_sprom_r458(sprom, prefix);
+ bcm47xx_fill_sprom_r45(sprom, prefix);
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix);
+ bcm47xx_fill_sprom_path_r45(sprom, prefix);
+ break;
+ case 8:
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r12389(sprom, prefix);
+ bcm47xx_fill_sprom_r2389(sprom, prefix);
+ bcm47xx_fill_sprom_r389(sprom, prefix);
+ bcm47xx_fill_sprom_r4589(sprom, prefix);
+ bcm47xx_fill_sprom_r458(sprom, prefix);
+ bcm47xx_fill_sprom_r89(sprom, prefix);
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix);
+ break;
+ case 9:
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r12389(sprom, prefix);
+ bcm47xx_fill_sprom_r2389(sprom, prefix);
+ bcm47xx_fill_sprom_r389(sprom, prefix);
+ bcm47xx_fill_sprom_r4589(sprom, prefix);
+ bcm47xx_fill_sprom_r89(sprom, prefix);
+ bcm47xx_fill_sprom_r9(sprom, prefix);
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix);
+ break;
+ default:
+ pr_warn("Unsupported SPROM revision %d detected. Will extract"
+ " v1\n", sprom->revision);
+ sprom->revision = 1;
+ bcm47xx_fill_sprom_r1234589(sprom, prefix);
+ bcm47xx_fill_sprom_r12389(sprom, prefix);
+ bcm47xx_fill_sprom_r1(sprom, prefix);
+ }
+}
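The fill helpers above all go through nvram_read_u8()/_u16()/_u32(), which are not shown in this hunk. As an illustrative sketch only (not part of the patch), such a helper could be built on top of the exported nvram_getenv() roughly as below; the key layout ("<prefix><name><postfix>") and the fall-back-to-default behaviour are assumptions:

	/* hypothetical helper, relying on the surrounding file's kernel includes */
	static void example_nvram_read_u8(const char *prefix, const char *postfix,
					  const char *name, u8 *val, u8 def)
	{
		char key[48];
		char buf[32];

		/* build e.g. "pci/1/1/" + "maxp2ga" + "0" */
		snprintf(key, sizeof(key), "%s%s%s", prefix ? prefix : "",
			 name, postfix ? postfix : "");

		/* fall back to the caller-supplied default on lookup or parse errors */
		if (nvram_getenv(key, buf, sizeof(buf)) < 0 || kstrtou8(buf, 0, val))
			*val = def;
	}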
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index de95e07..5ecaf47 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -44,4 +44,7 @@
extern union bcm47xx_bus bcm47xx_bus;
extern enum bcm47xx_bus_type bcm47xx_bus_type;
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix);
+void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix);
+
#endif /* __ASM_BCM47XX_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h
index 184d5ec..69ef3ef 100644
--- a/arch/mips/include/asm/mach-bcm47xx/nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h
@@ -37,7 +37,7 @@
extern int nvram_getenv(char *name, char *val, size_t val_len);
-static inline void nvram_parse_macaddr(char *buf, u8 *macaddr)
+static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
{
if (strchr(buf, ':'))
sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index fc45ba8..e395693 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -48,9 +48,9 @@
}
/* TSC based delay: */
-static void delay_tsc(unsigned long loops)
+static void delay_tsc(unsigned long __loops)
{
- unsigned long bclock, now;
+ u32 bclock, now, loops = __loops;
int cpu;
preempt_disable();
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f581a18..8ecbb4b 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -333,13 +333,15 @@
* Lookup failure means no vma is above this address,
* i.e. return with success:
*/
- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+ vma = find_vma(mm, addr);
+ if (!vma)
return addr;
/*
* new region fits between prev_vma->vm_end and
* vma->vm_start, use it:
*/
+ prev_vma = vma->vm_prev;
if (addr + len <= vma->vm_start &&
(!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 956e9ac..485a11a6 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1873,7 +1873,7 @@
kfree(eni_dev->free_list);
free_irq:
- free_irq(eni_dev->irq, eni_dev);
+ free_irq(eni_dev->irq, dev);
out:
return error;
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 800163c..a058842 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -80,6 +80,7 @@
min_msk = 0x200D;
max_msk = 0xFFFF;
break;
+ case 0x4331:
case 43224:
case 43225:
break;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index b8379b9..7e138ec 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -61,7 +61,7 @@
.dev_attrs = bcma_device_attrs,
};
-static struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
+struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
{
struct bcma_device *core;
@@ -71,6 +71,7 @@
}
return NULL;
}
+EXPORT_SYMBOL_GPL(bcma_find_core);
static void bcma_release_core_dev(struct device *dev)
{
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index ca77525..cdcf75c 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -2,6 +2,8 @@
* Broadcom specific AMBA
* SPROM reading
*
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -14,6 +16,58 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+static int (*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
+
+/**
+ * bcma_arch_register_fallback_sprom - Registers a method providing a
+ * fallback SPROM if no SPROM is found.
+ *
+ * @sprom_callback: The callback function.
+ *
+ * With this function the architecture implementation may register a
+ * callback handler which fills the SPROM data structure. The fallback is
+ * used for PCI-based BCMA devices, where no valid SPROM can be found
+ * in the shadow registers, and to provide the SPROM for SoCs where BCMA
+ * controls the system bus.
+ *
+ * This function is useful for weird architectures that have a half-assed
+ * BCMA device hardwired to their PCI bus.
+ *
+ * This function is available to architecture code only, so it is not
+ * exported.
+ */
+int bcma_arch_register_fallback_sprom(int (*sprom_callback)(struct bcma_bus *bus,
+ struct ssb_sprom *out))
+{
+ if (get_fallback_sprom)
+ return -EEXIST;
+ get_fallback_sprom = sprom_callback;
+
+ return 0;
+}
+
+static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
+ struct ssb_sprom *out)
+{
+ int err;
+
+ if (!get_fallback_sprom) {
+ err = -ENOENT;
+ goto fail;
+ }
+
+ err = get_fallback_sprom(bus, out);
+ if (err)
+ goto fail;
+
+ pr_debug("Using SPROM revision %d provided by"
+ " platform.\n", bus->sprom.revision);
+ return 0;
+fail:
+ pr_warn("Using fallback SPROM failed (err %d)\n", err);
+ return err;
+}
+
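As a hedged, illustrative sketch (not part of this patch) of how architecture code might consume the new hook; the function names, the "pci/%u/%u/" prefix format and the call site are assumptions, loosely modelled on the existing ssb fallback on BCM47xx:

	/* hypothetical arch-side provider, e.g. in arch/mips/bcm47xx/ */
	static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
	{
		char prefix[16];

		if (bus->hosttype == BCMA_HOSTTYPE_PCI) {
			snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
				 bus->host_pci->bus->number + 1,
				 PCI_SLOT(bus->host_pci->devfn));
			bcm47xx_fill_sprom(out, prefix);
			return 0;
		}
		return -EINVAL;
	}

	static void __init bcm47xx_register_bcma_fallback(void)
	{
		/* register once during early platform setup */
		if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
			pr_warn("Failed to register BCMA fallback SPROM handler\n");
	}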
/**************************************************
* R/W ops.
**************************************************/
@@ -246,23 +300,128 @@
SSB_SROM8_FEM_ANTSWLUT_SHIFT);
}
+/*
+ * Indicates the presence of external SPROM.
+ */
+static bool bcma_sprom_ext_available(struct bcma_bus *bus)
+{
+ u32 chip_status;
+ u32 srom_control;
+ u32 present_mask;
+
+ if (bus->drv_cc.core->id.rev >= 31) {
+ if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
+ return false;
+
+ srom_control = bcma_read32(bus->drv_cc.core,
+ BCMA_CC_SROM_CONTROL);
+ return srom_control & BCMA_CC_SROM_CONTROL_PRESENT;
+ }
+
+ /* older chipcommon revisions use chip status register */
+ chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
+ switch (bus->chipinfo.id) {
+ case 0x4313:
+ present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
+ break;
+
+ case 0x4331:
+ present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
+ break;
+
+ default:
+ return true;
+ }
+
+ return chip_status & present_mask;
+}
+
+/*
+ * Indicates that on-chip OTP memory is present and enabled.
+ */
+static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
+{
+ u32 chip_status;
+ u32 otpsize = 0;
+ bool present;
+
+ chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
+ switch (bus->chipinfo.id) {
+ case 0x4313:
+ present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
+ break;
+
+ case 0x4331:
+ present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
+ break;
+
+ case 43224:
+ case 43225:
+ /* for these chips OTP is always available */
+ present = true;
+ break;
+
+ default:
+ present = false;
+ break;
+ }
+
+ if (present) {
+ otpsize = bus->drv_cc.capabilities & BCMA_CC_CAP_OTPS;
+ otpsize >>= BCMA_CC_CAP_OTPS_SHIFT;
+ }
+
+ return otpsize != 0;
+}
+
+/*
+ * Verify OTP is filled and determine the byte
+ * offset where SPROM data is located.
+ *
+ * On error, returns 0; byte offset otherwise.
+ */
+static int bcma_sprom_onchip_offset(struct bcma_bus *bus)
+{
+ struct bcma_device *cc = bus->drv_cc.core;
+ u32 offset;
+
+ /* verify OTP status */
+ if ((bcma_read32(cc, BCMA_CC_OTPS) & BCMA_CC_OTPS_GU_PROG_HW) == 0)
+ return 0;
+
+ /* obtain bit offset from otplayout register */
+ offset = (bcma_read32(cc, BCMA_CC_OTPL) & BCMA_CC_OTPL_GURGN_OFFSET);
+ return BCMA_CC_SPROM + (offset >> 3);
+}
+
int bcma_sprom_get(struct bcma_bus *bus)
{
- u16 offset;
+ u16 offset = BCMA_CC_SPROM;
u16 *sprom;
- u32 sromctrl;
int err = 0;
if (!bus->drv_cc.core)
return -EOPNOTSUPP;
- if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
- return -ENOENT;
-
- if (bus->drv_cc.core->id.rev >= 32) {
- sromctrl = bcma_read32(bus->drv_cc.core, BCMA_CC_SROM_CONTROL);
- if (!(sromctrl & BCMA_CC_SROM_CONTROL_PRESENT))
- return -ENOENT;
+ if (!bcma_sprom_ext_available(bus)) {
+ /*
+ * External SPROM takes precedence so check
+ * on-chip OTP only when no external SPROM
+ * is present.
+ */
+ if (bcma_sprom_onchip_available(bus)) {
+ /* determine offset */
+ offset = bcma_sprom_onchip_offset(bus);
+ }
+ if (!offset) {
+ /*
+ * No usable SPROM was found on the device itself, so ask
+ * the arch code whether a fallback SPROM is available in
+ * some other storage.
+ */
+ err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
+ return err;
+ }
}
sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
@@ -273,11 +432,6 @@
if (bus->chipinfo.id == 0x4331)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
- /* Most cards have SPROM moved by additional offset 0x30 (48 dwords).
- * According to brcm80211 this applies to cards with PCIe rev >= 6
- * TODO: understand this condition and use it */
- offset = (bus->chipinfo.id == 0x4331) ? BCMA_CC_SPROM :
- BCMA_CC_SPROM_PCIE6;
pr_debug("SPROM offset 0x%x\n", offset);
bcma_sprom_read(bus, offset, sprom);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 07f14d1..48442476 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -65,12 +65,14 @@
{ USB_DEVICE(0x0CF3, 0x3002) },
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0489, 0xE03D) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3004) },
+ { USB_DEVICE(0x13d3, 0x3375) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -87,6 +89,7 @@
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index a323bae..b8ac1c5 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -411,7 +411,7 @@
static int bfusb_open(struct hci_dev *hdev)
{
- struct bfusb_data *data = hdev->driver_data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
unsigned long flags;
int i, err;
@@ -437,7 +437,7 @@
static int bfusb_flush(struct hci_dev *hdev)
{
- struct bfusb_data *data = hdev->driver_data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
BT_DBG("hdev %p bfusb %p", hdev, data);
@@ -448,7 +448,7 @@
static int bfusb_close(struct hci_dev *hdev)
{
- struct bfusb_data *data = hdev->driver_data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
unsigned long flags;
BT_DBG("hdev %p bfusb %p", hdev, data);
@@ -483,7 +483,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- data = hdev->driver_data;
+ data = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -544,15 +544,6 @@
return 0;
}
-static void bfusb_destruct(struct hci_dev *hdev)
-{
- struct bfusb_data *data = hdev->driver_data;
-
- BT_DBG("hdev %p bfusb %p", hdev, data);
-
- kfree(data);
-}
-
static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -705,18 +696,15 @@
data->hdev = hdev;
hdev->bus = HCI_USB;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
SET_HCIDEV_DEV(hdev, &intf->dev);
hdev->open = bfusb_open;
hdev->close = bfusb_close;
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
- hdev->destruct = bfusb_destruct;
hdev->ioctl = bfusb_ioctl;
- hdev->owner = THIS_MODULE;
-
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
@@ -753,6 +741,7 @@
hci_unregister_dev(hdev);
hci_free_dev(hdev);
+ kfree(data);
}
static struct usb_driver bfusb_driver = {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index c6a0c61..1fcd923 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -561,7 +561,7 @@
static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
struct sk_buff *skb;
/* Ericsson baud rate command */
@@ -609,7 +609,7 @@
static int bluecard_hci_flush(struct hci_dev *hdev)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -620,7 +620,7 @@
static int bluecard_hci_open(struct hci_dev *hdev)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
@@ -640,7 +640,7 @@
static int bluecard_hci_close(struct hci_dev *hdev)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
unsigned int iobase = info->p_dev->resource[0]->start;
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
@@ -667,7 +667,7 @@
return -ENODEV;
}
- info = (bluecard_info_t *)(hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -691,11 +691,6 @@
}
-static void bluecard_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -734,18 +729,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = bluecard_hci_open;
hdev->close = bluecard_hci_close;
hdev->flush = bluecard_hci_flush;
hdev->send = bluecard_hci_send_frame;
- hdev->destruct = bluecard_hci_destruct;
hdev->ioctl = bluecard_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
id = inb(iobase + 0x30);
if ((id & 0x0f) == 0x02)
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 6283160..d894340 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -66,7 +66,7 @@
static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s queue %d buffer %p count %d", hdev->name,
queue, buf, count);
@@ -189,7 +189,7 @@
static void bpa10x_rx_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -219,7 +219,7 @@
static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -260,7 +260,7 @@
static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -301,7 +301,7 @@
static int bpa10x_open(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -329,7 +329,7 @@
static int bpa10x_close(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -343,7 +343,7 @@
static int bpa10x_flush(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -355,7 +355,7 @@
static int bpa10x_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
unsigned int pipe;
@@ -432,17 +432,6 @@
return 0;
}
-static void bpa10x_destruct(struct hci_dev *hdev)
-{
- struct bpa10x_data *data = hdev->driver_data;
-
- BT_DBG("%s", hdev->name);
-
- kfree_skb(data->rx_skb[0]);
- kfree_skb(data->rx_skb[1]);
- kfree(data);
-}
-
static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct bpa10x_data *data;
@@ -470,7 +459,7 @@
}
hdev->bus = HCI_USB;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
data->hdev = hdev;
@@ -480,9 +469,6 @@
hdev->close = bpa10x_close;
hdev->flush = bpa10x_flush;
hdev->send = bpa10x_send_frame;
- hdev->destruct = bpa10x_destruct;
-
- hdev->owner = THIS_MODULE;
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
@@ -512,6 +498,9 @@
hci_unregister_dev(data->hdev);
hci_free_dev(data->hdev);
+ kfree_skb(data->rx_skb[0]);
+ kfree_skb(data->rx_skb[1]);
+ kfree(data);
}
static struct usb_driver bpa10x_driver = {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 0c97e5d..9c09d6f 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -389,7 +389,7 @@
static int bt3c_hci_flush(struct hci_dev *hdev)
{
- bt3c_info_t *info = (bt3c_info_t *)(hdev->driver_data);
+ bt3c_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -428,7 +428,7 @@
return -ENODEV;
}
- info = (bt3c_info_t *) (hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -456,11 +456,6 @@
}
-static void bt3c_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -580,18 +575,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = bt3c_hci_open;
hdev->close = bt3c_hci_close;
hdev->flush = bt3c_hci_flush;
hdev->send = bt3c_hci_send_frame;
- hdev->destruct = bt3c_hci_destruct;
hdev->ioctl = bt3c_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
/* Load firmware */
err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
if (err < 0) {
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 8ecf4c6..6c20bbb 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -384,7 +384,7 @@
void btmrvl_debugfs_init(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_debugfs_data *dbg;
if (!hdev->debugfs)
@@ -401,36 +401,34 @@
dbg->config_dir = debugfs_create_dir("config", hdev->debugfs);
dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_psmode_fops);
+ priv, &btmrvl_psmode_fops);
dbg->pscmd = debugfs_create_file("pscmd", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_pscmd_fops);
+ priv, &btmrvl_pscmd_fops);
dbg->gpiogap = debugfs_create_file("gpiogap", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_gpiogap_fops);
+ priv, &btmrvl_gpiogap_fops);
dbg->hsmode = debugfs_create_file("hsmode", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_hsmode_fops);
+ priv, &btmrvl_hsmode_fops);
dbg->hscmd = debugfs_create_file("hscmd", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_hscmd_fops);
+ priv, &btmrvl_hscmd_fops);
dbg->hscfgcmd = debugfs_create_file("hscfgcmd", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_hscfgcmd_fops);
+ priv, &btmrvl_hscfgcmd_fops);
dbg->status_dir = debugfs_create_dir("status", hdev->debugfs);
dbg->curpsmode = debugfs_create_file("curpsmode", 0444,
- dbg->status_dir,
- hdev->driver_data,
- &btmrvl_curpsmode_fops);
+ dbg->status_dir, priv,
+ &btmrvl_curpsmode_fops);
dbg->psstate = debugfs_create_file("psstate", 0444, dbg->status_dir,
- hdev->driver_data, &btmrvl_psstate_fops);
+ priv, &btmrvl_psstate_fops);
dbg->hsstate = debugfs_create_file("hsstate", 0444, dbg->status_dir,
- hdev->driver_data, &btmrvl_hsstate_fops);
+ priv, &btmrvl_hsstate_fops);
dbg->txdnldready = debugfs_create_file("txdnldready", 0444,
- dbg->status_dir,
- hdev->driver_data,
- &btmrvl_txdnldready_fops);
+ dbg->status_dir, priv,
+ &btmrvl_txdnldready_fops);
}
void btmrvl_debugfs_remove(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_debugfs_data *dbg = priv->debugfs_data;
if (!dbg)
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 6c3defa..d1209ad 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -387,10 +387,6 @@
return -ENOIOCTLCMD;
}
-static void btmrvl_destruct(struct hci_dev *hdev)
-{
-}
-
static int btmrvl_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
@@ -398,12 +394,13 @@
BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
- if (!hdev || !hdev->driver_data) {
+ if (!hdev) {
BT_ERR("Frame for unknown HCI device");
return -ENODEV;
}
- priv = (struct btmrvl_private *) hdev->driver_data;
+ priv = hci_get_drvdata(hdev);
+
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
@@ -434,7 +431,7 @@
static int btmrvl_flush(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
skb_queue_purge(&priv->adapter->tx_queue);
@@ -443,7 +440,7 @@
static int btmrvl_close(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
@@ -546,16 +543,14 @@
}
priv->btmrvl_dev.hcidev = hdev;
- hdev->driver_data = priv;
+ hci_set_drvdata(hdev, priv);
hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
hdev->flush = btmrvl_flush;
hdev->send = btmrvl_send_frame;
- hdev->destruct = btmrvl_destruct;
hdev->ioctl = btmrvl_ioctl;
- hdev->owner = THIS_MODULE;
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 792e32d..e10ea03 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -189,7 +189,7 @@
static int btsdio_open(struct hci_dev *hdev)
{
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -225,7 +225,7 @@
static int btsdio_close(struct hci_dev *hdev)
{
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -246,7 +246,7 @@
static int btsdio_flush(struct hci_dev *hdev)
{
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -258,7 +258,7 @@
static int btsdio_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -289,15 +289,6 @@
return 0;
}
-static void btsdio_destruct(struct hci_dev *hdev)
-{
- struct btsdio_data *data = hdev->driver_data;
-
- BT_DBG("%s", hdev->name);
-
- kfree(data);
-}
-
static int btsdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -330,7 +321,7 @@
}
hdev->bus = HCI_SDIO;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
if (id->class == SDIO_CLASS_BT_AMP)
hdev->dev_type = HCI_AMP;
@@ -345,9 +336,6 @@
hdev->close = btsdio_close;
hdev->flush = btsdio_flush;
hdev->send = btsdio_send_frame;
- hdev->destruct = btsdio_destruct;
-
- hdev->owner = THIS_MODULE;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -378,6 +366,7 @@
hci_unregister_dev(hdev);
hci_free_dev(hdev);
+ kfree(data);
}
static struct sdio_driver btsdio_driver = {
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 200b3a2..194224d 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -397,7 +397,7 @@
static int btuart_hci_flush(struct hci_dev *hdev)
{
- btuart_info_t *info = (btuart_info_t *)(hdev->driver_data);
+ btuart_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -435,7 +435,7 @@
return -ENODEV;
}
- info = (btuart_info_t *)(hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -459,11 +459,6 @@
}
-static void btuart_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -498,18 +493,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = btuart_hci_open;
hdev->close = btuart_hci_close;
hdev->flush = btuart_hci_flush;
hdev->send = btuart_hci_send_frame;
- hdev->destruct = btuart_hci_destruct;
hdev->ioctl = btuart_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 789c9b5..480cad9 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@
/* Broadcom BCM20702A0 */
{ USB_DEVICE(0x0a5c, 0x21e3) },
+ { USB_DEVICE(0x0a5c, 0x21e6) },
{ USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) },
@@ -121,12 +122,14 @@
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
/* Atheros 3012 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -243,7 +246,7 @@
static void btusb_intr_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -282,7 +285,7 @@
static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -331,7 +334,7 @@
static void btusb_bulk_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -370,7 +373,7 @@
static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -417,7 +420,7 @@
static void btusb_isoc_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int i, err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -484,7 +487,7 @@
static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -537,7 +540,7 @@
{
struct sk_buff *skb = urb->context;
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
BT_DBG("%s urb %p status %d count %d", hdev->name,
urb, urb->status, urb->actual_length);
@@ -584,7 +587,7 @@
static int btusb_open(struct hci_dev *hdev)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -634,7 +637,7 @@
static int btusb_close(struct hci_dev *hdev)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -664,7 +667,7 @@
static int btusb_flush(struct hci_dev *hdev)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -676,7 +679,7 @@
static int btusb_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
unsigned int pipe;
@@ -784,18 +787,9 @@
return err;
}
-static void btusb_destruct(struct hci_dev *hdev)
-{
- struct btusb_data *data = hdev->driver_data;
-
- BT_DBG("%s", hdev->name);
-
- kfree(data);
-}
-
static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
BT_DBG("%s evt %d", hdev->name, evt);
@@ -807,7 +801,7 @@
static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_interface *intf = data->isoc;
struct usb_endpoint_descriptor *ep_desc;
int i, err;
@@ -995,7 +989,7 @@
}
hdev->bus = HCI_USB;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
data->hdev = hdev;
@@ -1005,11 +999,8 @@
hdev->close = btusb_close;
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
- hdev->destruct = btusb_destruct;
hdev->notify = btusb_notify;
- hdev->owner = THIS_MODULE;
-
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
@@ -1091,9 +1082,6 @@
return;
hdev = data->hdev;
-
- __hci_dev_hold(hdev);
-
usb_set_intfdata(data->intf, NULL);
if (data->isoc)
@@ -1106,9 +1094,8 @@
else if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
- __hci_dev_put(hdev);
-
hci_free_dev(hdev);
+ kfree(data);
}
#ifdef CONFIG_PM
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index b5f83b4..8869469 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -161,7 +161,7 @@
return -EBUSY;
/* provide contexts for callbacks from ST */
- hst = hdev->driver_data;
+ hst = hci_get_drvdata(hdev);
for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
ti_st_proto[i].priv_data = hst;
@@ -236,7 +236,7 @@
static int ti_st_close(struct hci_dev *hdev)
{
int err, i;
- struct ti_st *hst = hdev->driver_data;
+ struct ti_st *hst = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
@@ -264,7 +264,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- hst = hdev->driver_data;
+ hst = hci_get_drvdata(hdev);
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -291,14 +291,6 @@
return 0;
}
-static void ti_st_destruct(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
- /* do nothing here, since platform remove
- * would free the hdev->driver_data
- */
-}
-
static int bt_ti_probe(struct platform_device *pdev)
{
static struct ti_st *hst;
@@ -320,13 +312,11 @@
hst->hdev = hdev;
hdev->bus = HCI_UART;
- hdev->driver_data = hst;
+ hci_set_drvdata(hdev, hst);
hdev->open = ti_st_open;
hdev->close = ti_st_close;
hdev->flush = NULL;
hdev->send = ti_st_send_frame;
- hdev->destruct = ti_st_destruct;
- hdev->owner = THIS_MODULE;
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 969bb22..049c059 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -83,9 +83,6 @@
static int dtl1_config(struct pcmcia_device *link);
-static void dtl1_release(struct pcmcia_device *link);
-
-static void dtl1_detach(struct pcmcia_device *p_dev);
/* Transmit states */
@@ -367,7 +364,7 @@
static int dtl1_hci_flush(struct hci_dev *hdev)
{
- dtl1_info_t *info = (dtl1_info_t *)(hdev->driver_data);
+ dtl1_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -399,7 +396,7 @@
return -ENODEV;
}
- info = (dtl1_info_t *)(hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -442,11 +439,6 @@
}
-static void dtl1_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -483,18 +475,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = dtl1_hci_open;
hdev->close = dtl1_hci_close;
hdev->flush = dtl1_hci_flush;
hdev->send = dtl1_hci_send_frame;
- hdev->destruct = dtl1_hci_destruct;
hdev->ioctl = dtl1_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
@@ -579,8 +568,8 @@
{
dtl1_info_t *info = link->priv;
- dtl1_release(link);
-
+ dtl1_close(info);
+ pcmcia_disable_device(link);
kfree(info);
}
@@ -619,21 +608,10 @@
return 0;
failed:
- dtl1_release(link);
+ dtl1_detach(link);
return -ENODEV;
}
-
-static void dtl1_release(struct pcmcia_device *link)
-{
- dtl1_info_t *info = link->priv;
-
- dtl1_close(info);
-
- pcmcia_disable_device(link);
-}
-
-
static const struct pcmcia_device_id dtl1_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82),
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 4093935..12172a6 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -112,7 +112,7 @@
BT_DBG("hu %p", hu);
- ath = kzalloc(sizeof(*ath), GFP_ATOMIC);
+ ath = kzalloc(sizeof(*ath), GFP_KERNEL);
if (!ath)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index a767d4de..661a8dc 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -692,7 +692,7 @@
BT_DBG("hu %p", hu);
- bcsp = kzalloc(sizeof(*bcsp), GFP_ATOMIC);
+ bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL);
if (!bcsp)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 2fcd8b3..7483294 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -69,7 +69,7 @@
BT_DBG("hu %p", hu);
- h4 = kzalloc(sizeof(*h4), GFP_ATOMIC);
+ h4 = kzalloc(sizeof(*h4), GFP_KERNEL);
if (!h4)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 0711448..fd5adb4 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -48,8 +48,6 @@
#define VERSION "2.2"
-static bool reset = 0;
-
static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
int hci_uart_register_proto(struct hci_uart_proto *p)
@@ -174,7 +172,7 @@
/* Reset device */
static int hci_uart_flush(struct hci_dev *hdev)
{
- struct hci_uart *hu = (struct hci_uart *) hdev->driver_data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
struct tty_struct *tty = hu->tty;
BT_DBG("hdev %p tty %p", hdev, tty);
@@ -220,7 +218,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- hu = (struct hci_uart *) hdev->driver_data;
+ hu = hci_get_drvdata(hdev);
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
@@ -231,15 +229,6 @@
return 0;
}
-static void hci_uart_destruct(struct hci_dev *hdev)
-{
- if (!hdev)
- return;
-
- BT_DBG("%s", hdev->name);
- kfree(hdev->driver_data);
-}
-
/* ------ LDISC part ------ */
/* hci_uart_tty_open
*
@@ -316,6 +305,8 @@
hci_free_dev(hdev);
}
}
+
+ kfree(hu);
}
}
@@ -391,23 +382,25 @@
hu->hdev = hdev;
hdev->bus = HCI_UART;
- hdev->driver_data = hu;
+ hci_set_drvdata(hdev, hu);
hdev->open = hci_uart_open;
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
- hdev->destruct = hci_uart_destruct;
hdev->parent = hu->tty->dev;
- hdev->owner = THIS_MODULE;
-
- if (!reset)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
-
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+
+ if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+ hdev->dev_type = HCI_AMP;
+ else
+ hdev->dev_type = HCI_BREDR;
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
@@ -594,9 +587,6 @@
module_init(hci_uart_init);
module_exit(hci_uart_exit);
-module_param(reset, bool, 0644);
-MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
-
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 7e4b435..b874c0e 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -125,7 +125,7 @@
BT_DBG("hu %p", hu);
- ll = kzalloc(sizeof(*ll), GFP_ATOMIC);
+ ll = kzalloc(sizeof(*ll), GFP_KERNEL);
if (!ll)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 99fb352..6cf6ab22 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -45,6 +45,8 @@
#define HCI_UART_ATH3K 5
#define HCI_UART_RAW_DEVICE 0
+#define HCI_UART_RESET_ON_INIT 1
+#define HCI_UART_CREATE_AMP 2
struct hci_uart;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 2ed6ab1..158bfe5 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -61,7 +61,7 @@
static int vhci_close_dev(struct hci_dev *hdev)
{
- struct vhci_data *data = hdev->driver_data;
+ struct vhci_data *data = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
@@ -73,7 +73,7 @@
static int vhci_flush(struct hci_dev *hdev)
{
- struct vhci_data *data = hdev->driver_data;
+ struct vhci_data *data = hci_get_drvdata(hdev);
skb_queue_purge(&data->readq);
@@ -93,7 +93,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- data = hdev->driver_data;
+ data = hci_get_drvdata(hdev);
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&data->readq, skb);
@@ -103,11 +103,6 @@
return 0;
}
-static void vhci_destruct(struct hci_dev *hdev)
-{
- kfree(hdev->driver_data);
-}
-
static inline ssize_t vhci_get_user(struct vhci_data *data,
const char __user *buf, size_t count)
{
@@ -239,7 +234,7 @@
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
if (amp)
hdev->dev_type = HCI_AMP;
@@ -248,9 +243,6 @@
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
- hdev->destruct = vhci_destruct;
-
- hdev->owner = THIS_MODULE;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
@@ -273,6 +265,7 @@
hci_free_dev(hdev);
file->private_data = NULL;
+ kfree(data);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index fbcd848..17ca72c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2362,6 +2362,9 @@
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+ if (rdev->family < CHIP_CAYMAN)
+ sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 2d1f6c5..73e2c7c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -314,6 +314,10 @@
0x00000000, /* VGT_VTX_CNT_EN */
0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
@@ -626,6 +630,10 @@
0x00000000, /* VGT_VTX_CNT_EN */
0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 3ee1fd7..9b23670 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -831,6 +831,7 @@
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
+# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8b3d8ed..8c9a811 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1057,7 +1057,7 @@
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
- if (ASIC_IS_DCE3(rdev)) {
+ if (0) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8c49fef..3d31433 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1078,15 +1078,21 @@
.create_handle = radeon_user_framebuffer_create_handle,
};
-void
+int
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
+ int ret;
rfb->obj = obj;
- drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ if (ret) {
+ rfb->obj = NULL;
+ return ret;
+ }
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+ return 0;
}
static struct drm_framebuffer *
@@ -1096,6 +1102,7 @@
{
struct drm_gem_object *obj;
struct radeon_framebuffer *radeon_fb;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
@@ -1108,7 +1115,12 @@
if (radeon_fb == NULL)
return ERR_PTR(-ENOMEM);
- radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(radeon_fb);
+ drm_gem_object_unreference_unlocked(obj);
+ return NULL;
+ }
return &radeon_fb->base;
}
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 9419c51..26e9270 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -307,8 +307,6 @@
bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -326,7 +324,7 @@
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
@@ -348,7 +346,7 @@
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cf2bf35..195471c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -209,6 +209,11 @@
sizes->surface_depth);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon object %d\n", ret);
+ return ret;
+ }
+
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
@@ -220,7 +225,11 @@
info->par = rfbdev;
- radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ if (ret) {
+ DRM_ERROR("failed to initalise framebuffer %d\n", ret);
+ goto out_unref;
+ }
fb = &rfbdev->rfb.base;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4330e32..8a85598 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -649,7 +649,7 @@
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-void radeon_framebuffer_init(struct drm_device *dev,
+int radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0226040..dad895f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -497,8 +497,9 @@
If you say yes here, you get support for JEDEC JC42.4 compliant
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
- to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
+ to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
+ MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
+ STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
This driver can also be built as a module. If so, the module
will be called jc42.
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 28c09ee..b927ee5c 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -64,6 +64,7 @@
/* Manufacturer IDs */
#define ADT_MANID 0x11d4 /* Analog Devices */
+#define ATMEL_MANID 0x001f /* Atmel */
#define MAX_MANID 0x004d /* Maxim */
#define IDT_MANID 0x00b3 /* IDT */
#define MCP_MANID 0x0054 /* Microchip */
@@ -77,15 +78,25 @@
#define ADT7408_DEVID 0x0801
#define ADT7408_DEVID_MASK 0xffff
+/* Atmel */
+#define AT30TS00_DEVID 0x8201
+#define AT30TS00_DEVID_MASK 0xffff
+
/* IDT */
#define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */
#define TS3000B3_DEVID_MASK 0xffff
+#define TS3000GB2_DEVID 0x2912 /* Also matches TSE2002GB2 */
+#define TS3000GB2_DEVID_MASK 0xffff
+
/* Maxim */
#define MAX6604_DEVID 0x3e00
#define MAX6604_DEVID_MASK 0xffff
/* Microchip */
+#define MCP9804_DEVID 0x0200
+#define MCP9804_DEVID_MASK 0xfffc
+
#define MCP98242_DEVID 0x2000
#define MCP98242_DEVID_MASK 0xfffc
@@ -113,6 +124,12 @@
#define STTS424E_DEVID 0x0000
#define STTS424E_DEVID_MASK 0xfffe
+#define STTS2002_DEVID 0x0300
+#define STTS2002_DEVID_MASK 0xffff
+
+#define STTS3000_DEVID 0x0200
+#define STTS3000_DEVID_MASK 0xffff
+
static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
struct jc42_chips {
@@ -123,8 +140,11 @@
static struct jc42_chips jc42_chips[] = {
{ ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
+ { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
{ IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK },
+ { IDT_MANID, TS3000GB2_DEVID, TS3000GB2_DEVID_MASK },
{ MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
+ { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
@@ -133,6 +153,8 @@
{ NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
{ STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
{ STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
+ { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK },
+ { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
};
/* Each client has this additional data */
@@ -159,10 +181,12 @@
static const struct i2c_device_id jc42_id[] = {
{ "adt7408", 0 },
+ { "at30ts00", 0 },
{ "cat94ts02", 0 },
{ "cat6095", 0 },
{ "jc42", 0 },
{ "max6604", 0 },
+ { "mcp9804", 0 },
{ "mcp9805", 0 },
{ "mcp98242", 0 },
{ "mcp98243", 0 },
@@ -171,8 +195,10 @@
{ "se97b", 0 },
{ "se98", 0 },
{ "stts424", 0 },
- { "tse2002b3", 0 },
- { "ts3000b3", 0 },
+ { "stts2002", 0 },
+ { "stts3000", 0 },
+ { "tse2002", 0 },
+ { "ts3000", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 00460d8..d89b339 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -54,7 +54,8 @@
lcrit_alarm, crit_alarm */
#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
+ */
#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
lcrit_alarm, crit_alarm */
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 48c7b4a..880b90c 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -33,6 +33,7 @@
struct zl6100_data {
int id;
ktime_t access; /* chip access time */
+ int delay; /* Delay between chip accesses in uS */
struct pmbus_driver_info info;
};
@@ -52,10 +53,10 @@
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
- if (delay) {
+ if (data->delay) {
s64 delta = ktime_us_delta(ktime_get(), data->access);
- if (delta < delay)
- udelay(delay - delta);
+ if (delta < data->delay)
+ udelay(data->delay - delta);
}
}
@@ -207,8 +208,9 @@
* can be cleared later for additional chips if tests show that it
* is not needed (in other words, better be safe than sorry).
*/
+ data->delay = delay;
if (data->id == zl2004 || data->id == zl6105)
- delay = 0;
+ data->delay = 0;
/*
* Since there was a direct I2C device access above, wait before
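
Moving the delay into struct zl6100_data lets each chip instance carry its own access-spacing requirement instead of relying on a shared module-wide value. A hedged sketch of how a read path typically honours it is below; the wrapper name and its argument list are assumptions, while zl6100_wait() and the data->access timestamp follow the hunk above.

	#include <linux/i2c.h>
	#include <linux/ktime.h>

	/* Sketch only: space out chip accesses by data->delay microseconds. */
	static int zl6100_read_word(struct i2c_client *client,
				    struct zl6100_data *data, int reg)
	{
		int ret;

		zl6100_wait(data);		/* wait out data->delay since the last access */
		ret = i2c_smbus_read_word_data(client, reg);
		data->access = ktime_get();	/* stamp this access for the next caller */
		return ret;
	}
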
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index afc166f..7df5bfe 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -332,7 +332,7 @@
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
struct input_event event;
- int retval;
+ int retval = 0;
if (count < input_event_size())
return -EINVAL;
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 3765137..f3bc418 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -172,7 +172,7 @@
}
/*** Module ***/
-#if CONFIG_PM
+#if CONFIG_PM_SLEEP
static int twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -189,10 +189,10 @@
vibra_disable_leds();
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
-#endif
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
@@ -273,9 +273,7 @@
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &twl4030_vibra_pm_ops,
-#endif
},
};
module_platform_driver(twl4030_vibra_driver);
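
The twl4030-vibra fix above keeps SIMPLE_DEV_PM_OPS() outside the preprocessor guard so that twl4030_vibra_pm_ops always exists, and guards only the callbacks with CONFIG_PM_SLEEP; the .pm pointer can then be set unconditionally. The same pattern in miniature, with placeholder names (the real driver uses "#if CONFIG_PM_SLEEP"; "#ifdef" is the more common spelling of the same idiom):

	#include <linux/pm.h>
	#include <linux/platform_device.h>

	#ifdef CONFIG_PM_SLEEP
	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev)  { return 0; }
	#endif

	/* SIMPLE_DEV_PM_OPS() references the callbacks only when PM_SLEEP is on. */
	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct platform_driver example_driver = {
		.driver = {
			.name	= "example",
			.pm	= &example_pm_ops,	/* safe without an #ifdef */
		},
	};
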
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index bd87380..4c6a72d 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -952,7 +952,9 @@
/*
* First try "E6 report".
- * ALPS should return 0,0,10 or 0,0,100
+ * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+ * Bits 0-2 of the first byte will be set if any buttons are
+ * pressed.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
@@ -968,7 +970,8 @@
psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
param[0], param[1], param[2]);
- if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
+ if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
+ (param[2] != 10 && param[2] != 100))
return NULL;
/*
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index 58a8775..e53f408 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -77,6 +77,8 @@
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_ARCH_HAS_HCD
select USB
+ select NEW_LEDS
+ select LEDS_CLASS
help
Say Y here if you want to use the USB version of the Wacom Intuos
or Graphire tablet. Make sure to say Y to "Mouse support"
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 88672ec..cd3ed29 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -926,7 +926,7 @@
{
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
- int count = data[1] & 0x03;
+ int count = data[1] & 0x07;
int i;
if (data[0] != 0x02)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bdea288..a35e98a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -275,7 +275,7 @@
}
/* Programs the physical address of the device table into the IOMMU hardware */
-static void __init iommu_set_device_table(struct amd_iommu *iommu)
+static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 9fb18c1..b280c43 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -323,7 +323,7 @@
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
- if (!error && bio_submitted_while_down &&
+ if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ad2eba4..ea5dd28 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -296,6 +296,8 @@
unsigned offset;
unsigned num_bvecs;
sector_t remaining = where->count;
+ struct request_queue *q = bdev_get_queue(where->bdev);
+ sector_t discard_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
@@ -305,9 +307,12 @@
/*
* Allocate a suitably sized-bio.
*/
- num_bvecs = dm_sector_div_up(remaining,
- (PAGE_SIZE >> SECTOR_SHIFT));
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
+ if (rw & REQ_DISCARD)
+ num_bvecs = 1;
+ else
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+ dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
@@ -315,10 +320,14 @@
bio->bi_destructor = dm_bio_destructor;
store_io_and_region_in_bio(bio, io, region);
- /*
- * Try and add as many pages as possible.
- */
- while (remaining) {
+ if (rw & REQ_DISCARD) {
+ discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = discard_sectors << SECTOR_SHIFT;
+ remaining -= discard_sectors;
+ } else while (remaining) {
+ /*
+ * Try and add as many pages as possible.
+ */
dp->get_page(dp, &page, &len, &offset);
len = min(len, to_bytes(remaining));
if (!bio_add_page(bio, page, len, offset))
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 31c2dc2..1ce84ed 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1437,7 +1437,7 @@
if (!argc) {
DMWARN("Empty message received.");
- goto out;
+ goto out_argv;
}
table = dm_get_live_table(md);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 86cb7e5..787022c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -668,7 +668,14 @@
return ret;
sb = page_address(rdev->sb_page);
- if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+
+ /*
+ * Two cases that we want to write new superblocks and rebuild:
+ * 1) New device (no matching magic number)
+ * 2) Device specified for rebuild (!In_sync w/ offset == 0)
+ */
+ if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
+ (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
super_sync(rdev->mddev, rdev);
set_bit(FirstUse, &rdev->flags);
@@ -745,11 +752,8 @@
*/
rdev_for_each(r, t, mddev) {
if (!test_bit(In_sync, &r->flags)) {
- if (!test_bit(FirstUse, &r->flags))
- DMERR("Superblock area of "
- "rebuild device %d should have been "
- "cleared.", r->raid_disk);
- set_bit(FirstUse, &r->flags);
+ DMINFO("Device %d specified for rebuild: "
+ "Clearing superblock", r->raid_disk);
rebuilds++;
} else if (test_bit(FirstUse, &r->flags))
new_devs++;
@@ -971,6 +975,7 @@
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
+ ti->num_flush_requests = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 59c4f04..237571a 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -385,6 +385,7 @@
data_sm = dm_sm_disk_create(tm, nr_blocks);
if (IS_ERR(data_sm)) {
DMERR("sm_disk_create failed");
+ dm_tm_unlock(tm, sblock);
r = PTR_ERR(data_sm);
goto bad;
}
@@ -789,6 +790,11 @@
return 0;
}
+/*
+ * __open_device: Returns @td corresponding to device with id @dev,
+ * creating it if @create is set and incrementing @td->open_count.
+ * On failure, @td is undefined.
+ */
static int __open_device(struct dm_pool_metadata *pmd,
dm_thin_id dev, int create,
struct dm_thin_device **td)
@@ -799,10 +805,16 @@
struct disk_device_details details_le;
/*
- * Check the device isn't already open.
+ * If the device is already open, return it.
*/
list_for_each_entry(td2, &pmd->thin_devices, list)
if (td2->id == dev) {
+ /*
+ * May not create an already-open device.
+ */
+ if (create)
+ return -EEXIST;
+
td2->open_count++;
*td = td2;
return 0;
@@ -817,6 +829,9 @@
if (r != -ENODATA || !create)
return r;
+ /*
+ * Create new device.
+ */
changed = 1;
details_le.mapped_blocks = 0;
details_le.transaction_id = cpu_to_le64(pmd->trans_id);
@@ -882,12 +897,10 @@
r = __open_device(pmd, dev, 1, &td);
if (r) {
- __close_device(td);
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_del(&pmd->bl_info, dev_root);
return r;
}
- td->changed = 1;
__close_device(td);
return r;
@@ -967,14 +980,14 @@
goto bad;
r = __set_snapshot_details(pmd, td, origin, pmd->time);
+ __close_device(td);
+
if (r)
goto bad;
- __close_device(td);
return 0;
bad:
- __close_device(td);
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_remove(&pmd->details_info, pmd->details_root,
&key, &pmd->details_root);
@@ -1211,6 +1224,8 @@
if (r)
return r;
+ td->mapped_blocks--;
+ td->changed = 1;
pmd->need_commit = 1;
return 0;
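
The new comment above spells out __open_device()'s reference-count contract: every successful open increments td->open_count and must be paired with __close_device(), and on failure *td is undefined, which is why the error paths touched in this hunk no longer call __close_device(). A minimal sketch of the calling pattern, with the surrounding locking and metadata arguments elided (the wrapper name is hypothetical):

	/* Sketch of the contract, not the driver's exact code. */
	static int example_with_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
	{
		struct dm_thin_device *td;
		int r;

		r = __open_device(pmd, dev, 0, &td);	/* create == 0: must already exist */
		if (r)
			return r;	/* td is undefined here; do not close it */

		/* ... operate on td ... */

		__close_device(td);	/* drop the open_count taken above */
		return 0;
	}
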
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index c8afd62..9a66e2a 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1031,7 +1031,7 @@
dev->netdev_ops = &cfhsi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
+ dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
skb_queue_head_init(&cfhsi->qhead);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 3474a61..c63a64c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -126,6 +126,7 @@
source "drivers/net/ethernet/nuvoton/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
+source "drivers/net/ethernet/nxp/Kconfig"
source "drivers/net/ethernet/octeon/Kconfig"
source "drivers/net/ethernet/oki-semi/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 08d5f03..9676a51 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -47,6 +47,7 @@
obj-$(CONFIG_NET_NETX) += netx-eth.o
obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
+obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0f21a9b..1ef0c927 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1711,7 +1711,7 @@
"atl1c hardware error (status = 0x%x)\n",
status & ISR_ERROR);
/* reset MAC */
- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
schedule_work(&adapter->common_task);
return IRQ_HANDLED;
}
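
The atl1c one-liner above matters because work_event is also updated from process context; a plain |= is a non-atomic read-modify-write, so a bit set from the interrupt handler can be lost. set_bit() makes the update atomic. A tiny sketch of the idiom follows, assuming ATL1C_WORK_EVENT_RESET is a bit number and work_event is an unsigned long used as a bit mask (both assumptions for illustration):

	#include <linux/bitops.h>

	static unsigned long work_event;	/* pending-event bit mask (type assumed) */

	static void example_irq_path(void)
	{
		/* "work_event |= ..." would not be atomic and could drop updates. */
		set_bit(ATL1C_WORK_EVENT_RESET, &work_event);
	}

	static void example_work_path(void)
	{
		if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &work_event)) {
			/* ... perform the MAC reset ... */
		}
	}
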
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f82dfff5..c11e50d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2007,6 +2007,15 @@
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
+ /* mark driver is loaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+ DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
/* Wait for all pending SP commands to complete */
if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
@@ -2060,6 +2069,14 @@
int i;
bool global = false;
+ /* mark driver is unloaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 33aa7de..cc02ae5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1143,7 +1143,7 @@
{
struct bnx2x *bp = fp->bp;
u16 ring_prod, cqe_ring_prod;
- int i;
+ int i, failure_cnt = 0;
fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
@@ -1153,18 +1153,17 @@
*/
for (i = 0; i < rx_ring_size; i++) {
if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ failure_cnt++;
continue;
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+ WARN_ON(ring_prod <= (i - failure_cnt));
}
- if (fp->eth_q_stats.rx_skb_alloc_failed)
- BNX2X_ERR("was only able to allocate "
- "%d rx skbs on queue[%d]\n",
- (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+ if (failure_cnt)
+ BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+ i - failure_cnt, fp->index);
fp->rx_bd_prod = ring_prod;
/* Limit the CQE producer by the CQE ring size */
@@ -1172,7 +1171,9 @@
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- return i - fp->eth_q_stats.rx_skb_alloc_failed;
+ fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+ return i - failure_cnt;
}
/* Statistics ID are global per chip/path, while Client IDs for E1x are per
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 9c24d53..858d1b5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -222,19 +222,22 @@
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
- if ((bp->state == BNX2X_STATE_OPEN) &&
- !(bp->flags & MF_FUNC_DIS) &&
- (bp->link_vars.link_up)) {
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
- cmd->duplex = bp->link_vars.duplex;
- } else {
- ethtool_cmd_speed_set(
- cmd, bp->link_params.req_line_speed[cfg_idx]);
- cmd->duplex = bp->link_params.req_duplex[cfg_idx];
- }
+ if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
+ if (!(bp->flags & MF_FUNC_DIS)) {
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->duplex = bp->link_vars.duplex;
+ } else {
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ }
- if (IS_MF(bp))
- ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ if (IS_MF(bp) && !BP_NOMCP(bp))
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ }
cmd->port = bnx2x_get_port_type(bp);
@@ -309,6 +312,10 @@
speed = ethtool_cmd_speed(cmd);
+ /* If we received a request for an unknown duplex, assume full */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+
if (IS_MF_SI(bp)) {
u32 part;
u32 line_speed = bp->link_vars.line_speed;
@@ -1443,7 +1450,7 @@
BNX2X_FLOW_CTRL_AUTO);
if (!epause->autoneg)
- cfg_reg = bp->link_vars.flow_ctrl;
+ cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
else
cfg_reg = bp->link_params.req_fc_auto_adv;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b4afef6..bf14a08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2800,6 +2800,7 @@
rxq_init->sge_buf_sz = sge_sz;
rxq_init->max_sges_pkt = max_sge;
rxq_init->rss_engine_id = BP_FUNC(bp);
+ rxq_init->mcast_engine_id = BP_FUNC(bp);
/* Maximum number or simultaneous TPA aggregation for this Queue.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index adfae6b..1999fa5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -611,12 +611,6 @@
return rx_tx_flag;
}
-/* LLH CAM line allocations */
-enum {
- LLH_CAM_ISCSI_ETH_LINE = 0,
- LLH_CAM_ETH_LINE,
- LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
-};
static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
bool add, unsigned char *dev_addr, int index)
@@ -625,7 +619,7 @@
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
NIG_REG_LLH0_FUNC_MEM;
- if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
return;
DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -731,9 +725,10 @@
if (cmd != BNX2X_VLAN_MAC_MOVE) {
if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
bnx2x_set_mac_in_nig(bp, add, mac,
- LLH_CAM_ISCSI_ETH_LINE);
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE);
else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
- bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ BNX2X_LLH_CAM_ETH_LINE);
}
/* Reset the ramrod data buffer for the first rule */
@@ -869,7 +864,7 @@
/* Reset the ramrod data buffer */
memset(config, 0, sizeof(*config));
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
cam_offset, add,
elem->cmd_data.vlan_mac.u.mac.mac, 0,
ETH_VLAN_FILTER_ANY_VLAN, config);
@@ -4478,7 +4473,7 @@
rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
- rx_data->approx_mcast_engine_id = o->func_id;
+ rx_data->approx_mcast_engine_id = params->mcast_engine_id;
rx_data->is_approx_mcast = 1;
}
@@ -5186,13 +5181,6 @@
obj->set_pending = bnx2x_queue_set_pending;
}
-void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
- struct bnx2x_queue_sp_obj *obj,
- u32 cid, u8 index)
-{
- obj->cids[index] = cid;
-}
-
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 685d42e..4ce351b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -423,6 +423,13 @@
int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
};
+enum {
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0,
+ BNX2X_LLH_CAM_ETH_LINE,
+ BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in
@@ -890,6 +897,9 @@
u8 max_tpa_queues;
u8 rss_engine_id;
+ /* valid iff BNX2X_Q_FLG_MCAST */
+ u8 mcast_engine_id;
+
u8 cache_line_log;
u8 sb_cq_index;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bc236b6..b065746 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5595,7 +5595,7 @@
}
}
- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
tnapi->tx_cons = sw_idx;
@@ -6971,7 +6971,7 @@
}
skb_tx_timestamp(skb);
- netdev_sent_queue(tp->dev, skb->len);
+ netdev_tx_sent_queue(txq, skb->len);
/* Sync BD data before updating mailbox */
wmb();
@@ -7396,8 +7396,8 @@
dev_kfree_skb_any(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
}
- netdev_reset_queue(tp->dev);
}
/* Initialize tx/rx rings for packet processing.
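
The tg3 hunks above switch the driver from the per-device netdev_{sent,completed,reset}_queue() helpers to the per-queue netdev_tx_*_queue() variants, which is what Byte Queue Limits needs on multiqueue hardware: each TX ring accounts its own bytes in flight. The usual three touch points look roughly like this sketch; the function and index names are placeholders.

	#include <linux/netdevice.h>

	/* Transmit path: account the bytes handed to this queue's hardware ring. */
	static void example_xmit_account(struct net_device *dev, int qidx,
					 struct sk_buff *skb)
	{
		netdev_tx_sent_queue(netdev_get_tx_queue(dev, qidx), skb->len);
	}

	/* Completion path: report what the hardware finished on this queue. */
	static void example_completion_account(struct net_device *dev, int qidx,
					       unsigned int pkts, unsigned int bytes)
	{
		netdev_tx_completed_queue(netdev_get_tx_queue(dev, qidx), pkts, bytes);
	}

	/* Ring teardown: drop any outstanding accounting for this queue. */
	static void example_ring_reset(struct net_device *dev, int qidx)
	{
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, qidx));
	}
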
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1d88942..05ff076a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -196,6 +196,8 @@
CH_DEVICE(0x4408, 4),
CH_DEVICE(0x4409, 4),
CH_DEVICE(0x440a, 4),
+ CH_DEVICE(0x440d, 4),
+ CH_DEVICE(0x440e, 4),
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 3f580c0..25e3308 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2890,6 +2890,8 @@
CH_DEVICE(0x4808, 0), /* T420-cx */
CH_DEVICE(0x4809, 0), /* T420-bt */
CH_DEVICE(0x480a, 0), /* T404-bt */
+ CH_DEVICE(0x480d, 0), /* T480-cr */
+ CH_DEVICE(0x480e, 0), /* T440-lp-cr */
{ 0, }
};
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index cf1fb4b..afe9b16 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.38"
+#define DRV_VERSION "2.1.1.39"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9080ed6..77b4e87 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1069,7 +1069,7 @@
if (err)
return err;
- if (is_valid_ether_addr(mac)) {
+ if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
if (vf == PORT_SELF_VF) {
memcpy(pp->vf_mac, mac, ETH_ALEN);
return 0;
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index 9e16f3f..b9773d2 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -29,10 +29,6 @@
To compile this driver as a module, choose M here. The module will
be called ibmveth.
-config ISERIES_VETH
- tristate "iSeries Virtual Ethernet driver support"
- depends on PPC_ISERIES
-
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile
index 5a7d4e9..2f04e71 100644
--- a/drivers/net/ethernet/ibm/Makefile
+++ b/drivers/net/ethernet/ibm/Makefile
@@ -3,6 +3,5 @@
#
obj-$(CONFIG_IBMVETH) += ibmveth.o
-obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
obj-$(CONFIG_IBM_EMAC) += emac/
obj-$(CONFIG_EHEA) += ehea/
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 8b73dd4..3516e17 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -336,7 +336,9 @@
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
- return &port->stats;
+ stats->multicast = port->stats.multicast;
+ stats->rx_errors = port->stats.rx_errors;
+ return stats;
}
static void ehea_update_stats(struct work_struct *work)
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
deleted file mode 100644
index 1cafa65..0000000
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ /dev/null
@@ -1,1708 +0,0 @@
-/* File veth.c created by Kyle A. Lucke on Mon Aug 7 2000. */
-/*
- * IBM eServer iSeries Virtual Ethernet Device Driver
- * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.
- * Substantially cleaned up by:
- * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
- * Copyright (C) 2004-2005 Michael Ellerman, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- *
- * This module implements the virtual ethernet device for iSeries LPAR
- * Linux. It uses hypervisor message passing to implement an
- * ethernet-like network device communicating between partitions on
- * the iSeries.
- *
- * The iSeries LPAR hypervisor currently allows for up to 16 different
- * virtual ethernets. These are all dynamically configurable on
- * OS/400 partitions, but dynamic configuration is not supported under
- * Linux yet. An ethXX network device will be created for each
- * virtual ethernet this partition is connected to.
- *
- * - This driver is responsible for routing packets to and from other
- * partitions. The MAC addresses used by the virtual ethernets
- * contains meaning and must not be modified.
- *
- * - Having 2 virtual ethernets to the same remote partition DOES NOT
- * double the available bandwidth. The 2 devices will share the
- * available hypervisor bandwidth.
- *
- * - If you send a packet to your own mac address, it will just be
- * dropped, you won't get it on the receive side.
- *
- * - Multicast is implemented by sending the frame frame to every
- * other partition. It is the responsibility of the receiving
- * partition to filter the addresses desired.
- *
- * Tunable parameters:
- *
- * VETH_NUMBUFFERS: This compile time option defaults to 120. It
- * controls how much memory Linux will allocate per remote partition
- * it is communicating with. It can be thought of as the maximum
- * number of packets outstanding to a remote partition at a time.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/ethtool.h>
-#include <linux/if_ether.h>
-#include <linux/slab.h>
-
-#include <asm/abs_addr.h>
-#include <asm/iseries/mf.h>
-#include <asm/uaccess.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iommu.h>
-#include <asm/vio.h>
-
-#undef DEBUG
-
-MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
-MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
-MODULE_LICENSE("GPL");
-
-#define VETH_EVENT_CAP (0)
-#define VETH_EVENT_FRAMES (1)
-#define VETH_EVENT_MONITOR (2)
-#define VETH_EVENT_FRAMES_ACK (3)
-
-#define VETH_MAX_ACKS_PER_MSG (20)
-#define VETH_MAX_FRAMES_PER_MSG (6)
-
-struct veth_frames_data {
- u32 addr[VETH_MAX_FRAMES_PER_MSG];
- u16 len[VETH_MAX_FRAMES_PER_MSG];
- u32 eofmask;
-};
-#define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG)
-
-struct veth_frames_ack_data {
- u16 token[VETH_MAX_ACKS_PER_MSG];
-};
-
-struct veth_cap_data {
- u8 caps_version;
- u8 rsvd1;
- u16 num_buffers;
- u16 ack_threshold;
- u16 rsvd2;
- u32 ack_timeout;
- u32 rsvd3;
- u64 rsvd4[3];
-};
-
-struct veth_lpevent {
- struct HvLpEvent base_event;
- union {
- struct veth_cap_data caps_data;
- struct veth_frames_data frames_data;
- struct veth_frames_ack_data frames_ack_data;
- } u;
-
-};
-
-#define DRV_NAME "iseries_veth"
-#define DRV_VERSION "2.0"
-
-#define VETH_NUMBUFFERS (120)
-#define VETH_ACKTIMEOUT (1000000) /* microseconds */
-#define VETH_MAX_MCAST (12)
-
-#define VETH_MAX_MTU (9000)
-
-#if VETH_NUMBUFFERS < 10
-#define ACK_THRESHOLD (1)
-#elif VETH_NUMBUFFERS < 20
-#define ACK_THRESHOLD (4)
-#elif VETH_NUMBUFFERS < 40
-#define ACK_THRESHOLD (10)
-#else
-#define ACK_THRESHOLD (20)
-#endif
-
-#define VETH_STATE_SHUTDOWN (0x0001)
-#define VETH_STATE_OPEN (0x0002)
-#define VETH_STATE_RESET (0x0004)
-#define VETH_STATE_SENTMON (0x0008)
-#define VETH_STATE_SENTCAPS (0x0010)
-#define VETH_STATE_GOTCAPACK (0x0020)
-#define VETH_STATE_GOTCAPS (0x0040)
-#define VETH_STATE_SENTCAPACK (0x0080)
-#define VETH_STATE_READY (0x0100)
-
-struct veth_msg {
- struct veth_msg *next;
- struct veth_frames_data data;
- int token;
- int in_use;
- struct sk_buff *skb;
- struct device *dev;
-};
-
-struct veth_lpar_connection {
- HvLpIndex remote_lp;
- struct delayed_work statemachine_wq;
- struct veth_msg *msgs;
- int num_events;
- struct veth_cap_data local_caps;
-
- struct kobject kobject;
- struct timer_list ack_timer;
-
- struct timer_list reset_timer;
- unsigned int reset_timeout;
- unsigned long last_contact;
- int outstanding_tx;
-
- spinlock_t lock;
- unsigned long state;
- HvLpInstanceId src_inst;
- HvLpInstanceId dst_inst;
- struct veth_lpevent cap_event, cap_ack_event;
- u16 pending_acks[VETH_MAX_ACKS_PER_MSG];
- u32 num_pending_acks;
-
- int num_ack_events;
- struct veth_cap_data remote_caps;
- u32 ack_timeout;
-
- struct veth_msg *msg_stack_head;
-};
-
-struct veth_port {
- struct device *dev;
- u64 mac_addr;
- HvLpIndexMap lpar_map;
-
- /* queue_lock protects the stopped_map and dev's queue. */
- spinlock_t queue_lock;
- HvLpIndexMap stopped_map;
-
- /* mcast_gate protects promiscuous, num_mcast & mcast_addr. */
- rwlock_t mcast_gate;
- int promiscuous;
- int num_mcast;
- u64 mcast_addr[VETH_MAX_MCAST];
-
- struct kobject kobject;
-};
-
-static HvLpIndex this_lp;
-static struct veth_lpar_connection *veth_cnx[HVMAXARCHITECTEDLPS]; /* = 0 */
-static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */
-
-static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *);
-static void veth_wake_queues(struct veth_lpar_connection *cnx);
-static void veth_stop_queues(struct veth_lpar_connection *cnx);
-static void veth_receive(struct veth_lpar_connection *, struct veth_lpevent *);
-static void veth_release_connection(struct kobject *kobject);
-static void veth_timed_ack(unsigned long ptr);
-static void veth_timed_reset(unsigned long ptr);
-
-/*
- * Utility functions
- */
-
-#define veth_info(fmt, args...) \
- printk(KERN_INFO DRV_NAME ": " fmt, ## args)
-
-#define veth_error(fmt, args...) \
- printk(KERN_ERR DRV_NAME ": Error: " fmt, ## args)
-
-#ifdef DEBUG
-#define veth_debug(fmt, args...) \
- printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
-#else
-#define veth_debug(fmt, args...) do {} while (0)
-#endif
-
-/* You must hold the connection's lock when you call this function. */
-static inline void veth_stack_push(struct veth_lpar_connection *cnx,
- struct veth_msg *msg)
-{
- msg->next = cnx->msg_stack_head;
- cnx->msg_stack_head = msg;
-}
-
-/* You must hold the connection's lock when you call this function. */
-static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx)
-{
- struct veth_msg *msg;
-
- msg = cnx->msg_stack_head;
- if (msg)
- cnx->msg_stack_head = cnx->msg_stack_head->next;
-
- return msg;
-}
-
-/* You must hold the connection's lock when you call this function. */
-static inline int veth_stack_is_empty(struct veth_lpar_connection *cnx)
-{
- return cnx->msg_stack_head == NULL;
-}
-
-static inline HvLpEvent_Rc
-veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype,
- HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype,
- u64 token,
- u64 data1, u64 data2, u64 data3, u64 data4, u64 data5)
-{
- return HvCallEvent_signalLpEventFast(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan,
- subtype, ackind, acktype,
- cnx->src_inst,
- cnx->dst_inst,
- token, data1, data2, data3,
- data4, data5);
-}
-
-static inline HvLpEvent_Rc veth_signaldata(struct veth_lpar_connection *cnx,
- u16 subtype, u64 token, void *data)
-{
- u64 *p = (u64 *) data;
-
- return veth_signalevent(cnx, subtype, HvLpEvent_AckInd_NoAck,
- HvLpEvent_AckType_ImmediateAck,
- token, p[0], p[1], p[2], p[3], p[4]);
-}
-
-struct veth_allocation {
- struct completion c;
- int num;
-};
-
-static void veth_complete_allocation(void *parm, int number)
-{
- struct veth_allocation *vc = (struct veth_allocation *)parm;
-
- vc->num = number;
- complete(&vc->c);
-}
-
-static int veth_allocate_events(HvLpIndex rlp, int number)
-{
- struct veth_allocation vc =
- { COMPLETION_INITIALIZER_ONSTACK(vc.c), 0 };
-
- mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan,
- sizeof(struct veth_lpevent), number,
- &veth_complete_allocation, &vc);
- wait_for_completion(&vc.c);
-
- return vc.num;
-}
-
-/*
- * sysfs support
- */
-
-struct veth_cnx_attribute {
- struct attribute attr;
- ssize_t (*show)(struct veth_lpar_connection *, char *buf);
- ssize_t (*store)(struct veth_lpar_connection *, const char *buf);
-};
-
-static ssize_t veth_cnx_attribute_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct veth_cnx_attribute *cnx_attr;
- struct veth_lpar_connection *cnx;
-
- cnx_attr = container_of(attr, struct veth_cnx_attribute, attr);
- cnx = container_of(kobj, struct veth_lpar_connection, kobject);
-
- if (!cnx_attr->show)
- return -EIO;
-
- return cnx_attr->show(cnx, buf);
-}
-
-#define CUSTOM_CNX_ATTR(_name, _format, _expression) \
-static ssize_t _name##_show(struct veth_lpar_connection *cnx, char *buf)\
-{ \
- return sprintf(buf, _format, _expression); \
-} \
-struct veth_cnx_attribute veth_cnx_attr_##_name = __ATTR_RO(_name)
-
-#define SIMPLE_CNX_ATTR(_name) \
- CUSTOM_CNX_ATTR(_name, "%lu\n", (unsigned long)cnx->_name)
-
-SIMPLE_CNX_ATTR(outstanding_tx);
-SIMPLE_CNX_ATTR(remote_lp);
-SIMPLE_CNX_ATTR(num_events);
-SIMPLE_CNX_ATTR(src_inst);
-SIMPLE_CNX_ATTR(dst_inst);
-SIMPLE_CNX_ATTR(num_pending_acks);
-SIMPLE_CNX_ATTR(num_ack_events);
-CUSTOM_CNX_ATTR(ack_timeout, "%d\n", jiffies_to_msecs(cnx->ack_timeout));
-CUSTOM_CNX_ATTR(reset_timeout, "%d\n", jiffies_to_msecs(cnx->reset_timeout));
-CUSTOM_CNX_ATTR(state, "0x%.4lX\n", cnx->state);
-CUSTOM_CNX_ATTR(last_contact, "%d\n", cnx->last_contact ?
- jiffies_to_msecs(jiffies - cnx->last_contact) : 0);
-
-#define GET_CNX_ATTR(_name) (&veth_cnx_attr_##_name.attr)
-
-static struct attribute *veth_cnx_default_attrs[] = {
- GET_CNX_ATTR(outstanding_tx),
- GET_CNX_ATTR(remote_lp),
- GET_CNX_ATTR(num_events),
- GET_CNX_ATTR(reset_timeout),
- GET_CNX_ATTR(last_contact),
- GET_CNX_ATTR(state),
- GET_CNX_ATTR(src_inst),
- GET_CNX_ATTR(dst_inst),
- GET_CNX_ATTR(num_pending_acks),
- GET_CNX_ATTR(num_ack_events),
- GET_CNX_ATTR(ack_timeout),
- NULL
-};
-
-static const struct sysfs_ops veth_cnx_sysfs_ops = {
- .show = veth_cnx_attribute_show
-};
-
-static struct kobj_type veth_lpar_connection_ktype = {
- .release = veth_release_connection,
- .sysfs_ops = &veth_cnx_sysfs_ops,
- .default_attrs = veth_cnx_default_attrs
-};
-
-struct veth_port_attribute {
- struct attribute attr;
- ssize_t (*show)(struct veth_port *, char *buf);
- ssize_t (*store)(struct veth_port *, const char *buf);
-};
-
-static ssize_t veth_port_attribute_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct veth_port_attribute *port_attr;
- struct veth_port *port;
-
- port_attr = container_of(attr, struct veth_port_attribute, attr);
- port = container_of(kobj, struct veth_port, kobject);
-
- if (!port_attr->show)
- return -EIO;
-
- return port_attr->show(port, buf);
-}
-
-#define CUSTOM_PORT_ATTR(_name, _format, _expression) \
-static ssize_t _name##_show(struct veth_port *port, char *buf) \
-{ \
- return sprintf(buf, _format, _expression); \
-} \
-struct veth_port_attribute veth_port_attr_##_name = __ATTR_RO(_name)
-
-#define SIMPLE_PORT_ATTR(_name) \
- CUSTOM_PORT_ATTR(_name, "%lu\n", (unsigned long)port->_name)
-
-SIMPLE_PORT_ATTR(promiscuous);
-SIMPLE_PORT_ATTR(num_mcast);
-CUSTOM_PORT_ATTR(lpar_map, "0x%X\n", port->lpar_map);
-CUSTOM_PORT_ATTR(stopped_map, "0x%X\n", port->stopped_map);
-CUSTOM_PORT_ATTR(mac_addr, "0x%llX\n", port->mac_addr);
-
-#define GET_PORT_ATTR(_name) (&veth_port_attr_##_name.attr)
-static struct attribute *veth_port_default_attrs[] = {
- GET_PORT_ATTR(mac_addr),
- GET_PORT_ATTR(lpar_map),
- GET_PORT_ATTR(stopped_map),
- GET_PORT_ATTR(promiscuous),
- GET_PORT_ATTR(num_mcast),
- NULL
-};
-
-static const struct sysfs_ops veth_port_sysfs_ops = {
- .show = veth_port_attribute_show
-};
-
-static struct kobj_type veth_port_ktype = {
- .sysfs_ops = &veth_port_sysfs_ops,
- .default_attrs = veth_port_default_attrs
-};
-
-/*
- * LPAR connection code
- */
-
-static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
-{
- schedule_delayed_work(&cnx->statemachine_wq, 0);
-}
-
-static void veth_take_cap(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnx->lock, flags);
- /* Receiving caps may mean the other end has just come up, so
- * we need to reload the instance ID of the far end */
- cnx->dst_inst =
- HvCallEvent_getTargetLpInstanceId(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan);
-
- if (cnx->state & VETH_STATE_GOTCAPS) {
- veth_error("Received a second capabilities from LPAR %d.\n",
- cnx->remote_lp);
- event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable;
- HvCallEvent_ackLpEvent((struct HvLpEvent *) event);
- } else {
- memcpy(&cnx->cap_event, event, sizeof(cnx->cap_event));
- cnx->state |= VETH_STATE_GOTCAPS;
- veth_kick_statemachine(cnx);
- }
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnx->lock, flags);
- if (cnx->state & VETH_STATE_GOTCAPACK) {
- veth_error("Received a second capabilities ack from LPAR %d.\n",
- cnx->remote_lp);
- } else {
- memcpy(&cnx->cap_ack_event, event,
- sizeof(cnx->cap_ack_event));
- cnx->state |= VETH_STATE_GOTCAPACK;
- veth_kick_statemachine(cnx);
- }
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_take_monitor_ack(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnx->lock, flags);
- veth_debug("cnx %d: lost connection.\n", cnx->remote_lp);
-
- /* Avoid kicking the statemachine once we're shutdown.
- * It's unnecessary and it could break veth_stop_connection(). */
-
- if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
- cnx->state |= VETH_STATE_RESET;
- veth_kick_statemachine(cnx);
- }
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_handle_ack(struct veth_lpevent *event)
-{
- HvLpIndex rlp = event->base_event.xTargetLp;
- struct veth_lpar_connection *cnx = veth_cnx[rlp];
-
- BUG_ON(! cnx);
-
- switch (event->base_event.xSubtype) {
- case VETH_EVENT_CAP:
- veth_take_cap_ack(cnx, event);
- break;
- case VETH_EVENT_MONITOR:
- veth_take_monitor_ack(cnx, event);
- break;
- default:
- veth_error("Unknown ack type %d from LPAR %d.\n",
- event->base_event.xSubtype, rlp);
- }
-}
-
-static void veth_handle_int(struct veth_lpevent *event)
-{
- HvLpIndex rlp = event->base_event.xSourceLp;
- struct veth_lpar_connection *cnx = veth_cnx[rlp];
- unsigned long flags;
- int i, acked = 0;
-
- BUG_ON(! cnx);
-
- switch (event->base_event.xSubtype) {
- case VETH_EVENT_CAP:
- veth_take_cap(cnx, event);
- break;
- case VETH_EVENT_MONITOR:
- /* do nothing... this'll hang out here til we're dead,
- * and the hypervisor will return it for us. */
- break;
- case VETH_EVENT_FRAMES_ACK:
- spin_lock_irqsave(&cnx->lock, flags);
-
- for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) {
- u16 msgnum = event->u.frames_ack_data.token[i];
-
- if (msgnum < VETH_NUMBUFFERS) {
- veth_recycle_msg(cnx, cnx->msgs + msgnum);
- cnx->outstanding_tx--;
- acked++;
- }
- }
-
- if (acked > 0) {
- cnx->last_contact = jiffies;
- veth_wake_queues(cnx);
- }
-
- spin_unlock_irqrestore(&cnx->lock, flags);
- break;
- case VETH_EVENT_FRAMES:
- veth_receive(cnx, event);
- break;
- default:
- veth_error("Unknown interrupt type %d from LPAR %d.\n",
- event->base_event.xSubtype, rlp);
- }
-}
-
-static void veth_handle_event(struct HvLpEvent *event)
-{
- struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
-
- if (hvlpevent_is_ack(event))
- veth_handle_ack(veth_event);
- else
- veth_handle_int(veth_event);
-}
-
-static int veth_process_caps(struct veth_lpar_connection *cnx)
-{
- struct veth_cap_data *remote_caps = &cnx->remote_caps;
- int num_acks_needed;
-
- /* Convert timer to jiffies */
- cnx->ack_timeout = remote_caps->ack_timeout * HZ / 1000000;
-
- if ( (remote_caps->num_buffers == 0) ||
- (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) ||
- (remote_caps->ack_threshold == 0) ||
- (cnx->ack_timeout == 0) ) {
- veth_error("Received incompatible capabilities from LPAR %d.\n",
- cnx->remote_lp);
- return HvLpEvent_Rc_InvalidSubtypeData;
- }
-
- num_acks_needed = (remote_caps->num_buffers
- / remote_caps->ack_threshold) + 1;
-
- /* FIXME: locking on num_ack_events? */
- if (cnx->num_ack_events < num_acks_needed) {
- int num;
-
- num = veth_allocate_events(cnx->remote_lp,
- num_acks_needed-cnx->num_ack_events);
- if (num > 0)
- cnx->num_ack_events += num;
-
- if (cnx->num_ack_events < num_acks_needed) {
- veth_error("Couldn't allocate enough ack events "
- "for LPAR %d.\n", cnx->remote_lp);
-
- return HvLpEvent_Rc_BufferNotAvailable;
- }
- }
-
-
- return HvLpEvent_Rc_Good;
-}
-
-/* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(struct work_struct *work)
-{
- struct veth_lpar_connection *cnx =
- container_of(work, struct veth_lpar_connection,
- statemachine_wq.work);
- int rlp = cnx->remote_lp;
- int rc;
-
- spin_lock_irq(&cnx->lock);
-
- restart:
- if (cnx->state & VETH_STATE_RESET) {
- if (cnx->state & VETH_STATE_OPEN)
- HvCallEvent_closeLpEventPath(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan);
-
- /*
- * Reset ack data. This prevents the ack_timer actually
- * doing anything, even if it runs one more time when
- * we drop the lock below.
- */
- memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
- cnx->num_pending_acks = 0;
-
- cnx->state &= ~(VETH_STATE_RESET | VETH_STATE_SENTMON
- | VETH_STATE_OPEN | VETH_STATE_SENTCAPS
- | VETH_STATE_GOTCAPACK | VETH_STATE_GOTCAPS
- | VETH_STATE_SENTCAPACK | VETH_STATE_READY);
-
- /* Clean up any leftover messages */
- if (cnx->msgs) {
- int i;
- for (i = 0; i < VETH_NUMBUFFERS; ++i)
- veth_recycle_msg(cnx, cnx->msgs + i);
- }
-
- cnx->outstanding_tx = 0;
- veth_wake_queues(cnx);
-
- /* Drop the lock so we can do stuff that might sleep or
- * take other locks. */
- spin_unlock_irq(&cnx->lock);
-
- del_timer_sync(&cnx->ack_timer);
- del_timer_sync(&cnx->reset_timer);
-
- spin_lock_irq(&cnx->lock);
-
- if (cnx->state & VETH_STATE_RESET)
- goto restart;
-
- /* Hack, wait for the other end to reset itself. */
- if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
- schedule_delayed_work(&cnx->statemachine_wq, 5 * HZ);
- goto out;
- }
- }
-
- if (cnx->state & VETH_STATE_SHUTDOWN)
- /* It's all over, do nothing */
- goto out;
-
- if ( !(cnx->state & VETH_STATE_OPEN) ) {
- if (! cnx->msgs || (cnx->num_events < (2 + VETH_NUMBUFFERS)) )
- goto cant_cope;
-
- HvCallEvent_openLpEventPath(rlp, HvLpEvent_Type_VirtualLan);
- cnx->src_inst =
- HvCallEvent_getSourceLpInstanceId(rlp,
- HvLpEvent_Type_VirtualLan);
- cnx->dst_inst =
- HvCallEvent_getTargetLpInstanceId(rlp,
- HvLpEvent_Type_VirtualLan);
- cnx->state |= VETH_STATE_OPEN;
- }
-
- if ( (cnx->state & VETH_STATE_OPEN) &&
- !(cnx->state & VETH_STATE_SENTMON) ) {
- rc = veth_signalevent(cnx, VETH_EVENT_MONITOR,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_DeferredAck,
- 0, 0, 0, 0, 0, 0);
-
- if (rc == HvLpEvent_Rc_Good) {
- cnx->state |= VETH_STATE_SENTMON;
- } else {
- if ( (rc != HvLpEvent_Rc_PartitionDead) &&
- (rc != HvLpEvent_Rc_PathClosed) )
- veth_error("Error sending monitor to LPAR %d, "
- "rc = %d\n", rlp, rc);
-
- /* Oh well, hope we get a cap from the other
- * end and do better when that kicks us */
- goto out;
- }
- }
-
- if ( (cnx->state & VETH_STATE_OPEN) &&
- !(cnx->state & VETH_STATE_SENTCAPS)) {
- u64 *rawcap = (u64 *)&cnx->local_caps;
-
- rc = veth_signalevent(cnx, VETH_EVENT_CAP,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- 0, rawcap[0], rawcap[1], rawcap[2],
- rawcap[3], rawcap[4]);
-
- if (rc == HvLpEvent_Rc_Good) {
- cnx->state |= VETH_STATE_SENTCAPS;
- } else {
- if ( (rc != HvLpEvent_Rc_PartitionDead) &&
- (rc != HvLpEvent_Rc_PathClosed) )
- veth_error("Error sending caps to LPAR %d, "
- "rc = %d\n", rlp, rc);
-
- /* Oh well, hope we get a cap from the other
- * end and do better when that kicks us */
- goto out;
- }
- }
-
- if ((cnx->state & VETH_STATE_GOTCAPS) &&
- !(cnx->state & VETH_STATE_SENTCAPACK)) {
- struct veth_cap_data *remote_caps = &cnx->remote_caps;
-
- memcpy(remote_caps, &cnx->cap_event.u.caps_data,
- sizeof(*remote_caps));
-
- spin_unlock_irq(&cnx->lock);
- rc = veth_process_caps(cnx);
- spin_lock_irq(&cnx->lock);
-
- /* We dropped the lock, so recheck for anything which
- * might mess us up */
- if (cnx->state & (VETH_STATE_RESET|VETH_STATE_SHUTDOWN))
- goto restart;
-
- cnx->cap_event.base_event.xRc = rc;
- HvCallEvent_ackLpEvent((struct HvLpEvent *)&cnx->cap_event);
- if (rc == HvLpEvent_Rc_Good)
- cnx->state |= VETH_STATE_SENTCAPACK;
- else
- goto cant_cope;
- }
-
- if ((cnx->state & VETH_STATE_GOTCAPACK) &&
- (cnx->state & VETH_STATE_GOTCAPS) &&
- !(cnx->state & VETH_STATE_READY)) {
- if (cnx->cap_ack_event.base_event.xRc == HvLpEvent_Rc_Good) {
- /* Start the ACK timer */
- cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
- add_timer(&cnx->ack_timer);
- cnx->state |= VETH_STATE_READY;
- } else {
- veth_error("Caps rejected by LPAR %d, rc = %d\n",
- rlp, cnx->cap_ack_event.base_event.xRc);
- goto cant_cope;
- }
- }
-
- out:
- spin_unlock_irq(&cnx->lock);
- return;
-
- cant_cope:
- /* FIXME: we get here if something happens we really can't
- * cope with. The link will never work once we get here, and
- * all we can do is not lock the rest of the system up */
- veth_error("Unrecoverable error on connection to LPAR %d, shutting down"
- " (state = 0x%04lx)\n", rlp, cnx->state);
- cnx->state |= VETH_STATE_SHUTDOWN;
- spin_unlock_irq(&cnx->lock);
-}
-
-static int veth_init_connection(u8 rlp)
-{
- struct veth_lpar_connection *cnx;
- struct veth_msg *msgs;
- int i;
-
- if ( (rlp == this_lp) ||
- ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
- return 0;
-
- cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
- if (! cnx)
- return -ENOMEM;
-
- cnx->remote_lp = rlp;
- spin_lock_init(&cnx->lock);
- INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
-
- init_timer(&cnx->ack_timer);
- cnx->ack_timer.function = veth_timed_ack;
- cnx->ack_timer.data = (unsigned long) cnx;
-
- init_timer(&cnx->reset_timer);
- cnx->reset_timer.function = veth_timed_reset;
- cnx->reset_timer.data = (unsigned long) cnx;
- cnx->reset_timeout = 5 * HZ * (VETH_ACKTIMEOUT / 1000000);
-
- memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
-
- veth_cnx[rlp] = cnx;
-
- /* This gets us 1 reference, which is held on behalf of the driver
- * infrastructure. It's released at module unload. */
- kobject_init(&cnx->kobject, &veth_lpar_connection_ktype);
-
- msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL);
- if (! msgs) {
- veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
- return -ENOMEM;
- }
-
- cnx->msgs = msgs;
-
- for (i = 0; i < VETH_NUMBUFFERS; i++) {
- msgs[i].token = i;
- veth_stack_push(cnx, msgs + i);
- }
-
- cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS);
-
- if (cnx->num_events < (2 + VETH_NUMBUFFERS)) {
- veth_error("Can't allocate enough events for LPAR %d.\n", rlp);
- return -ENOMEM;
- }
-
- cnx->local_caps.num_buffers = VETH_NUMBUFFERS;
- cnx->local_caps.ack_threshold = ACK_THRESHOLD;
- cnx->local_caps.ack_timeout = VETH_ACKTIMEOUT;
-
- return 0;
-}
-
-static void veth_stop_connection(struct veth_lpar_connection *cnx)
-{
- if (!cnx)
- return;
-
- spin_lock_irq(&cnx->lock);
- cnx->state |= VETH_STATE_RESET | VETH_STATE_SHUTDOWN;
- veth_kick_statemachine(cnx);
- spin_unlock_irq(&cnx->lock);
-
- /* ensure the statemachine runs now and waits for its completion */
- flush_delayed_work_sync(&cnx->statemachine_wq);
-}
-
-static void veth_destroy_connection(struct veth_lpar_connection *cnx)
-{
- if (!cnx)
- return;
-
- if (cnx->num_events > 0)
- mf_deallocate_lp_events(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan,
- cnx->num_events,
- NULL, NULL);
- if (cnx->num_ack_events > 0)
- mf_deallocate_lp_events(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan,
- cnx->num_ack_events,
- NULL, NULL);
-
- kfree(cnx->msgs);
- veth_cnx[cnx->remote_lp] = NULL;
- kfree(cnx);
-}
-
-static void veth_release_connection(struct kobject *kobj)
-{
- struct veth_lpar_connection *cnx;
- cnx = container_of(kobj, struct veth_lpar_connection, kobject);
- veth_stop_connection(cnx);
- veth_destroy_connection(cnx);
-}
-
-/*
- * net_device code
- */
-
-static int veth_open(struct net_device *dev)
-{
- netif_start_queue(dev);
- return 0;
-}
-
-static int veth_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static int veth_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
-static void veth_set_multicast_list(struct net_device *dev)
-{
- struct veth_port *port = netdev_priv(dev);
- unsigned long flags;
-
- write_lock_irqsave(&port->mcast_gate, flags);
-
- if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
- port->promiscuous = 1;
- } else {
- struct netdev_hw_addr *ha;
-
- port->promiscuous = 0;
-
- /* Update table */
- port->num_mcast = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- u8 *addr = ha->addr;
- u64 xaddr = 0;
-
- memcpy(&xaddr, addr, ETH_ALEN);
- port->mcast_addr[port->num_mcast] = xaddr;
- port->num_mcast++;
- }
- }
-
- write_unlock_irqrestore(&port->mcast_gate, flags);
-}
-
-static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
- info->driver[sizeof(info->driver) - 1] = '\0';
- strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
- info->version[sizeof(info->version) - 1] = '\0';
-}
-
-static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- ecmd->supported = (SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- ecmd->advertising = (SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = 0;
- ecmd->speed = SPEED_1000;
- ecmd->duplex = DUPLEX_FULL;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->maxtxpkt = 120;
- ecmd->maxrxpkt = 120;
- return 0;
-}
-
-static const struct ethtool_ops ops = {
- .get_drvinfo = veth_get_drvinfo,
- .get_settings = veth_get_settings,
- .get_link = ethtool_op_get_link,
-};
-
-static const struct net_device_ops veth_netdev_ops = {
- .ndo_open = veth_open,
- .ndo_stop = veth_close,
- .ndo_start_xmit = veth_start_xmit,
- .ndo_change_mtu = veth_change_mtu,
- .ndo_set_rx_mode = veth_set_multicast_list,
- .ndo_set_mac_address = NULL,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static struct net_device *veth_probe_one(int vlan,
- struct vio_dev *vio_dev)
-{
- struct net_device *dev;
- struct veth_port *port;
- struct device *vdev = &vio_dev->dev;
- int i, rc;
- const unsigned char *mac_addr;
-
- mac_addr = vio_get_attribute(vio_dev, "local-mac-address", NULL);
- if (mac_addr == NULL)
- mac_addr = vio_get_attribute(vio_dev, "mac-address", NULL);
- if (mac_addr == NULL) {
- veth_error("Unable to fetch MAC address from device tree.\n");
- return NULL;
- }
-
- dev = alloc_etherdev(sizeof (struct veth_port));
- if (!dev)
- return NULL;
-
- port = netdev_priv(dev);
-
- spin_lock_init(&port->queue_lock);
- rwlock_init(&port->mcast_gate);
- port->stopped_map = 0;
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- HvLpVirtualLanIndexMap map;
-
- if (i == this_lp)
- continue;
- map = HvLpConfig_getVirtualLanIndexMapForLp(i);
- if (map & (0x8000 >> vlan))
- port->lpar_map |= (1 << i);
- }
- port->dev = vdev;
-
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
-
- dev->mtu = VETH_MAX_MTU;
-
- memcpy(&port->mac_addr, mac_addr, ETH_ALEN);
-
- dev->netdev_ops = &veth_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ops);
-
- SET_NETDEV_DEV(dev, vdev);
-
- rc = register_netdev(dev);
- if (rc != 0) {
- veth_error("Failed registering net device for vlan%d.\n", vlan);
- free_netdev(dev);
- return NULL;
- }
-
- kobject_init(&port->kobject, &veth_port_ktype);
- if (0 != kobject_add(&port->kobject, &dev->dev.kobj, "veth_port"))
- veth_error("Failed adding port for %s to sysfs.\n", dev->name);
-
- veth_info("%s attached to iSeries vlan %d (LPAR map = 0x%.4X)\n",
- dev->name, vlan, port->lpar_map);
-
- return dev;
-}
-
-/*
- * Tx path
- */
-
-static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
- struct net_device *dev)
-{
- struct veth_lpar_connection *cnx = veth_cnx[rlp];
- struct veth_port *port = netdev_priv(dev);
- HvLpEvent_Rc rc;
- struct veth_msg *msg = NULL;
- unsigned long flags;
-
- if (! cnx)
- return 0;
-
- spin_lock_irqsave(&cnx->lock, flags);
-
- if (! (cnx->state & VETH_STATE_READY))
- goto no_error;
-
- if ((skb->len - ETH_HLEN) > VETH_MAX_MTU)
- goto drop;
-
- msg = veth_stack_pop(cnx);
- if (! msg)
- goto drop;
-
- msg->in_use = 1;
- msg->skb = skb_get(skb);
-
- msg->data.addr[0] = dma_map_single(port->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
-
- if (dma_mapping_error(port->dev, msg->data.addr[0]))
- goto recycle_and_drop;
-
- msg->dev = port->dev;
- msg->data.len[0] = skb->len;
- msg->data.eofmask = 1 << VETH_EOF_SHIFT;
-
- rc = veth_signaldata(cnx, VETH_EVENT_FRAMES, msg->token, &msg->data);
-
- if (rc != HvLpEvent_Rc_Good)
- goto recycle_and_drop;
-
- /* If the timer's not already running, start it now. */
- if (0 == cnx->outstanding_tx)
- mod_timer(&cnx->reset_timer, jiffies + cnx->reset_timeout);
-
- cnx->last_contact = jiffies;
- cnx->outstanding_tx++;
-
- if (veth_stack_is_empty(cnx))
- veth_stop_queues(cnx);
-
- no_error:
- spin_unlock_irqrestore(&cnx->lock, flags);
- return 0;
-
- recycle_and_drop:
- veth_recycle_msg(cnx, msg);
- drop:
- spin_unlock_irqrestore(&cnx->lock, flags);
- return 1;
-}
-
-static void veth_transmit_to_many(struct sk_buff *skb,
- HvLpIndexMap lpmask,
- struct net_device *dev)
-{
- int i, success, error;
-
- success = error = 0;
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- if ((lpmask & (1 << i)) == 0)
- continue;
-
- if (veth_transmit_to_one(skb, i, dev))
- error = 1;
- else
- success = 1;
- }
-
- if (error)
- dev->stats.tx_errors++;
-
- if (success) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- }
-}
-
-static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned char *frame = skb->data;
- struct veth_port *port = netdev_priv(dev);
- HvLpIndexMap lpmask;
-
- if (is_unicast_ether_addr(frame)) {
- /* unicast packet */
- HvLpIndex rlp = frame[5];
-
- if ( ! ((1 << rlp) & port->lpar_map) ) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- lpmask = 1 << rlp;
- } else {
- lpmask = port->lpar_map;
- }
-
- veth_transmit_to_many(skb, lpmask, dev);
-
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-/* You must hold the connection's lock when you call this function. */
-static void veth_recycle_msg(struct veth_lpar_connection *cnx,
- struct veth_msg *msg)
-{
- u32 dma_address, dma_length;
-
- if (msg->in_use) {
- msg->in_use = 0;
- dma_address = msg->data.addr[0];
- dma_length = msg->data.len[0];
-
- if (!dma_mapping_error(msg->dev, dma_address))
- dma_unmap_single(msg->dev, dma_address, dma_length,
- DMA_TO_DEVICE);
-
- if (msg->skb) {
- dev_kfree_skb_any(msg->skb);
- msg->skb = NULL;
- }
-
- memset(&msg->data, 0, sizeof(msg->data));
- veth_stack_push(cnx, msg);
- } else if (cnx->state & VETH_STATE_OPEN) {
- veth_error("Non-pending frame (# %d) acked by LPAR %d.\n",
- cnx->remote_lp, msg->token);
- }
-}
-
-static void veth_wake_queues(struct veth_lpar_connection *cnx)
-{
- int i;
-
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
- struct net_device *dev = veth_dev[i];
- struct veth_port *port;
- unsigned long flags;
-
- if (! dev)
- continue;
-
- port = netdev_priv(dev);
-
- if (! (port->lpar_map & (1<<cnx->remote_lp)))
- continue;
-
- spin_lock_irqsave(&port->queue_lock, flags);
-
- port->stopped_map &= ~(1 << cnx->remote_lp);
-
- if (0 == port->stopped_map && netif_queue_stopped(dev)) {
- veth_debug("cnx %d: woke queue for %s.\n",
- cnx->remote_lp, dev->name);
- netif_wake_queue(dev);
- }
- spin_unlock_irqrestore(&port->queue_lock, flags);
- }
-}
-
-static void veth_stop_queues(struct veth_lpar_connection *cnx)
-{
- int i;
-
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
- struct net_device *dev = veth_dev[i];
- struct veth_port *port;
-
- if (! dev)
- continue;
-
- port = netdev_priv(dev);
-
- /* If this cnx is not on the vlan for this port, continue */
- if (! (port->lpar_map & (1 << cnx->remote_lp)))
- continue;
-
- spin_lock(&port->queue_lock);
-
- netif_stop_queue(dev);
- port->stopped_map |= (1 << cnx->remote_lp);
-
- veth_debug("cnx %d: stopped queue for %s, map = 0x%x.\n",
- cnx->remote_lp, dev->name, port->stopped_map);
-
- spin_unlock(&port->queue_lock);
- }
-}
-
-static void veth_timed_reset(unsigned long ptr)
-{
- struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)ptr;
- unsigned long trigger_time, flags;
-
- /* FIXME is it possible this fires after veth_stop_connection()?
- * That would reschedule the statemachine for 5 seconds and probably
- * execute it after the module's been unloaded. Hmm. */
-
- spin_lock_irqsave(&cnx->lock, flags);
-
- if (cnx->outstanding_tx > 0) {
- trigger_time = cnx->last_contact + cnx->reset_timeout;
-
- if (trigger_time < jiffies) {
- cnx->state |= VETH_STATE_RESET;
- veth_kick_statemachine(cnx);
- veth_error("%d packets not acked by LPAR %d within %d "
- "seconds, resetting.\n",
- cnx->outstanding_tx, cnx->remote_lp,
- cnx->reset_timeout / HZ);
- } else {
- /* Reschedule the timer */
- trigger_time = jiffies + cnx->reset_timeout;
- mod_timer(&cnx->reset_timer, trigger_time);
- }
- }
-
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-/*
- * Rx path
- */
-
-static inline int veth_frame_wanted(struct veth_port *port, u64 mac_addr)
-{
- int wanted = 0;
- int i;
- unsigned long flags;
-
- if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) )
- return 1;
-
- read_lock_irqsave(&port->mcast_gate, flags);
-
- if (port->promiscuous) {
- wanted = 1;
- goto out;
- }
-
- for (i = 0; i < port->num_mcast; ++i) {
- if (port->mcast_addr[i] == mac_addr) {
- wanted = 1;
- break;
- }
- }
-
- out:
- read_unlock_irqrestore(&port->mcast_gate, flags);
-
- return wanted;
-}
-
-struct dma_chunk {
- u64 addr;
- u64 size;
-};
-
-#define VETH_MAX_PAGES_PER_FRAME ( (VETH_MAX_MTU+PAGE_SIZE-2)/PAGE_SIZE + 1 )
-
-static inline void veth_build_dma_list(struct dma_chunk *list,
- unsigned char *p, unsigned long length)
-{
- unsigned long done;
- int i = 1;
-
- /* FIXME: skbs are contiguous in real addresses. Do we
- * really need to break it into PAGE_SIZE chunks, or can we do
- * it just at the granularity of iSeries real->absolute
- * mapping? Indeed, given the way the allocator works, can we
- * count on them being absolutely contiguous? */
- list[0].addr = iseries_hv_addr(p);
- list[0].size = min(length,
- PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
-
- done = list[0].size;
- while (done < length) {
- list[i].addr = iseries_hv_addr(p + done);
- list[i].size = min(length-done, PAGE_SIZE);
- done += list[i].size;
- i++;
- }
-}
-
-static void veth_flush_acks(struct veth_lpar_connection *cnx)
-{
- HvLpEvent_Rc rc;
-
- rc = veth_signaldata(cnx, VETH_EVENT_FRAMES_ACK,
- 0, &cnx->pending_acks);
-
- if (rc != HvLpEvent_Rc_Good)
- veth_error("Failed acking frames from LPAR %d, rc = %d\n",
- cnx->remote_lp, (int)rc);
-
- cnx->num_pending_acks = 0;
- memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks));
-}
-
-static void veth_receive(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- struct veth_frames_data *senddata = &event->u.frames_data;
- int startchunk = 0;
- int nchunks;
- unsigned long flags;
- HvLpDma_Rc rc;
-
- do {
- u16 length = 0;
- struct sk_buff *skb;
- struct dma_chunk local_list[VETH_MAX_PAGES_PER_FRAME];
- struct dma_chunk remote_list[VETH_MAX_FRAMES_PER_MSG];
- u64 dest;
- HvLpVirtualLanIndex vlan;
- struct net_device *dev;
- struct veth_port *port;
-
- /* FIXME: do we need this? */
- memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(remote_list));
-
- /* a 0 address marks the end of the valid entries */
- if (senddata->addr[startchunk] == 0)
- break;
-
- /* make sure that we have at least 1 EOF entry in the
- * remaining entries */
- if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) {
- veth_error("Missing EOF fragment in event "
- "eofmask = 0x%x startchunk = %d\n",
- (unsigned)senddata->eofmask,
- startchunk);
- break;
- }
-
- /* build list of chunks in this frame */
- nchunks = 0;
- do {
- remote_list[nchunks].addr =
- (u64) senddata->addr[startchunk+nchunks] << 32;
- remote_list[nchunks].size =
- senddata->len[startchunk+nchunks];
- length += remote_list[nchunks].size;
- } while (! (senddata->eofmask &
- (1 << (VETH_EOF_SHIFT + startchunk + nchunks++))));
-
- /* length == total length of all chunks */
- /* nchunks == # of chunks in this frame */
-
- if ((length - ETH_HLEN) > VETH_MAX_MTU) {
- veth_error("Received oversize frame from LPAR %d "
- "(length = %d)\n",
- cnx->remote_lp, length);
- continue;
- }
-
- skb = alloc_skb(length, GFP_ATOMIC);
- if (!skb)
- continue;
-
- veth_build_dma_list(local_list, skb->data, length);
-
- rc = HvCallEvent_dmaBufList(HvLpEvent_Type_VirtualLan,
- event->base_event.xSourceLp,
- HvLpDma_Direction_RemoteToLocal,
- cnx->src_inst,
- cnx->dst_inst,
- HvLpDma_AddressType_RealAddress,
- HvLpDma_AddressType_TceIndex,
- iseries_hv_addr(&local_list),
- iseries_hv_addr(&remote_list),
- length);
- if (rc != HvLpDma_Rc_Good) {
- dev_kfree_skb_irq(skb);
- continue;
- }
-
- vlan = skb->data[9];
- dev = veth_dev[vlan];
- if (! dev) {
- /*
- * Some earlier versions of the driver sent
- * broadcasts down all connections, even to lpars
- * that weren't on the relevant vlan. So ignore
- * packets belonging to a vlan we're not on.
- * We can also be here if we receive packets while
- * the driver is going down, because then dev is NULL.
- */
- dev_kfree_skb_irq(skb);
- continue;
- }
-
- port = netdev_priv(dev);
- dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
-
- if ((vlan > HVMAXARCHITECTEDVIRTUALLANS) || !port) {
- dev_kfree_skb_irq(skb);
- continue;
- }
- if (! veth_frame_wanted(port, dest)) {
- dev_kfree_skb_irq(skb);
- continue;
- }
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, dev);
- skb_checksum_none_assert(skb);
- netif_rx(skb); /* send it up */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += length;
- } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG);
-
- /* Ack it */
- spin_lock_irqsave(&cnx->lock, flags);
- BUG_ON(cnx->num_pending_acks > VETH_MAX_ACKS_PER_MSG);
-
- cnx->pending_acks[cnx->num_pending_acks++] =
- event->base_event.xCorrelationToken;
-
- if ( (cnx->num_pending_acks >= cnx->remote_caps.ack_threshold) ||
- (cnx->num_pending_acks >= VETH_MAX_ACKS_PER_MSG) )
- veth_flush_acks(cnx);
-
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_timed_ack(unsigned long ptr)
-{
- struct veth_lpar_connection *cnx = (struct veth_lpar_connection *) ptr;
- unsigned long flags;
-
- /* Ack all the events */
- spin_lock_irqsave(&cnx->lock, flags);
- if (cnx->num_pending_acks > 0)
- veth_flush_acks(cnx);
-
- /* Reschedule the timer */
- cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
- add_timer(&cnx->ack_timer);
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static int veth_remove(struct vio_dev *vdev)
-{
- struct veth_lpar_connection *cnx;
- struct net_device *dev;
- struct veth_port *port;
- int i;
-
- dev = veth_dev[vdev->unit_address];
-
- if (! dev)
- return 0;
-
- port = netdev_priv(dev);
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- cnx = veth_cnx[i];
-
- if (cnx && (port->lpar_map & (1 << i))) {
- /* Drop our reference to connections on our VLAN */
- kobject_put(&cnx->kobject);
- }
- }
-
- veth_dev[vdev->unit_address] = NULL;
- kobject_del(&port->kobject);
- kobject_put(&port->kobject);
- unregister_netdev(dev);
- free_netdev(dev);
-
- return 0;
-}
-
-static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
- int i = vdev->unit_address;
- struct net_device *dev;
- struct veth_port *port;
-
- dev = veth_probe_one(i, vdev);
- if (dev == NULL) {
- veth_remove(vdev);
- return 1;
- }
- veth_dev[i] = dev;
-
- port = netdev_priv(dev);
-
- /* Start the state machine on each connection on this vlan. If we're
- * the first dev to do so this will commence link negotiation */
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- struct veth_lpar_connection *cnx;
-
- if (! (port->lpar_map & (1 << i)))
- continue;
-
- cnx = veth_cnx[i];
- if (!cnx)
- continue;
-
- kobject_get(&cnx->kobject);
- veth_kick_statemachine(cnx);
- }
-
- return 0;
-}
-
-/**
- * veth_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id veth_device_table[] __devinitdata = {
- { "network", "IBM,iSeries-l-lan" },
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, veth_device_table);
-
-static struct vio_driver veth_driver = {
- .id_table = veth_device_table,
- .probe = veth_probe,
- .remove = veth_remove,
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-/*
- * Module initialization/cleanup
- */
-
-static void __exit veth_module_cleanup(void)
-{
- int i;
- struct veth_lpar_connection *cnx;
-
- /* Disconnect our "irq" to stop events coming from the Hypervisor. */
- HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- cnx = veth_cnx[i];
-
- if (!cnx)
- continue;
-
- /* Cancel work queued from Hypervisor callbacks */
- cancel_delayed_work_sync(&cnx->statemachine_wq);
- /* Remove the connection from sysfs */
- kobject_del(&cnx->kobject);
- /* Drop the driver's reference to the connection */
- kobject_put(&cnx->kobject);
- }
-
- /* Unregister the driver, which will close all the netdevs and stop
- * the connections when they're no longer referenced. */
- vio_unregister_driver(&veth_driver);
-}
-module_exit(veth_module_cleanup);
-
-static int __init veth_module_init(void)
-{
- int i;
- int rc;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
- this_lp = HvLpConfig_getLpIndex_outline();
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- rc = veth_init_connection(i);
- if (rc != 0)
- goto error;
- }
-
- HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
- &veth_handle_event);
-
- rc = vio_register_driver(&veth_driver);
- if (rc != 0)
- goto error;
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- struct kobject *kobj;
-
- if (!veth_cnx[i])
- continue;
-
- kobj = &veth_cnx[i]->kobject;
-		/* If the add fails, complain but otherwise continue */
- if (0 != driver_add_kobj(&veth_driver.driver, kobj,
- "cnx%.2d", veth_cnx[i]->remote_lp))
- veth_error("cnx %d: Failed adding to sysfs.\n", i);
- }
-
- return 0;
-
-error:
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- veth_destroy_connection(veth_cnx[i]);
- }
-
- return rc;
-}
-module_init(veth_module_init);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index aa399a8..e10821a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1577,7 +1577,9 @@
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
+ struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1601,6 +1603,8 @@
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ total_bytes += tx_buffer_info->bytecount;
+ total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1615,6 +1619,9 @@
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
+ txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
+
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
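
The igb_ethtool.c hunk above makes the loopback self-test report its completions to the byte queue limits (BQL) layer, matching what the normal Tx clean-up path does; without it the dql state would drift during the test and could stall the queue afterwards. A minimal sketch of the BQL pairing (illustrative only; netdev_tx_sent_queue() on the transmit side is assumed and is not part of this hunk):

	struct netdev_queue *txq =
		netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);

	/* xmit path: account bytes handed to the hardware */
	netdev_tx_sent_queue(txq, skb->len);

	/* completion path (as in the hunk): report finished work in bulk */
	netdev_tx_completed_queue(txq, total_packets, total_bytes);

	/* ring reset/re-init: sent and completed counters are cleared together */
	netdev_tx_reset_queue(txq);
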
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fda8247..e96cef8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2752,6 +2752,8 @@
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl);
+
+ netdev_tx_reset_queue(txring_txq(ring));
}
/**
@@ -3244,7 +3246,6 @@
buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
- netdev_tx_reset_queue(txring_txq(tx_ring));
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index cb23448..4d2ae97 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -75,18 +75,6 @@
#include "ixgb_ee.h"
#include "ixgb_ids.h"
-#define PFX "ixgb: "
-
-#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
-#else
-#define IXGB_DBG(fmt, args...) \
-do { \
- if (0) \
- printk(KERN_DEBUG PFX fmt, ##args); \
-} while (0)
-#endif
-
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
#define MAX_TXD 4096
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 0024788..82aaa79 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2068,8 +2068,8 @@
/* All receives must fit into a single buffer */
- IXGB_DBG("Receive packet consumed multiple buffers "
- "length<%x>\n", length);
+ pr_debug("Receive packet consumed multiple buffers length<%x>\n",
+ length);
dev_kfree_skb_irq(skb);
goto rxdesc_done;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 2807a25..f069c1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -254,10 +254,8 @@
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
- int numa_node;
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
- struct rcu_head rcu;
struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
@@ -298,6 +296,10 @@
u8 itr; /* current ITR setting for ring */
};
+/* iterator for handling rings in ring container */
+#define ixgbe_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
@@ -317,8 +319,13 @@
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
- cpumask_var_t affinity_mask;
+ cpumask_t affinity_mask;
+ int numa_node;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
/*
@@ -370,11 +377,19 @@
#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+/* default to trying for four seconds */
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+
/* board specific private data structure */
struct ixgbe_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
unsigned long state;
/* Some features need tri-state capability,
@@ -418,8 +433,35 @@
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
+
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
+ u16 tx_work_limit;
+
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+
+ /* TX */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+
+ u64 restart_queue;
+ u64 lsc_int;
+ u32 tx_timeout_count;
+
+ /* RX */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
+ u64 non_eop_descs;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
/* DCB parameters */
@@ -431,47 +473,11 @@
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
- u16 eitr_low;
- u16 eitr_high;
-
- /* Work limits */
- u16 tx_work_limit;
-
- /* TX */
- struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
- int num_tx_queues;
- u32 tx_timeout_count;
- bool detect_tx_hung;
-
- u64 restart_queue;
- u64 lsc_int;
-
- /* RX */
- struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
- int num_rx_queues;
- int num_rx_pools; /* == num_rx_queues in 82598 */
- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
- u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
- u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u32 alloc_rx_page_failed;
- u32 alloc_rx_buff_failed;
-
-/* default to trying for four seconds */
-#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
-
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
u32 test_icr;
struct ixgbe_ring test_tx_ring;
struct ixgbe_ring test_rx_ring;
@@ -481,10 +487,6 @@
u16 msg_enable;
struct ixgbe_hw_stats stats;
- /* Interrupt Throttle Rate */
- u32 rx_eitr_param;
- u32 tx_eitr_param;
-
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
@@ -493,25 +495,30 @@
bool link_up;
unsigned long link_check_timeout;
- struct work_struct service_task;
struct timer_list service_timer;
+ struct work_struct service_task;
+
+ struct hlist_head fdir_filter_list;
+ unsigned long fdir_overflow; /* number of times ATR was backed off */
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
u32 fdir_pballoc;
u32 atr_sample_rate;
- unsigned long fdir_overflow; /* number of times ATR was backed off */
spinlock_t fdir_perfect_lock;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
- u64 rsc_total_count;
- u64 rsc_total_flush;
u32 wol;
+
+ u16 bd_number;
+
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
- int node;
- u32 led_reg;
u32 interrupt_event;
+ u32 led_reg;
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -521,9 +528,6 @@
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
- struct hlist_head fdir_filter_list;
- union ixgbe_atr_input fdir_mask;
- int fdir_filter_count;
u32 timer_event_accumulator;
u32 vferr_refcount;
};
@@ -643,4 +647,9 @@
struct netdev_fcoe_hbainfo *info);
#endif /* IXGBE_FCOE */
+static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
#endif /* _IXGBE_H_ */
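
The two helpers added to ixgbe.h above are used throughout the rest of this series: ixgbe_for_each_ring() walks the singly linked ring list hanging off a ring container, and txring_txq() maps a Tx ring to its struct netdev_queue for the BQL calls. A usage sketch, assuming a q_vector and per-ring completion counters as in ixgbe_clean_tx_irq():

	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		/* per-ring work here, e.g. BQL completion accounting */
		netdev_tx_completed_queue(txring_txq(ring),
					  total_packets, total_bytes);
	}
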
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 752dbe6..85d2e2c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -213,15 +213,15 @@
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4e59083..9c14685 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -779,7 +779,8 @@
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
/* Check to see if speed passed in is supported. */
- hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
+ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
+ &autoneg);
if (status != 0)
goto out;
@@ -1906,38 +1907,17 @@
**/
static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
-#define IXGBE_MAX_SECRX_POLL 30
- int i;
- int secrxreg;
-
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
* If traffic is incoming before we enable the Rx unit, it could hang
* the Rx DMA unit. Therefore, make sure the security engine is
* completely disabled prior to enabling the Rx unit.
*/
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
- if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
- break;
- else
- /* Use interrupt-safe sleep just in case */
- udelay(10);
- }
-
- /* For informational purposes only */
- if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw->mac.ops.disable_rx_buff(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- IXGBE_WRITE_FLUSH(hw);
+
+ hw->mac.ops.enable_rx_buff(hw);
return 0;
}
@@ -2102,6 +2082,8 @@
.get_media_type = &ixgbe_get_media_type_82599,
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
+ .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_generic,
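
With the open-coded SECRXCTRL poll moved into the new generic helpers, the 82599/X540 paths can quiesce the Rx security block wherever the Rx datapath is reconfigured, not only in ixgbe_enable_rx_dma_82599(). The resulting bracketing pattern, sketched with the mac ops wired up above (regval stands in for whatever Rx reconfiguration is being done):

	/* 82599 errata workaround: drain the Rx security block first */
	hw->mac.ops.disable_rx_buff(hw);

	/* reconfigure the Rx datapath (RXCTRL write, FDIR init, etc.) */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	/* then let Rx traffic flow again */
	hw->mac.ops.enable_rx_buff(hw);
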
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 87f6b4f..6117bfd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -128,14 +128,14 @@
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@@ -2578,6 +2578,58 @@
}
/**
+ * ixgbe_disable_rx_buff_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally
+ * empty the Rx security block.
+ **/
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+ int i;
+ int secrxreg;
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ udelay(10);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ hw_dbg(hw, "Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return 0;
+
+}
+
+/**
+ * ixgbe_enable_rx_buff_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path
+ **/
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+ int secrxreg;
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
* ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
* @hw: pointer to hardware structure
* @regval: register value to write to RXCTRL
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2c834c4..204f062 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -74,6 +74,8 @@
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 8e2e473..24f7291 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -935,12 +935,12 @@
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring[0]->count) &&
@@ -1591,7 +1591,6 @@
tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
- tx_ring->numa_node = adapter->node;
err = ixgbe_setup_tx_resources(tx_ring);
if (err)
@@ -1617,7 +1616,6 @@
rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
- rx_ring->numa_node = adapter->node;
err = ixgbe_setup_rx_resources(rx_ring);
if (err) {
@@ -1703,63 +1701,65 @@
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
+ unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ frame_size >>= 1;
+ memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size + 10], 0xBE, 1);
+ memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
+static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size &= ~1;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+ data = rx_buffer->skb->data;
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+ return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
- struct ixgbe_ring *tx_ring,
- unsigned int size)
+ struct ixgbe_ring *tx_ring,
+ unsigned int size)
{
union ixgbe_adv_rx_desc *rx_desc;
- struct ixgbe_rx_buffer *rx_buffer_info;
- struct ixgbe_tx_buffer *tx_buffer_info;
- const int bufsz = rx_ring->rx_buf_len;
- u32 staterr;
+ struct ixgbe_rx_buffer *rx_buffer;
+ struct ixgbe_tx_buffer *tx_buffer;
u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
tx_ntc = tx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- while (staterr & IXGBE_RXD_STAT_DD) {
+ while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
/* check Rx buffer */
- rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- bufsz,
+ rx_buffer->dma,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ rx_buffer->dma = 0;
/* verify contents of skb */
- if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (ixgbe_check_lbtest_frame(rx_buffer, size))
count++;
/* unmap buffer on Tx side */
- tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -1771,7 +1771,6 @@
/* fetch next descriptor */
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
/* re-map buffers to ring, store next to clean values */
@@ -2108,8 +2107,6 @@
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
-
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
ec->rx_coalesce_usecs = adapter->rx_itr_setting;
@@ -2177,9 +2174,6 @@
&& ec->tx_coalesce_usecs)
return -EINVAL;
- if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
-
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
return -EINVAL;
@@ -2214,7 +2208,6 @@
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
- q_vector->tx.work_limit = adapter->tx_work_limit;
if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
q_vector->itr = tx_itr_param;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 23a4665..167e898 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -763,6 +763,9 @@
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
@@ -773,12 +776,8 @@
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
- /* prevent any other reads prior to eop_desc being verified */
- rmb();
-
do {
ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
- tx_desc->wb.status = 0;
if (likely(tx_desc == eop_desc)) {
eop_desc = NULL;
dev_kfree_skb_any(tx_buffer->skb);
@@ -840,6 +839,9 @@
return true;
}
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -858,63 +860,68 @@
}
#ifdef CONFIG_IXGBE_DCA
-static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cpu)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rxctrl;
- u8 reg_idx = rx_ring->reg_idx;
-
- rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
- break;
- default:
- break;
- }
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
-}
-
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txctrl;
- u8 reg_idx = tx_ring->reg_idx;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+ u16 reg_offset;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(tx_ring->dev, cpu);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
+ txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
+ break;
+ default:
+ /* for unknown hardware do not write register */
+ return;
+ }
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN |
+ IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+
+ IXGBE_WRITE_REG(hw, reg_offset, txctrl);
+}
+
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cpu)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
+ u8 reg_idx = rx_ring->reg_idx;
+
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
break;
default:
break;
}
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_DCA_EN |
+ IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
@@ -926,10 +933,10 @@
if (q_vector->cpu == cpu)
goto out_no_update;
- for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_update_tx_dca(adapter, ring, cpu);
- for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->rx)
ixgbe_update_rx_dca(adapter, ring, cpu);
q_vector->cpu = cpu;
@@ -989,8 +996,8 @@
return 0;
}
-#endif /* CONFIG_IXGBE_DCA */
+#endif /* CONFIG_IXGBE_DCA */
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -1704,10 +1711,10 @@
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
- for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->rx)
ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
- for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
if (q_vector->tx.ring && !q_vector->rx.ring) {
@@ -1775,20 +1782,19 @@
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
{
- u64 bytes_perint;
- struct ixgbe_adapter *adapter = q_vector->adapter;
int bytes = ring_container->total_bytes;
int packets = ring_container->total_packets;
u32 timepassed_us;
+ u64 bytes_perint;
u8 itr_setting = ring_container->itr;
if (packets == 0)
return;
/* simple throttlerate management
- * 0-20MB/s lowest (100000 ints/s)
- * 20-100MB/s low (20000 ints/s)
- * 100-1249MB/s bulk (8000 ints/s)
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
@@ -1796,17 +1802,17 @@
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
+ if (bytes_perint > 10)
itr_setting = low_latency;
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
+ if (bytes_perint > 20)
itr_setting = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
+ else if (bytes_perint <= 10)
itr_setting = lowest_latency;
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
+ if (bytes_perint <= 20)
itr_setting = low_latency;
break;
}
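
For reference, bytes_perint is bytes / timepassed_us, i.e. bytes per microsecond, which is numerically megabytes per second; the new constants 10 and 20 therefore correspond directly to the 10 MB/s and 20 MB/s boundaries in the updated comment. As a worked example: if the last interrupt interval was about 50 us (q_vector->itr >> 2) and 1500 bytes were handled in that slice, bytes_perint = 1500 / 50 = 30, which is above 20, so a ring sitting in low_latency moves to bulk_latency (8000 ints/s).
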
@@ -1832,7 +1838,7 @@
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
int v_idx = q_vector->v_idx;
- u32 itr_reg = q_vector->itr;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
@@ -1884,14 +1890,14 @@
((9 * new_itr) + q_vector->itr);
/* save the algorithm value here */
- q_vector->itr = new_itr & IXGBE_MAX_EITR;
+ q_vector->itr = new_itr;
ixgbe_write_eitr(q_vector);
}
}
/**
- * ixgbe_check_overtemp_subtask - check for over tempurature
+ * ixgbe_check_overtemp_subtask - check for over temperature
* @adapter: pointer to adapter
**/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
@@ -2203,78 +2209,6 @@
return IRQ_HANDLED;
}
-static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
-{
- struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
- struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- q_vector->rx.count++;
-}
-
-static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int t_idx)
-{
- struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
- struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- q_vector->tx.count++;
- q_vector->tx.work_limit = a->tx_work_limit;
-}
-
-/**
- * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
- * @adapter: board private structure to initialize
- *
- * This function maps descriptor rings to the queue-specific vectors
- * we were allotted through the MSI-X enabling code. Ideally, we'd have
- * one vector per ring/queue, but on a constrained vector budget, we
- * group the rings as "efficiently" as possible. You would add new
- * mapping configurations in here.
- **/
-static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
-{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
- int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
- int v_start = 0;
-
- /* only one q_vector if MSI-X is disabled. */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- /*
- * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
- * group them so there are multiple queues per vector.
- *
- * Re-adjusting *qpv takes care of the remainder.
- */
- for (; v_start < q_vectors && rxr_remaining; v_start++) {
- int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
- for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
- map_vector_to_rxq(adapter, v_start, rxr_idx);
- }
-
- /*
-	 * If there are not enough q_vectors for each ring to have its own
-	 * vector then we must pair up Rx/Tx on each vector
- */
- if ((v_start + txr_remaining) > q_vectors)
- v_start = 0;
-
- for (; v_start < q_vectors && txr_remaining; v_start++) {
- int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
- for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
- map_vector_to_txq(adapter, v_start, txr_idx);
- }
-}
-
/**
* ixgbe_request_msix_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
@@ -2318,14 +2252,14 @@
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
irq_set_affinity_hint(entry->vector,
- q_vector->affinity_mask);
+ &q_vector->affinity_mask);
}
}
err = request_irq(adapter->msix_entries[vector].vector,
ixgbe_msix_other, 0, netdev->name, adapter);
if (err) {
- e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
+ e_err(probe, "request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
}
@@ -2399,47 +2333,19 @@
ixgbe_check_fan_failure(adapter, eicr);
- if (napi_schedule_prep(&(q_vector->napi))) {
- /* would disable interrupts here but EIAM disabled it */
- __napi_schedule(&(q_vector->napi));
- }
+ /* would disable interrupts here but EIAM disabled it */
+ napi_schedule(&q_vector->napi);
/*
* re-enable link(maybe) and non-queue interrupts, no flush.
* ixgbe_poll will re-enable the queue interrupts
*/
-
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable(adapter, false, false);
return IRQ_HANDLED;
}
-static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
-{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- int i;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i]->q_vector = NULL;
- adapter->rx_ring[i]->next = NULL;
- }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i]->q_vector = NULL;
- adapter->tx_ring[i]->next = NULL;
- }
-
- for (i = 0; i < q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
- memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
- }
-}
-
/**
* ixgbe_request_irq - initialize interrupts
* @adapter: board private structure
@@ -2452,9 +2358,6 @@
struct net_device *netdev = adapter->netdev;
int err;
- /* map all of the rings to the q_vectors */
- ixgbe_map_rings_to_vectors(adapter);
-
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
err = ixgbe_request_msix_irqs(adapter);
else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
@@ -2464,13 +2367,9 @@
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, adapter);
- if (err) {
+ if (err)
e_err(probe, "request_irq failed, Error %d\n", err);
- /* place q_vectors and rings back into a known good state */
- ixgbe_reset_q_vectors(adapter);
- }
-
return err;
}
@@ -2500,9 +2399,6 @@
} else {
free_irq(adapter->pdev->irq, adapter);
}
-
- /* clear q_vector state information */
- ixgbe_reset_q_vectors(adapter);
}
/**
@@ -2593,12 +2489,15 @@
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
- if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+ if (!ring->q_vector || (ring->q_vector->itr < 8))
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
- /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+ /*
+ * Setting PTHRESH to 32 both improves performance
+ * and avoids a TX hang with DFP enabled
+ */
txdctl |= (1 << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
@@ -2617,6 +2516,8 @@
/* enable queue */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
+ netdev_tx_reset_queue(txring_txq(ring));
+
/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3760,6 +3661,8 @@
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+
ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
@@ -3773,6 +3676,16 @@
ixgbe_configure_fcoe(adapter);
#endif /* IXGBE_FCOE */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hw->mac.ops.disable_rx_buff(hw);
+ break;
+ default:
+ break;
+ }
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ixgbe_init_fdir_signature_82599(&adapter->hw,
adapter->fdir_pballoc);
@@ -3782,6 +3695,15 @@
ixgbe_fdir_filter_restore(adapter);
}
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hw->mac.ops.enable_rx_buff(hw);
+ break;
+ default:
+ break;
+ }
+
ixgbe_configure_virtualization(adapter);
ixgbe_configure_tx(adapter);
@@ -4298,7 +4220,7 @@
ixgbe_update_dca(q_vector);
#endif
- for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->tx)
clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -4308,7 +4230,7 @@
else
per_ring_budget = budget;
- for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->rx)
clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
@@ -4415,7 +4337,7 @@
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
- f->indices = min((int)num_online_cpus(), f->indices);
+ f->indices = min_t(int, num_online_cpus(), f->indices);
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
@@ -4451,8 +4373,8 @@
return false;
/* Map queue offset and counts onto allocated tx queues */
- per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
- q = min((int)num_online_cpus(), per_tc_q);
+ per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
+ q = min_t(int, num_online_cpus(), per_tc_q);
for (i = 0; i < tcs; i++) {
netdev_set_tc_queue(dev, i, q, offset);
@@ -4469,11 +4391,13 @@
* configuration later.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ u8 prio_tc[MAX_USER_PRIORITY] = {0};
int tc;
struct ixgbe_ring_feature *f =
&adapter->ring_feature[RING_F_FCOE];
- tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ tc = prio_tc[adapter->fcoe.up];
f->indices = dev->tc_to_txq[tc].count;
f->mask = dev->tc_to_txq[tc].offset;
}
@@ -4554,11 +4478,9 @@
{
int err, vector_threshold;
- /* We'll want at least 3 (vector_threshold):
- * 1) TxQ[0] Cleanup
- * 2) RxQ[0] Cleanup
- * 3) Other (Link Status Change, etc.)
- * 4) TCP Timer (optional)
+ /* We'll want at least 2 (vector_threshold):
+ * 1) TxQ[0] + RxQ[0] handler
+ * 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
@@ -4826,75 +4748,6 @@
}
/**
- * ixgbe_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time. The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
- **/
-static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
-{
- int rx = 0, tx = 0, nid = adapter->node;
-
- if (nid < 0 || !node_online(nid))
- nid = first_online_node;
-
- for (; tx < adapter->num_tx_queues; tx++) {
- struct ixgbe_ring *ring;
-
- ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
- if (!ring)
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = tx;
- ring->numa_node = nid;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- adapter->tx_ring[tx] = ring;
- }
-
- for (; rx < adapter->num_rx_queues; rx++) {
- struct ixgbe_ring *ring;
-
- ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
- if (!ring)
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = rx;
- ring->numa_node = nid;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /*
- * 82599 errata, UDP frames with a 0 checksum can be marked as
- * checksum errors.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
-
- adapter->rx_ring[rx] = ring;
- }
-
- ixgbe_cache_ring_register(adapter);
-
- return 0;
-
-err_allocation:
- while (tx)
- kfree(adapter->tx_ring[--tx]);
-
- while (rx)
- kfree(adapter->rx_ring[--rx]);
- return -ENOMEM;
-}
-
-/**
* ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
* @adapter: board private structure to initialize
*
@@ -4912,9 +4765,11 @@
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
+ * The default is to use pairs of vectors.
*/
- v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)num_online_cpus()) + NON_Q_VECTORS;
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
@@ -4923,7 +4778,7 @@
* descriptor queues supported by our device. Thus, we cap it off in
* those rare cases where the cpu count also exceeds our vector limit.
*/
- v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
+ v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
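
As a worked example of the new budget: on a host with 16 online CPUs and 8 Rx plus 8 Tx queues, v_budget = min(max(8, 8), 16) + NON_Q_VECTORS, i.e. 8 paired Rx/Tx vectors plus the single "other" vector for link/status events (assuming NON_Q_VECTORS is 1, as the vector_threshold comment earlier in this patch implies), and the total is then capped at hw->mac.max_msix_vectors in the hunk just above.
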
@@ -4970,6 +4825,164 @@
return err;
}
+static void ixgbe_add_ring(struct ixgbe_ring *ring,
+ struct ixgbe_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
+{
+ struct ixgbe_q_vector *q_vector;
+ struct ixgbe_ring *ring;
+ int node = -1;
+ int cpu = -1;
+ int ring_count, size;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct ixgbe_q_vector) +
+ (sizeof(struct ixgbe_ring) * ring_count);
+
+ /* customize cpu for Flow Director mapping */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
+ }
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc_node(size, GFP_KERNEL, node);
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* setup affinity mask and node */
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+ else
+ cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
+ q_vector->numa_node = node;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbe_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = v_idx;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ while (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbe_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ while (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ ixgbe_add_ring(ring, &q_vector->rx);
+
+ /*
+ * 82599 errata, UDP frames with a 0 checksum
+ * can be marked as checksum errors.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+
+ /* update count and index */
+ rxr_count--;
+ rxr_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
+{
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct ixgbe_ring *ring;
+
+ ixgbe_for_each_ring(ring, q_vector->tx)
+ adapter->tx_ring[ring->queue_index] = NULL;
+
+ ixgbe_for_each_ring(ring, q_vector->rx)
+ adapter->rx_ring[ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+
+ /*
+	 * ixgbe_get_stats64() might access the rings on this vector, so
+	 * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
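
ixgbe_alloc_q_vector() above depends on the ring[0] flexible array member added to struct ixgbe_q_vector earlier in this patch: the vector and all of its rings come from a single node-local allocation and are later released with a single kfree_rcu(). A condensed sketch of that allocation shape (illustrative only; node, txr_count and rxr_count as in the hunk):

	int ring_count = txr_count + rxr_count;
	int size = sizeof(struct ixgbe_q_vector) +
		   sizeof(struct ixgbe_ring) * ring_count;
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;

	q_vector = kzalloc_node(size, GFP_KERNEL, node);	/* prefer this node */
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);		/* any node */
	if (!q_vector)
		return -ENOMEM;

	ring = q_vector->ring;	/* rings live in the trailing array */
	/* hand out ring, ring + 1, ... to the Tx and Rx containers */

	/* teardown: one RCU-deferred free covers the vector and its rings */
	kfree_rcu(q_vector, rcu);
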
/**
* ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
@@ -4979,33 +4992,46 @@
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
- int v_idx, num_q_vectors;
- struct ixgbe_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
+ /* only one q_vector if MSI-X is disabled. */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL, adapter->node);
- if (!q_vector)
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL);
- if (!q_vector)
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+ err = ixgbe_alloc_q_vector(adapter, v_idx,
+ 0, 0, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ }
+ }
+
+ for (; q_vectors; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
+ err = ixgbe_alloc_q_vector(adapter, v_idx,
+ tqpv, txr_idx,
+ rqpv, rxr_idx);
+
+ if (err)
goto err_out;
- q_vector->adapter = adapter;
- q_vector->v_idx = v_idx;
-
- /* Allocate the affinity_hint cpumask, configure the mask */
- if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto err_out;
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ txr_remaining -= tqpv;
+ txr_idx += tqpv;
}
return 0;
@@ -5013,12 +5039,9 @@
err_out:
while (v_idx) {
v_idx--;
- q_vector = adapter->q_vector[v_idx];
- netif_napi_del(&q_vector->napi);
- free_cpumask_var(q_vector->affinity_mask);
- kfree(q_vector);
- adapter->q_vector[v_idx] = NULL;
+ ixgbe_free_q_vector(adapter, v_idx);
}
+
return -ENOMEM;
}
@@ -5032,20 +5055,15 @@
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
- int v_idx, num_q_vectors;
+ int v_idx, q_vectors;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
- num_q_vectors = 1;
+ q_vectors = 1;
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- netif_napi_del(&q_vector->napi);
- free_cpumask_var(q_vector->affinity_mask);
- kfree(q_vector);
- }
+ for (v_idx = 0; v_idx < q_vectors; v_idx++)
+ ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
@@ -5092,11 +5110,7 @@
goto err_alloc_q_vectors;
}
- err = ixgbe_alloc_queues(adapter);
- if (err) {
- e_dev_err("Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
+ ixgbe_cache_ring_register(adapter);
e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
@@ -5106,8 +5120,6 @@
return 0;
-err_alloc_queues:
- ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
@@ -5123,22 +5135,6 @@
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
-
- /* ixgbe_get_stats64() might access this ring, we must wait
- * a grace period before freeing it.
- */
- kfree_rcu(ring, rcu);
- adapter->rx_ring[i] = NULL;
- }
-
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -5173,7 +5169,7 @@
hw->subsystem_device_id = pdev->subsystem_device;
/* Set capability flags */
- rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
switch (hw->mac.type) {
@@ -5265,10 +5261,6 @@
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
- /* set defaults for eitr in MegaBytes */
- adapter->eitr_low = 10;
- adapter->eitr_high = 20;
-
/* set default ring sizes */
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
@@ -5282,9 +5274,6 @@
return -EIO;
}
- /* get assigned NUMA node */
- adapter->node = dev_to_node(&pdev->dev);
-
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -5299,10 +5288,16 @@
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = -1;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
+
+ if (tx_ring->q_vector)
+ numa_node = tx_ring->q_vector->numa_node;
+
+ tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
@@ -5312,8 +5307,15 @@
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ set_dev_node(dev, numa_node);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!tx_ring->desc)
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -5362,10 +5364,16 @@
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = -1;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
+
+ if (rx_ring->q_vector)
+ numa_node = rx_ring->q_vector->numa_node;
+
+ rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
@@ -5375,9 +5383,15 @@
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
+ set_dev_node(dev, numa_node);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!rx_ring->desc)
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -6808,6 +6822,8 @@
tx_buffer_info->gso_segs = gso_segs;
tx_buffer_info->skb = skb;
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer_info->bytecount);
+
/* set the timestamp */
first->time_stamp = jiffies;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 4c060292..8636e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1021,14 +1021,16 @@
#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@@ -2726,6 +2728,8 @@
s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
+ s32 (*disable_rx_buff)(struct ixgbe_hw *);
+ s32 (*enable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 5e9f05f..97a9914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -847,6 +847,8 @@
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 738f950..fb2b367 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -151,11 +151,6 @@
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
- port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
- if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- context->pri_path.sched_queue = (context->pri_path.sched_queue &
- 0xc3);
-
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index bfdb7af..8752e6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2255,8 +2255,7 @@
if (vhcr->op_modifier == 0) {
err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
- if (err)
- goto ex_put;
+ goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig
new file mode 100644
index 0000000..0d9baf9
--- /dev/null
+++ b/drivers/net/ethernet/nxp/Kconfig
@@ -0,0 +1,8 @@
+config LPC_ENET
+ tristate "NXP ethernet MAC on LPC devices"
+ depends on ARCH_LPC32XX
+ select PHYLIB
+ help
+ Say Y or M here if you want to use the NXP ethernet MAC included on
+	  some NXP LPC devices. You can safely enable this option for the
+	  LPC32xx SoC. Also available as a module.
diff --git a/drivers/net/ethernet/nxp/Makefile b/drivers/net/ethernet/nxp/Makefile
new file mode 100644
index 0000000..a128114
--- /dev/null
+++ b/drivers/net/ethernet/nxp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LPC_ENET) += lpc_eth.o
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
new file mode 100644
index 0000000..6944424
--- /dev/null
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -0,0 +1,1604 @@
+/*
+ * drivers/net/ethernet/nxp/lpc_eth.c
+ *
+ * Author: Kevin Wells <kevin.wells@nxp.com>
+ *
+ * Copyright (C) 2010 NXP Semiconductors
+ * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/clk.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_net.h>
+#include <linux/types.h>
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <mach/board.h>
+#include <mach/platform.h>
+#include <mach/hardware.h>
+
+#define MODNAME "lpc-eth"
+#define DRV_VERSION "1.00"
+#define PHYDEF_ADDR 0x00
+
+#define ENET_MAXF_SIZE 1536
+#define ENET_RX_DESC 48
+#define ENET_TX_DESC 16
+
+#define NAPI_WEIGHT 16
+
+/*
+ * Ethernet MAC controller Register offsets
+ */
+#define LPC_ENET_MAC1(x) (x + 0x000)
+#define LPC_ENET_MAC2(x) (x + 0x004)
+#define LPC_ENET_IPGT(x) (x + 0x008)
+#define LPC_ENET_IPGR(x) (x + 0x00C)
+#define LPC_ENET_CLRT(x) (x + 0x010)
+#define LPC_ENET_MAXF(x) (x + 0x014)
+#define LPC_ENET_SUPP(x) (x + 0x018)
+#define LPC_ENET_TEST(x) (x + 0x01C)
+#define LPC_ENET_MCFG(x) (x + 0x020)
+#define LPC_ENET_MCMD(x) (x + 0x024)
+#define LPC_ENET_MADR(x) (x + 0x028)
+#define LPC_ENET_MWTD(x) (x + 0x02C)
+#define LPC_ENET_MRDD(x) (x + 0x030)
+#define LPC_ENET_MIND(x) (x + 0x034)
+#define LPC_ENET_SA0(x) (x + 0x040)
+#define LPC_ENET_SA1(x) (x + 0x044)
+#define LPC_ENET_SA2(x) (x + 0x048)
+#define LPC_ENET_COMMAND(x) (x + 0x100)
+#define LPC_ENET_STATUS(x) (x + 0x104)
+#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
+#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
+#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
+#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
+#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
+#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
+#define LPC_ENET_TXSTATUS(x) (x + 0x120)
+#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
+#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
+#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
+#define LPC_ENET_TSV0(x) (x + 0x158)
+#define LPC_ENET_TSV1(x) (x + 0x15C)
+#define LPC_ENET_RSV(x) (x + 0x160)
+#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
+#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
+#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
+#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
+#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
+#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
+#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
+#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
+#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
+#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
+#define LPC_ENET_INTSET(x) (x + 0xFEC)
+#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
+
+/*
+ * mac1 register definitions
+ */
+#define LPC_MAC1_RECV_ENABLE (1 << 0)
+#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
+#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
+#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
+#define LPC_MAC1_LOOPBACK (1 << 4)
+#define LPC_MAC1_RESET_TX (1 << 8)
+#define LPC_MAC1_RESET_MCS_TX (1 << 9)
+#define LPC_MAC1_RESET_RX (1 << 10)
+#define LPC_MAC1_RESET_MCS_RX (1 << 11)
+#define LPC_MAC1_SIMULATION_RESET (1 << 14)
+#define LPC_MAC1_SOFT_RESET (1 << 15)
+
+/*
+ * mac2 register definitions
+ */
+#define LPC_MAC2_FULL_DUPLEX (1 << 0)
+#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
+#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
+#define LPC_MAC2_DELAYED_CRC (1 << 3)
+#define LPC_MAC2_CRC_ENABLE (1 << 4)
+#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
+#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
+#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
+#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
+#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
+#define LPC_MAC2_NO_BACKOFF (1 << 12)
+#define LPC_MAC2_BACK_PRESSURE (1 << 13)
+#define LPC_MAC2_EXCESS_DEFER (1 << 14)
+
+/*
+ * ipgt register definitions
+ */
+#define LPC_IPGT_LOAD(n) ((n) & 0x7F)
+
+/*
+ * ipgr register definitions
+ */
+#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
+#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
+
+/*
+ * clrt register definitions
+ */
+#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
+#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
+
+/*
+ * maxf register definitions
+ */
+#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
+
+/*
+ * supp register definitions
+ */
+#define LPC_SUPP_SPEED (1 << 8)
+#define LPC_SUPP_RESET_RMII (1 << 11)
+
+/*
+ * test register definitions
+ */
+#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
+#define LPC_TEST_PAUSE (1 << 1)
+#define LPC_TEST_BACKPRESSURE (1 << 2)
+
+/*
+ * mcfg register definitions
+ */
+#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
+#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
+#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
+#define LPC_MCFG_CLOCK_HOST_DIV_4 0
+#define LPC_MCFG_CLOCK_HOST_DIV_6 2
+#define LPC_MCFG_CLOCK_HOST_DIV_8 3
+#define LPC_MCFG_CLOCK_HOST_DIV_10 4
+#define LPC_MCFG_CLOCK_HOST_DIV_14 5
+#define LPC_MCFG_CLOCK_HOST_DIV_20 6
+#define LPC_MCFG_CLOCK_HOST_DIV_28 7
+#define LPC_MCFG_RESET_MII_MGMT (1 << 15)
+
+/*
+ * mcmd register definitions
+ */
+#define LPC_MCMD_READ (1 << 0)
+#define LPC_MCMD_SCAN (1 << 1)
+
+/*
+ * madr register definitions
+ */
+#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
+#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
+
+/*
+ * mwtd register definitions
+ */
+#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
+
+/*
+ * mrdd register definitions
+ */
+#define LPC_MRDD_READ_MASK 0xFFFF
+
+/*
+ * mind register definitions
+ */
+#define LPC_MIND_BUSY (1 << 0)
+#define LPC_MIND_SCANNING (1 << 1)
+#define LPC_MIND_NOT_VALID (1 << 2)
+#define LPC_MIND_MII_LINK_FAIL (1 << 3)
+
+/*
+ * command register definitions
+ */
+#define LPC_COMMAND_RXENABLE (1 << 0)
+#define LPC_COMMAND_TXENABLE (1 << 1)
+#define LPC_COMMAND_REG_RESET (1 << 3)
+#define LPC_COMMAND_TXRESET (1 << 4)
+#define LPC_COMMAND_RXRESET (1 << 5)
+#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
+#define LPC_COMMAND_PASSRXFILTER (1 << 7)
+#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
+#define LPC_COMMAND_RMII (1 << 9)
+#define LPC_COMMAND_FULLDUPLEX (1 << 10)
+
+/*
+ * status register definitions
+ */
+#define LPC_STATUS_RXACTIVE (1 << 0)
+#define LPC_STATUS_TXACTIVE (1 << 1)
+
+/*
+ * tsv0 register definitions
+ */
+#define LPC_TSV0_CRC_ERROR (1 << 0)
+#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
+#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
+#define LPC_TSV0_DONE (1 << 3)
+#define LPC_TSV0_MULTICAST (1 << 4)
+#define LPC_TSV0_BROADCAST (1 << 5)
+#define LPC_TSV0_PACKET_DEFER (1 << 6)
+#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
+#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
+#define LPC_TSV0_LATE_COLLISION (1 << 9)
+#define LPC_TSV0_GIANT (1 << 10)
+#define LPC_TSV0_UNDERRUN (1 << 11)
+#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
+#define LPC_TSV0_CONTROL_FRAME (1 << 28)
+#define LPC_TSV0_PAUSE (1 << 29)
+#define LPC_TSV0_BACKPRESSURE (1 << 30)
+#define LPC_TSV0_VLAN (1 << 31)
+
+/*
+ * tsv1 register definitions
+ */
+#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
+#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
+
+/*
+ * rsv register definitions
+ */
+#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
+#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
+#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
+#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
+#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
+#define LPC_RSV_CRC_ERROR (1 << 20)
+#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
+#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
+#define LPC_RSV_RECEIVE_OK (1 << 23)
+#define LPC_RSV_MULTICAST (1 << 24)
+#define LPC_RSV_BROADCAST (1 << 25)
+#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
+#define LPC_RSV_CONTROL_FRAME (1 << 27)
+#define LPC_RSV_PAUSE (1 << 28)
+#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
+#define LPC_RSV_VLAN (1 << 30)
+
+/*
+ * flowcontrolcounter register definitions
+ */
+#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
+#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
+
+/*
+ * flowcontrolstatus register definitions
+ */
+#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
+
+/*
+ * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
+ * register definitions
+ */
+#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
+#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
+#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
+#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
+#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
+#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
+
+/*
+ * rxfilterctrl register definitions
+ */
+#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
+#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
+
+/*
+ * rxfilterwolstatus/rxfilterwolclear register definitions
+ */
+#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
+#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
+
+/*
+ * intstatus, intenable, intclear, and intset shared register
+ * definitions
+ */
+#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
+#define LPC_MACINT_RXERRORONINT (1 << 1)
+#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
+#define LPC_MACINT_RXDONEINTEN (1 << 3)
+#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
+#define LPC_MACINT_TXERRORINTEN (1 << 5)
+#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
+#define LPC_MACINT_TXDONEINTEN (1 << 7)
+#define LPC_MACINT_SOFTINTEN (1 << 12)
+#define LPC_MACINT_WAKEUPINTEN (1 << 13)
+
+/*
+ * powerdown register definitions
+ */
+#define LPC_POWERDOWN_MACAHB (1 << 31)
+
+/* Upon the upcoming introduction of device tree usage in LPC32xx,
+ * lpc_phy_interface_mode() and use_iram_for_net() will be extended with a
+ * device parameter for access to device tree information at runtime, instead
+ * of defining the values at compile time
+ */
+static inline phy_interface_t lpc_phy_interface_mode(void)
+{
+#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
+ return PHY_INTERFACE_MODE_MII;
+#else
+ return PHY_INTERFACE_MODE_RMII;
+#endif
+}
+
+static inline int use_iram_for_net(void)
+{
+#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
+ return 1;
+#else
+ return 0;
+#endif
+}
+
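As the comment above anticipates, these compile-time helpers are expected to take a device argument once LPC32xx moves to device tree. Purely as a sketch of that direction (the use of of_get_phy_mode() and the fallback value are assumptions here, not something this driver implements yet), the MII/RMII selection could become:

	static phy_interface_t lpc_phy_interface_mode(struct device *dev)
	{
		if (dev && dev->of_node) {
			int mode = of_get_phy_mode(dev->of_node);

			if (mode >= 0)
				return mode;	/* e.g. PHY_INTERFACE_MODE_MII or _RMII */
		}
		/* fall back to the RMII default used at compile time today */
		return PHY_INTERFACE_MODE_RMII;
	}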
+/* Receive Status information word */
+#define RXSTATUS_SIZE 0x000007FF
+#define RXSTATUS_CONTROL (1 << 18)
+#define RXSTATUS_VLAN (1 << 19)
+#define RXSTATUS_FILTER (1 << 20)
+#define RXSTATUS_MULTICAST (1 << 21)
+#define RXSTATUS_BROADCAST (1 << 22)
+#define RXSTATUS_CRC (1 << 23)
+#define RXSTATUS_SYMBOL (1 << 24)
+#define RXSTATUS_LENGTH (1 << 25)
+#define RXSTATUS_RANGE (1 << 26)
+#define RXSTATUS_ALIGN (1 << 27)
+#define RXSTATUS_OVERRUN (1 << 28)
+#define RXSTATUS_NODESC (1 << 29)
+#define RXSTATUS_LAST (1 << 30)
+#define RXSTATUS_ERROR (1 << 31)
+
+#define RXSTATUS_STATUS_ERROR \
+ (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
+ RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
+
+/* Receive Descriptor control word */
+#define RXDESC_CONTROL_SIZE 0x000007FF
+#define RXDESC_CONTROL_INT (1 << 31)
+
+/* Transmit Status information word */
+#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
+#define TXSTATUS_DEFER (1 << 25)
+#define TXSTATUS_EXCESSDEFER (1 << 26)
+#define TXSTATUS_EXCESSCOLL (1 << 27)
+#define TXSTATUS_LATECOLL (1 << 28)
+#define TXSTATUS_UNDERRUN (1 << 29)
+#define TXSTATUS_NODESC (1 << 30)
+#define TXSTATUS_ERROR (1 << 31)
+
+/* Transmit Descriptor control word */
+#define TXDESC_CONTROL_SIZE 0x000007FF
+#define TXDESC_CONTROL_OVERRIDE (1 << 26)
+#define TXDESC_CONTROL_HUGE (1 << 27)
+#define TXDESC_CONTROL_PAD (1 << 28)
+#define TXDESC_CONTROL_CRC (1 << 29)
+#define TXDESC_CONTROL_LAST (1 << 30)
+#define TXDESC_CONTROL_INT (1 << 31)
+
+static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+
+/*
+ * Structure of a TX/RX descriptors and RX status
+ */
+struct txrx_desc_t {
+ __le32 packet;
+ __le32 control;
+};
+struct rx_status_t {
+ __le32 statusinfo;
+ __le32 statushashcrc;
+};
+
+/*
+ * Device driver data structure
+ */
+struct netdata_local {
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ spinlock_t lock;
+ void __iomem *net_base;
+ u32 msg_enable;
+ struct sk_buff *skb[ENET_TX_DESC];
+ unsigned int last_tx_idx;
+ unsigned int num_used_tx_buffs;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ struct clk *clk;
+ dma_addr_t dma_buff_base_p;
+ void *dma_buff_base_v;
+ size_t dma_buff_size;
+ struct txrx_desc_t *tx_desc_v;
+ u32 *tx_stat_v;
+ void *tx_buff_v;
+ struct txrx_desc_t *rx_desc_v;
+ struct rx_status_t *rx_stat_v;
+ void *rx_buff_v;
+ int link;
+ int speed;
+ int duplex;
+ struct napi_struct napi;
+};
+
+/*
+ * MAC support functions
+ */
+static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+{
+ u32 tmp;
+
+ /* Set station address */
+ tmp = mac[0] | ((u32)mac[1] << 8);
+ writel(tmp, LPC_ENET_SA2(pldat->net_base));
+ tmp = mac[2] | ((u32)mac[3] << 8);
+ writel(tmp, LPC_ENET_SA1(pldat->net_base));
+ tmp = mac[4] | ((u32)mac[5] << 8);
+ writel(tmp, LPC_ENET_SA0(pldat->net_base));
+
+ netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
+}
+
+static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
+{
+ u32 tmp;
+
+ /* Get station address */
+ tmp = readl(LPC_ENET_SA2(pldat->net_base));
+ mac[0] = tmp & 0xFF;
+ mac[1] = tmp >> 8;
+ tmp = readl(LPC_ENET_SA1(pldat->net_base));
+ mac[2] = tmp & 0xFF;
+ mac[3] = tmp >> 8;
+ tmp = readl(LPC_ENET_SA0(pldat->net_base));
+ mac[4] = tmp & 0xFF;
+ mac[5] = tmp >> 8;
+}
+
+static void __lpc_eth_clock_enable(struct netdata_local *pldat,
+ bool enable)
+{
+ if (enable)
+ clk_enable(pldat->clk);
+ else
+ clk_disable(pldat->clk);
+}
+
+static void __lpc_params_setup(struct netdata_local *pldat)
+{
+ u32 tmp;
+
+ if (pldat->duplex == DUPLEX_FULL) {
+ tmp = readl(LPC_ENET_MAC2(pldat->net_base));
+ tmp |= LPC_MAC2_FULL_DUPLEX;
+ writel(tmp, LPC_ENET_MAC2(pldat->net_base));
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp |= LPC_COMMAND_FULLDUPLEX;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
+ } else {
+ tmp = readl(LPC_ENET_MAC2(pldat->net_base));
+ tmp &= ~LPC_MAC2_FULL_DUPLEX;
+ writel(tmp, LPC_ENET_MAC2(pldat->net_base));
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp &= ~LPC_COMMAND_FULLDUPLEX;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
+ }
+
+ if (pldat->speed == SPEED_100)
+ writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
+ else
+ writel(0, LPC_ENET_SUPP(pldat->net_base));
+}
+
+static void __lpc_eth_reset(struct netdata_local *pldat)
+{
+ /* Reset all MAC logic */
+ writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
+ LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
+ LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
+ writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
+ LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
+}
+
+static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
+{
+ /* Reset MII management hardware */
+ writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));
+
+ /* Setup MII clock to slowest rate with a /28 divider */
+ writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
+ LPC_ENET_MCFG(pldat->net_base));
+
+ return 0;
+}
+
+static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
+{
+ phys_addr_t phaddr;
+
+ phaddr = addr - pldat->dma_buff_base_v;
+ phaddr += pldat->dma_buff_base_p;
+
+ return phaddr;
+}
+
+static void lpc_eth_enable_int(void __iomem *regbase)
+{
+ writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
+ LPC_ENET_INTENABLE(regbase));
+}
+
+static void lpc_eth_disable_int(void __iomem *regbase)
+{
+ writel(0, LPC_ENET_INTENABLE(regbase));
+}
+
+/* Setup TX/RX descriptors */
+static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
+{
+ u32 *ptxstat;
+ void *tbuff;
+ int i;
+ struct txrx_desc_t *ptxrxdesc;
+ struct rx_status_t *prxstat;
+
+ tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);
+
+ /* Setup TX descriptors, status, and buffers */
+ pldat->tx_desc_v = tbuff;
+ tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;
+
+ pldat->tx_stat_v = tbuff;
+ tbuff += sizeof(u32) * ENET_TX_DESC;
+
+ tbuff = PTR_ALIGN(tbuff, 16);
+ pldat->tx_buff_v = tbuff;
+ tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;
+
+ /* Setup RX descriptors, status, and buffers */
+ pldat->rx_desc_v = tbuff;
+ tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;
+
+ tbuff = PTR_ALIGN(tbuff, 16);
+ pldat->rx_stat_v = tbuff;
+ tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;
+
+ tbuff = PTR_ALIGN(tbuff, 16);
+ pldat->rx_buff_v = tbuff;
+ tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;
+
+ /* Map the TX descriptors to the TX buffers in hardware */
+ for (i = 0; i < ENET_TX_DESC; i++) {
+ ptxstat = &pldat->tx_stat_v[i];
+ ptxrxdesc = &pldat->tx_desc_v[i];
+
+ ptxrxdesc->packet = __va_to_pa(
+ pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
+ ptxrxdesc->control = 0;
+ *ptxstat = 0;
+ }
+
+ /* Map the RX descriptors to the RX buffers in hardware */
+ for (i = 0; i < ENET_RX_DESC; i++) {
+ prxstat = &pldat->rx_stat_v[i];
+ ptxrxdesc = &pldat->rx_desc_v[i];
+
+ ptxrxdesc->packet = __va_to_pa(
+ pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
+ ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
+ prxstat->statusinfo = 0;
+ prxstat->statushashcrc = 0;
+ }
+
+ /* Setup base addresses in hardware to point to buffers and
+ * descriptors
+ */
+ writel((ENET_TX_DESC - 1),
+ LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
+ writel(__va_to_pa(pldat->tx_desc_v, pldat),
+ LPC_ENET_TXDESCRIPTOR(pldat->net_base));
+ writel(__va_to_pa(pldat->tx_stat_v, pldat),
+ LPC_ENET_TXSTATUS(pldat->net_base));
+ writel((ENET_RX_DESC - 1),
+ LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
+ writel(__va_to_pa(pldat->rx_desc_v, pldat),
+ LPC_ENET_RXDESCRIPTOR(pldat->net_base));
+ writel(__va_to_pa(pldat->rx_stat_v, pldat),
+ LPC_ENET_RXSTATUS(pldat->net_base));
+}
+
+static void __lpc_eth_init(struct netdata_local *pldat)
+{
+ u32 tmp;
+
+ /* Disable controller and reset */
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp &= ~(LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE);
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ tmp = readl(LPC_ENET_MAC1(pldat->net_base));
+ tmp &= ~LPC_MAC1_RECV_ENABLE;
+ writel(tmp, LPC_ENET_MAC1(pldat->net_base));
+
+ /* Initial MAC setup */
+ writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
+ writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
+ LPC_ENET_MAC2(pldat->net_base));
+ writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
+
+ /* Collision window, gap */
+ writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
+ LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
+ LPC_ENET_CLRT(pldat->net_base));
+ writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
+
+ if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
+ writel(LPC_COMMAND_PASSRUNTFRAME,
+ LPC_ENET_COMMAND(pldat->net_base));
+ else {
+ writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
+ LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
+ }
+
+ __lpc_params_setup(pldat);
+
+ /* Setup TX and RX descriptors */
+ __lpc_txrx_desc_setup(pldat);
+
+ /* Setup packet filtering */
+ writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
+ LPC_ENET_RXFILTER_CTRL(pldat->net_base));
+
+ /* Get the next TX buffer output index */
+ pldat->num_used_tx_buffs = 0;
+ pldat->last_tx_idx =
+ readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+
+ /* Clear and enable interrupts */
+ writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
+ smp_wmb();
+ lpc_eth_enable_int(pldat->net_base);
+
+ /* Enable controller */
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ tmp = readl(LPC_ENET_MAC1(pldat->net_base));
+ tmp |= LPC_MAC1_RECV_ENABLE;
+ writel(tmp, LPC_ENET_MAC1(pldat->net_base));
+}
+
+static void __lpc_eth_shutdown(struct netdata_local *pldat)
+{
+ /* Reset ethernet and power down PHY */
+ __lpc_eth_reset(pldat);
+ writel(0, LPC_ENET_MAC1(pldat->net_base));
+ writel(0, LPC_ENET_MAC2(pldat->net_base));
+}
+
+/*
+ * MAC<--->PHY support functions
+ */
+static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
+{
+ struct netdata_local *pldat = bus->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ int lps;
+
+ writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
+ writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
+
+ /* Wait for unbusy status */
+ while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
+ if (time_after(jiffies, timeout))
+ return -EIO;
+ cpu_relax();
+ }
+
+ lps = readl(LPC_ENET_MRDD(pldat->net_base));
+ writel(0, LPC_ENET_MCMD(pldat->net_base));
+
+ return lps;
+}
+
+static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
+ u16 phydata)
+{
+ struct netdata_local *pldat = bus->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
+ writel(phydata, LPC_ENET_MWTD(pldat->net_base));
+
+ /* Wait for completion */
+ while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
+ if (time_after(jiffies, timeout))
+ return -EIO;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int lpc_mdio_reset(struct mii_bus *bus)
+{
+ return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
+}
+
+static void lpc_handle_link_change(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+ unsigned long flags;
+
+ bool status_change = false;
+
+ spin_lock_irqsave(&pldat->lock, flags);
+
+ if (phydev->link) {
+ if ((pldat->speed != phydev->speed) ||
+ (pldat->duplex != phydev->duplex)) {
+ pldat->speed = phydev->speed;
+ pldat->duplex = phydev->duplex;
+ status_change = true;
+ }
+ }
+
+ if (phydev->link != pldat->link) {
+ if (!phydev->link) {
+ pldat->speed = 0;
+ pldat->duplex = -1;
+ }
+ pldat->link = phydev->link;
+
+ status_change = true;
+ }
+
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
+ if (status_change)
+ __lpc_params_setup(pldat);
+}
+
+static int lpc_mii_probe(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = phy_find_first(pldat->mii_bus);
+
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ /* Attach to the PHY */
+ if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
+ netdev_info(ndev, "using MII interface\n");
+ else
+ netdev_info(ndev, "using RMII interface\n");
+ phydev = phy_connect(ndev, dev_name(&phydev->dev),
+ &lpc_handle_link_change, 0, lpc_phy_interface_mode());
+
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= PHY_BASIC_FEATURES;
+
+ phydev->advertising = phydev->supported;
+
+ pldat->link = 0;
+ pldat->speed = 0;
+ pldat->duplex = -1;
+ pldat->phy_dev = phydev;
+
+ netdev_info(ndev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ return 0;
+}
+
+static int lpc_mii_init(struct netdata_local *pldat)
+{
+ int err = -ENXIO, i;
+
+ pldat->mii_bus = mdiobus_alloc();
+ if (!pldat->mii_bus) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Setup MII mode */
+ if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
+ writel(LPC_COMMAND_PASSRUNTFRAME,
+ LPC_ENET_COMMAND(pldat->net_base));
+ else {
+ writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
+ LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
+ }
+
+ pldat->mii_bus->name = "lpc_mii_bus";
+ pldat->mii_bus->read = &lpc_mdio_read;
+ pldat->mii_bus->write = &lpc_mdio_write;
+ pldat->mii_bus->reset = &lpc_mdio_reset;
+ snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pldat->pdev->name, pldat->pdev->id);
+ pldat->mii_bus->priv = pldat;
+ pldat->mii_bus->parent = &pldat->pdev->dev;
+
+ pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!pldat->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_1;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ pldat->mii_bus->irq[i] = PHY_POLL;
+
+ platform_set_drvdata(pldat->pdev, pldat->mii_bus);
+
+ if (mdiobus_register(pldat->mii_bus))
+ goto err_out_free_mdio_irq;
+
+ if (lpc_mii_probe(pldat->ndev) != 0)
+ goto err_out_unregister_bus;
+
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(pldat->mii_bus);
+err_out_free_mdio_irq:
+ kfree(pldat->mii_bus->irq);
+err_out_1:
+ mdiobus_free(pldat->mii_bus);
+err_out:
+ return err;
+}
+
+static void __lpc_handle_xmit(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 txcidx, *ptxstat, txstat;
+
+ txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+ while (pldat->last_tx_idx != txcidx) {
+ skb = pldat->skb[pldat->last_tx_idx];
+
+ /* A buffer is available, get buffer status */
+ ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
+ txstat = *ptxstat;
+
+ /* Next buffer and decrement used buffer counter */
+ pldat->num_used_tx_buffs--;
+ pldat->last_tx_idx++;
+ if (pldat->last_tx_idx >= ENET_TX_DESC)
+ pldat->last_tx_idx = 0;
+
+ /* Update collision counter */
+ ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);
+
+ /* Any errors occurred? */
+ if (txstat & TXSTATUS_ERROR) {
+ if (txstat & TXSTATUS_UNDERRUN) {
+ /* FIFO underrun */
+ ndev->stats.tx_fifo_errors++;
+ }
+ if (txstat & TXSTATUS_LATECOLL) {
+ /* Late collision */
+ ndev->stats.tx_aborted_errors++;
+ }
+ if (txstat & TXSTATUS_EXCESSCOLL) {
+ /* Excessive collision */
+ ndev->stats.tx_aborted_errors++;
+ }
+ if (txstat & TXSTATUS_EXCESSDEFER) {
+ /* Defer limit */
+ ndev->stats.tx_aborted_errors++;
+ }
+ ndev->stats.tx_errors++;
+ } else {
+ /* Update stats */
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+
+ /* Free buffer */
+ dev_kfree_skb_irq(skb);
+ }
+
+ txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+ }
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static int __lpc_handle_recv(struct net_device *ndev, int budget)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 rxconsidx, len, ethst;
+ struct rx_status_t *prxstat;
+ u8 *prdbuf;
+ int rx_done = 0;
+
+ /* Get the current RX buffer indexes */
+ rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
+ while (rx_done < budget && rxconsidx !=
+ readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
+ /* Get pointer to receive status */
+ prxstat = &pldat->rx_stat_v[rxconsidx];
+ len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;
+
+ /* Status error? */
+ ethst = prxstat->statusinfo;
+ if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
+ (RXSTATUS_ERROR | RXSTATUS_RANGE))
+ ethst &= ~RXSTATUS_ERROR;
+
+ if (ethst & RXSTATUS_ERROR) {
+ int si = prxstat->statusinfo;
+ /* Check statuses */
+ if (si & RXSTATUS_OVERRUN) {
+ /* Overrun error */
+ ndev->stats.rx_fifo_errors++;
+ } else if (si & RXSTATUS_CRC) {
+ /* CRC error */
+ ndev->stats.rx_crc_errors++;
+ } else if (si & RXSTATUS_LENGTH) {
+ /* Length error */
+ ndev->stats.rx_length_errors++;
+ } else if (si & RXSTATUS_ERROR) {
+ /* Other error */
+ ndev->stats.rx_length_errors++;
+ }
+ ndev->stats.rx_errors++;
+ } else {
+ /* Packet is good */
+ skb = dev_alloc_skb(len + 8);
+ if (!skb)
+ ndev->stats.rx_dropped++;
+ else {
+ prdbuf = skb_put(skb, len);
+
+ /* Copy packet from buffer */
+ memcpy(prdbuf, pldat->rx_buff_v +
+ rxconsidx * ENET_MAXF_SIZE, len);
+
+ /* Pass to upper layer */
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ /* Increment consume index */
+ rxconsidx = rxconsidx + 1;
+ if (rxconsidx >= ENET_RX_DESC)
+ rxconsidx = 0;
+ writel(rxconsidx,
+ LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
+ rx_done++;
+ }
+
+ return rx_done;
+}
+
+static int lpc_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct netdata_local *pldat = container_of(napi,
+ struct netdata_local, napi);
+ struct net_device *ndev = pldat->ndev;
+ int rx_done = 0;
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
+
+ __netif_tx_lock(txq, smp_processor_id());
+ __lpc_handle_xmit(ndev);
+ __netif_tx_unlock(txq);
+ rx_done = __lpc_handle_recv(ndev, budget);
+
+ if (rx_done < budget) {
+ napi_complete(napi);
+ lpc_eth_enable_int(pldat->net_base);
+ }
+
+ return rx_done;
+}
+
+static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct netdata_local *pldat = netdev_priv(ndev);
+ u32 tmp;
+
+ spin_lock(&pldat->lock);
+
+ tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
+ /* Clear interrupts */
+ writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));
+
+ lpc_eth_disable_int(pldat->net_base);
+ if (likely(napi_schedule_prep(&pldat->napi)))
+ __napi_schedule(&pldat->napi);
+
+ spin_unlock(&pldat->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc_eth_close(struct net_device *ndev)
+{
+ unsigned long flags;
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ if (netif_msg_ifdown(pldat))
+ dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);
+
+ napi_disable(&pldat->napi);
+ netif_stop_queue(ndev);
+
+ if (pldat->phy_dev)
+ phy_stop(pldat->phy_dev);
+
+ spin_lock_irqsave(&pldat->lock, flags);
+ __lpc_eth_reset(pldat);
+ netif_carrier_off(ndev);
+ writel(0, LPC_ENET_MAC1(pldat->net_base));
+ writel(0, LPC_ENET_MAC2(pldat->net_base));
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
+ __lpc_eth_clock_enable(pldat, false);
+
+ return 0;
+}
+
+static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ u32 len, txidx;
+ u32 *ptxstat;
+ struct txrx_desc_t *ptxrxdesc;
+
+ len = skb->len;
+
+ spin_lock_irq(&pldat->lock);
+
+ if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
+ /* This function should never be called when there are no
+ buffers */
+ netif_stop_queue(ndev);
+ spin_unlock_irq(&pldat->lock);
+ WARN(1, "BUG! TX request when no free TX buffers!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Get the next TX descriptor index */
+ txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
+
+ /* Setup control for the transfer */
+ ptxstat = &pldat->tx_stat_v[txidx];
+ *ptxstat = 0;
+ ptxrxdesc = &pldat->tx_desc_v[txidx];
+ ptxrxdesc->control =
+ (len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;
+
+ /* Copy data to the DMA buffer */
+ memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
+
+ /* Save the buffer and increment the buffer counter */
+ pldat->skb[txidx] = skb;
+ pldat->num_used_tx_buffs++;
+
+ /* Start transmit */
+ txidx++;
+ if (txidx >= ENET_TX_DESC)
+ txidx = 0;
+ writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
+
+ /* Stop queue if no more TX buffers */
+ if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irq(&pldat->lock);
+
+ return NETDEV_TX_OK;
+}
+
+static int lpc_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct netdata_local *pldat = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ spin_lock_irqsave(&pldat->lock, flags);
+
+ /* Set station address */
+ __lpc_set_mac(pldat, ndev->dev_addr);
+
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
+ return 0;
+}
+
+static void lpc_eth_set_multicast_list(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct netdev_hw_addr_list *mcptr = &ndev->mc;
+ struct netdev_hw_addr *ha;
+ u32 tmp32, hash_val, hashlo, hashhi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pldat->lock, flags);
+
+ /* Set station address */
+ __lpc_set_mac(pldat, ndev->dev_addr);
+
+ tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
+
+ if (ndev->flags & IFF_PROMISC)
+ tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
+ LPC_RXFLTRW_ACCEPTUMULTICAST;
+ if (ndev->flags & IFF_ALLMULTI)
+ tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
+
+ if (netdev_hw_addr_list_count(mcptr))
+ tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
+
+ writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
+
+
+ /* Set initial hash table */
+ hashlo = 0x0;
+ hashhi = 0x0;
+
+ /* 64 bits : multicast address in hash table */
+ netdev_hw_addr_list_for_each(ha, mcptr) {
+ hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
+
+ if (hash_val >= 32)
+ hashhi |= 1 << (hash_val - 32);
+ else
+ hashlo |= 1 << hash_val;
+ }
+
+ writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
+ writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
+
+ spin_unlock_irqrestore(&pldat->lock, flags);
+}
+
+static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static int lpc_eth_open(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ if (netif_msg_ifup(pldat))
+ dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ __lpc_eth_clock_enable(pldat, true);
+
+ /* Reset and initialize */
+ __lpc_eth_reset(pldat);
+ __lpc_eth_init(pldat);
+
+ /* schedule a link state check */
+ phy_start(pldat->phy_dev);
+ netif_start_queue(ndev);
+ napi_enable(&pldat->napi);
+
+ return 0;
+}
+
+/*
+ * Ethtool ops
+ */
+static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, dev_name(ndev->dev.parent));
+}
+
+static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ return pldat->msg_enable;
+}
+
+static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ pldat->msg_enable = level;
+}
+
+static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static const struct ethtool_ops lpc_eth_ethtool_ops = {
+ .get_drvinfo = lpc_eth_ethtool_getdrvinfo,
+ .get_settings = lpc_eth_ethtool_getsettings,
+ .set_settings = lpc_eth_ethtool_setsettings,
+ .get_msglevel = lpc_eth_ethtool_getmsglevel,
+ .set_msglevel = lpc_eth_ethtool_setmsglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops lpc_netdev_ops = {
+ .ndo_open = lpc_eth_open,
+ .ndo_stop = lpc_eth_close,
+ .ndo_start_xmit = lpc_eth_hard_start_xmit,
+ .ndo_set_rx_mode = lpc_eth_set_multicast_list,
+ .ndo_do_ioctl = lpc_eth_ioctl,
+ .ndo_set_mac_address = lpc_set_mac_address,
+};
+
+static int lpc_eth_drv_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct resource *dma_res;
+ struct net_device *ndev;
+ struct netdata_local *pldat;
+ struct phy_device *phydev;
+ dma_addr_t dma_handle;
+ int irq, ret;
+
+ /* Get platform resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ if ((!res) || (!dma_res) || (irq < 0) || (irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "error getting resources.\n");
+ ret = -ENXIO;
+ goto err_exit;
+ }
+
+ /* Allocate net driver data structure */
+ ndev = alloc_etherdev(sizeof(struct netdata_local));
+ if (!ndev) {
+ dev_err(&pdev->dev, "could not allocate device.\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ pldat = netdev_priv(ndev);
+ pldat->pdev = pdev;
+ pldat->ndev = ndev;
+
+ spin_lock_init(&pldat->lock);
+
+ /* Save resources */
+ ndev->irq = irq;
+
+ /* Get clock for the device */
+ pldat->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pldat->clk)) {
+ dev_err(&pdev->dev, "error getting clock.\n");
+ ret = PTR_ERR(pldat->clk);
+ goto err_out_free_dev;
+ }
+
+ /* Enable network clock */
+ __lpc_eth_clock_enable(pldat, true);
+
+ /* Map IO space */
+ pldat->net_base = ioremap(res->start, res->end - res->start + 1);
+ if (!pldat->net_base) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ ret = -ENOMEM;
+ goto err_out_disable_clocks;
+ }
+ ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "error requesting interrupt.\n");
+ goto err_out_iounmap;
+ }
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(ndev);
+
+ /* Setup driver functions */
+ ndev->netdev_ops = &lpc_netdev_ops;
+ ndev->ethtool_ops = &lpc_eth_ethtool_ops;
+ ndev->watchdog_timeo = msecs_to_jiffies(2500);
+
+ /* Get size of DMA buffers/descriptors region */
+ pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
+ sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
+ pldat->dma_buff_base_v = 0;
+
+ if (use_iram_for_net()) {
+ dma_handle = dma_res->start;
+ if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
+ pldat->dma_buff_base_v =
+ io_p2v(dma_res->start);
+ else
+ netdev_err(ndev,
+ "IRAM not big enough for net buffers, using SDRAM instead.\n");
+ }
+
+ if (pldat->dma_buff_base_v == 0) {
+ pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
+
+ /* Allocate a chunk of memory for the DMA ethernet buffers
+ and descriptors */
+ pldat->dma_buff_base_v =
+ dma_alloc_coherent(&pldat->pdev->dev,
+ pldat->dma_buff_size, &dma_handle,
+ GFP_KERNEL);
+
+ if (pldat->dma_buff_base_v == NULL) {
+ dev_err(&pdev->dev, "error getting DMA region.\n");
+ ret = -ENOMEM;
+ goto err_out_free_irq;
+ }
+ }
+ pldat->dma_buff_base_p = dma_handle;
+
+ netdev_dbg(ndev, "IO address start :0x%08x\n",
+ res->start);
+ netdev_dbg(ndev, "IO address size :%d\n",
+ res->end - res->start + 1);
+ netdev_dbg(ndev, "IO address (mapped)	:0x%p\n",
+ pldat->net_base);
+ netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
+ netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
+ netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
+ pldat->dma_buff_base_p);
+ netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
+ pldat->dma_buff_base_v);
+
+ /* Get MAC address from current HW setting (POR state is all zeros) */
+ __lpc_get_mac(pldat, ndev->dev_addr);
+
+#ifdef CONFIG_OF_NET
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ const char *macaddr = of_get_mac_address(pdev->dev.of_node);
+ if (macaddr)
+ memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
+ }
+#endif
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ dev_hw_addr_random(ndev, ndev->dev_addr);
+
+ /* Reset the ethernet controller */
+ __lpc_eth_reset(pldat);
+
+ /* then shut everything down to save power */
+ __lpc_eth_shutdown(pldat);
+
+ /* Set default parameters */
+ pldat->msg_enable = NETIF_MSG_LINK;
+
+ /* Force an MII interface reset and clock setup */
+ __lpc_mii_mngt_reset(pldat);
+
+ /* Force default PHY interface setup in the chip; this will probably
+ be changed by the PHY driver */
+ pldat->link = 0;
+ pldat->speed = 100;
+ pldat->duplex = DUPLEX_FULL;
+ __lpc_params_setup(pldat);
+
+ netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_dma_unmap;
+ }
+ platform_set_drvdata(pdev, ndev);
+
+ if (lpc_mii_init(pldat) != 0)
+ goto err_out_unregister_netdev;
+
+ netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
+ res->start, ndev->irq);
+
+ phydev = pldat->phy_dev;
+
+ device_init_wakeup(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, 0);
+
+ return 0;
+
+err_out_unregister_netdev:
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+err_out_dma_unmap:
+ if (!use_iram_for_net() ||
+ pldat->dma_buff_size > lpc32xx_return_iram_size())
+ dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
+ pldat->dma_buff_base_v,
+ pldat->dma_buff_base_p);
+err_out_free_irq:
+ free_irq(ndev->irq, ndev);
+err_out_iounmap:
+ iounmap(pldat->net_base);
+err_out_disable_clocks:
+ clk_disable(pldat->clk);
+ clk_put(pldat->clk);
+err_out_free_dev:
+ free_netdev(ndev);
+err_exit:
+ pr_err("%s: not found (%d).\n", MODNAME, ret);
+ return ret;
+}
+
+static int lpc_eth_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ if (!use_iram_for_net() ||
+ pldat->dma_buff_size > lpc32xx_return_iram_size())
+ dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
+ pldat->dma_buff_base_v,
+ pldat->dma_buff_base_p);
+ free_irq(ndev->irq, ndev);
+ iounmap(pldat->net_base);
+ mdiobus_free(pldat->mii_bus);
+ clk_disable(pldat->clk);
+ clk_put(pldat->clk);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc_eth_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(ndev->irq);
+
+ if (ndev) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ __lpc_eth_shutdown(pldat);
+ clk_disable(pldat->clk);
+
+ /*
+ * Reset again now that the clock is disabled to be
+ * sure EMC_MDC is down
+ */
+ __lpc_eth_reset(pldat);
+ }
+ }
+
+ return 0;
+}
+
+static int lpc_eth_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct netdata_local *pldat;
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(ndev->irq);
+
+ if (ndev) {
+ if (netif_running(ndev)) {
+ pldat = netdev_priv(ndev);
+
+ /* Enable interface clock */
+ clk_enable(pldat->clk);
+
+ /* Reset and initialize */
+ __lpc_eth_reset(pldat);
+ __lpc_eth_init(pldat);
+
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static struct platform_driver lpc_eth_driver = {
+ .probe = lpc_eth_drv_probe,
+ .remove = __devexit_p(lpc_eth_drv_remove),
+#ifdef CONFIG_PM
+ .suspend = lpc_eth_drv_suspend,
+ .resume = lpc_eth_drv_resume,
+#endif
+ .driver = {
+ .name = MODNAME,
+ },
+};
+
+module_platform_driver(lpc_eth_driver);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("LPC Ethernet Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 00bc4fc..bce0164 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -20,3 +20,16 @@
purpose use.
ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+
+if PCH_GBE
+
+config PCH_PTP
+ bool "PCH PTP clock support"
+ default n
+ depends on PTP_1588_CLOCK_PCH
+ ---help---
+ Say Y here if you want to use Precision Time Protocol (PTP) in the
+ driver. PTP is a method to precisely synchronize distributed clocks
+ over Ethernet networks.
+
+endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index a09a0719..dd14915 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -630,6 +630,9 @@
unsigned long tx_queue_len;
bool have_msi;
bool rx_stop_flag;
+ int hwts_tx_en;
+ int hwts_rx_en;
+ struct pci_dev *ptp_pdev;
};
extern const char pch_driver_version[];
@@ -648,6 +651,16 @@
extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
struct pch_gbe_rx_ring *rx_ring);
extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+#ifdef CONFIG_PCH_PTP
+extern u32 pch_ch_control_read(struct pci_dev *pdev);
+extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+extern u32 pch_ch_event_read(struct pci_dev *pdev);
+extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+extern u64 pch_rx_snap_read(struct pci_dev *pdev);
+extern u64 pch_tx_snap_read(struct pci_dev *pdev);
+#endif
/* pch_gbe_param.c */
extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 69a66545..8035e5f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
@@ -21,6 +21,10 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
+#ifdef CONFIG_PCH_PTP
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#endif
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
@@ -95,12 +99,195 @@
#define PCH_GBE_INT_DISABLE_ALL 0
+#ifdef CONFIG_PCH_PTP
+/* Macros for ieee1588 */
+#define TICKS_NS_SHIFT 5
+
+/* 0x40 Time Synchronization Channel Control Register Bits */
+#define MASTER_MODE (1<<0)
+#define SLAVE_MODE (0<<0)
+#define V2_MODE (1<<31)
+#define CAP_MODE0 (0<<16)
+#define CAP_MODE2 (1<<17)
+
+/* 0x44 Time Synchronization Channel Event Register Bits */
+#define TX_SNAPSHOT_LOCKED (1<<0)
+#define RX_SNAPSHOT_LOCKED (1<<1)
+#endif
+
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
+#ifdef CONFIG_PCH_PTP
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
+static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
+{
+ u8 *data = skb->data;
+ unsigned int offset;
+ u16 *hi, *id;
+ u32 lo;
+
+ if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
+ (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
+ return 0;
+ }
+
+ offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+
+ if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
+ return 0;
+
+ hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
+ id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ memcpy(&lo, &hi[1], sizeof(lo));
+
+ return (uid_hi == *hi &&
+ uid_lo == lo &&
+ seqid == *id);
+}
+
+static void pch_rx_timestamp(
+ struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct pci_dev *pdev;
+ u64 ns;
+ u32 hi, lo, val;
+ u16 uid, seq;
+
+ if (!adapter->hwts_rx_en)
+ return;
+
+ /* Get ieee1588's dev information */
+ pdev = adapter->ptp_pdev;
+
+ val = pch_ch_event_read(pdev);
+
+ if (!(val & RX_SNAPSHOT_LOCKED))
+ return;
+
+ lo = pch_src_uuid_lo_read(pdev);
+ hi = pch_src_uuid_hi_read(pdev);
+
+ uid = hi & 0xffff;
+ seq = (hi >> 16) & 0xffff;
+
+ if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+ goto out;
+
+ ns = pch_rx_snap_read(pdev);
+ ns <<= TICKS_NS_SHIFT;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+out:
+ pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
+}
+
+static void pch_tx_timestamp(
+ struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct pci_dev *pdev;
+ struct skb_shared_info *shtx;
+ u64 ns;
+ u32 cnt, val;
+
+ shtx = skb_shinfo(skb);
+ if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
+ shtx->tx_flags |= SKBTX_IN_PROGRESS;
+ else
+ return;
+
+ /* Get ieee1588's dev information */
+ pdev = adapter->ptp_pdev;
+
+ /*
+ * This really stinks, but we have to poll for the Tx time stamp.
+ * Usually, the time stamp is ready after 4 to 6 microseconds.
+ */
+ for (cnt = 0; cnt < 100; cnt++) {
+ val = pch_ch_event_read(pdev);
+ if (val & TX_SNAPSHOT_LOCKED)
+ break;
+ udelay(1);
+ }
+ if (!(val & TX_SNAPSHOT_LOCKED)) {
+ shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
+ return;
+ }
+
+ ns = pch_tx_snap_read(pdev);
+ ns <<= TICKS_NS_SHIFT;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
+}
+
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config cfg;
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ /* Get ieee1588's dev information */
+ pdev = adapter->ptp_pdev;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ adapter->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ adapter->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ adapter->hwts_rx_en = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ adapter->hwts_rx_en = 0;
+ pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ adapter->hwts_rx_en = 1;
+ pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ adapter->hwts_rx_en = 1;
+ pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out any old time stamps. */
+ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#endif
+
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
@@ -1072,6 +1259,11 @@
iowrite32(tx_ring->dma +
(int)sizeof(struct pch_gbe_tx_desc) * ring_num,
&hw->reg->TX_DSC_SW_P);
+
+#ifdef CONFIG_PCH_PTP
+ pch_tx_timestamp(adapter, skb);
+#endif
+
dev_kfree_skb_any(skb);
}
@@ -1543,6 +1735,11 @@
adapter->stats.multicast++;
/* Write meta date of skb */
skb_put(skb, length);
+
+#ifdef CONFIG_PCH_PTP
+ pch_rx_timestamp(adapter, skb);
+#endif
+
skb->protocol = eth_type_trans(skb, netdev);
if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
skb->ip_summed = CHECKSUM_NONE;
@@ -2144,6 +2341,11 @@
pr_debug("cmd : 0x%04x\n", cmd);
+#ifdef CONFIG_PCH_PTP
+ if (cmd == SIOCSHWTSTAMP)
+ return hwtstamp_ioctl(netdev, ifr, cmd);
+#endif
+
return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
@@ -2435,6 +2637,15 @@
goto err_free_netdev;
}
+#ifdef CONFIG_PCH_PTP
+ adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
+ PCI_DEVFN(12, 4));
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+ pr_err("Bad ptp filter\n");
+ return -EINVAL;
+ }
+#endif
+
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
netif_napi_add(netdev, &adapter->napi,
@@ -2499,7 +2710,7 @@
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
+ dev_dbg(&pdev->dev, "PCH Network Connection\n");
device_set_wakeup_enable(&pdev->dev, 1);
return 0;
@@ -2600,7 +2811,7 @@
module_exit(pch_gbe_exit_module);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
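
Not part of the patch itself: a minimal user-space sketch of how the SIOCSHWTSTAMP handler added above is typically exercised. The interface name and the absence of error reporting are placeholders; the request and filter constants are the standard ones from linux/net_tstamp.h.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask the driver to timestamp all transmitted frames and all received
 * PTP v2 event messages.  cfg.flags must stay zero; the new handler
 * rejects anything else with -EINVAL. */
static int enable_hw_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* hwts_tx_en = 1 */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* V2_MODE | CAP_MODE2 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}
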
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 817302c..27c358c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -255,10 +255,6 @@
RTL_CFG_2
};
-static void rtl_hw_start_8169(struct net_device *);
-static void rtl_hw_start_8168(struct net_device *);
-static void rtl_hw_start_8101(struct net_device *);
-
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
@@ -774,22 +770,6 @@
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
-static int rtl8169_open(struct net_device *dev);
-static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
-static int rtl8169_init_ring(struct net_device *dev);
-static void rtl_hw_start(struct net_device *dev);
-static int rtl8169_close(struct net_device *dev);
-static void rtl_set_rx_mode(struct net_device *dev);
-static void rtl8169_tx_timeout(struct net_device *dev);
-static struct rtnl_link_stats64 *rtl8169_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64
- *stats);
-static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
-static void rtl8169_rx_clear(struct rtl8169_private *tp);
-static int rtl8169_poll(struct napi_struct *napi, int budget);
-
static void rtl_lock_work(struct rtl8169_private *tp)
{
mutex_lock(&tp->wk.mutex);
@@ -3303,15 +3283,6 @@
rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void rtl8169_netpoll(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_interrupt(tp->pci_dev->irq, dev);
-}
-#endif
-
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
{
@@ -3463,63 +3434,6 @@
return -EOPNOTSUPP;
}
-static const struct rtl_cfg_info {
- void (*hw_start)(struct net_device *);
- unsigned int region;
- unsigned int align;
- u16 event_slow;
- unsigned features;
- u8 default_ver;
-} rtl_cfg_infos [] = {
- [RTL_CFG_0] = {
- .hw_start = rtl_hw_start_8169,
- .region = 1,
- .align = 0,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
- .features = RTL_FEATURE_GMII,
- .default_ver = RTL_GIGA_MAC_VER_01,
- },
- [RTL_CFG_1] = {
- .hw_start = rtl_hw_start_8168,
- .region = 2,
- .align = 8,
- .event_slow = SYSErr | LinkChg | RxOverflow,
- .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
- .default_ver = RTL_GIGA_MAC_VER_11,
- },
- [RTL_CFG_2] = {
- .hw_start = rtl_hw_start_8101,
- .region = 2,
- .align = 8,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
- PCSTimeout,
- .features = RTL_FEATURE_MSI,
- .default_ver = RTL_GIGA_MAC_VER_13,
- }
-};
-
-/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct rtl8169_private *tp,
- const struct rtl_cfg_info *cfg)
-{
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned msi = 0;
- u8 cfg2;
-
- cfg2 = RTL_R8(Config2) & ~MSIEnable;
- if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(tp->pci_dev)) {
- netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
- } else {
- cfg2 |= MSIEnable;
- msi = RTL_FEATURE_MSI;
- }
- }
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- RTL_W8(Config2, cfg2);
- return msi;
-}
-
static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
{
if (tp->features & RTL_FEATURE_MSI) {
@@ -3528,25 +3442,6 @@
}
}
-static const struct net_device_ops rtl8169_netdev_ops = {
- .ndo_open = rtl8169_open,
- .ndo_stop = rtl8169_close,
- .ndo_get_stats64 = rtl8169_get_stats64,
- .ndo_start_xmit = rtl8169_start_xmit,
- .ndo_tx_timeout = rtl8169_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = rtl8169_change_mtu,
- .ndo_fix_features = rtl8169_fix_features,
- .ndo_set_features = rtl8169_set_features,
- .ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = rtl8169_ioctl,
- .ndo_set_rx_mode = rtl_set_rx_mode,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = rtl8169_netpoll,
-#endif
-
-};
-
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
@@ -4018,279 +3913,6 @@
}
}
-static int __devinit
-rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
- const unsigned int region = cfg->region;
- struct rtl8169_private *tp;
- struct mii_if_info *mii;
- struct net_device *dev;
- void __iomem *ioaddr;
- int chipset, i;
- int rc;
-
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
-
- dev = alloc_etherdev(sizeof (*tp));
- if (!dev) {
- rc = -ENOMEM;
- goto out;
- }
-
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->netdev_ops = &rtl8169_netdev_ops;
- tp = netdev_priv(dev);
- tp->dev = dev;
- tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
-
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- /* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
- if (rc < 0) {
- netif_err(tp, probe, dev, "enable failure\n");
- goto err_out_free_dev_1;
- }
-
- if (pci_set_mwi(pdev) < 0)
- netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
-
- /* make sure PCI base addr 1 is MMIO */
- if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- netif_err(tp, probe, dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- rc = -ENODEV;
- goto err_out_mwi_2;
- }
-
- /* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- netif_err(tp, probe, dev,
- "Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out_mwi_2;
- }
-
- rc = pci_request_regions(pdev, MODULENAME);
- if (rc < 0) {
- netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_2;
- }
-
- tp->cp_cmd = RxChkSum;
-
- if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
- tp->cp_cmd |= PCIDAC;
- dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_3;
- }
- }
-
- /* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
- if (!ioaddr) {
- netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res_3;
- }
- tp->mmio_addr = ioaddr;
-
- if (!pci_is_pcie(pdev))
- netif_info(tp, probe, dev, "not PCI Express\n");
-
- /* Identify chip attached to board */
- rtl8169_get_mac_version(tp, dev, cfg->default_ver);
-
- rtl_init_rxcfg(tp);
-
- rtl_irq_disable(tp);
-
- rtl_hw_reset(tp);
-
- rtl_ack_events(tp, 0xffff);
-
- pci_set_master(pdev);
-
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- tp->cp_cmd |= RxVlan;
-
- rtl_init_mdio_ops(tp);
- rtl_init_pll_power_ops(tp);
- rtl_init_jumbo_ops(tp);
-
- rtl8169_print_mac_version(tp);
-
- chipset = tp->mac_version;
- tp->txd_version = rtl_chip_infos[chipset].txd_version;
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
- if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(tp, cfg);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_settings = rtl8169_gset_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_settings = rtl8169_gset_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
- }
-
- mutex_init(&tp->wk.mutex);
-
- /* Get MAC address */
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
- dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
- dev->irq = pdev->irq;
- dev->base_addr = (unsigned long) ioaddr;
-
- netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
-
- /* don't enable SG, IP_CSUM and TSO by default - it might not work
- * properly for all devices */
- dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HIGHDMA;
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- /* 8110SCd requires hardware Rx VLAN - disallow toggling */
- dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
-
- dev->hw_features |= NETIF_F_RXALL;
- dev->hw_features |= NETIF_F_RXFCS;
-
- tp->hw_start = cfg->hw_start;
- tp->event_slow = cfg->event_slow;
-
- tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
- ~(RxBOVF | RxFOVF) : ~0;
-
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = rtl8169_phy_timer;
-
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
-
- rc = register_netdev(dev);
- if (rc < 0)
- goto err_out_msi_4;
-
- pci_set_drvdata(pdev, dev);
-
- netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
- (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
- if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
- netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
- "tx checksumming: %s]\n",
- rtl_chip_infos[chipset].jumbo_max,
- rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
- }
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- rtl8168_driver_start(tp);
- }
-
- device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
- netif_carrier_off(dev);
-
-out:
- return rc;
-
-err_out_msi_4:
- rtl_disable_msi(pdev, tp);
- iounmap(ioaddr);
-err_out_free_res_3:
- pci_release_regions(pdev);
-err_out_mwi_2:
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
-err_out_free_dev_1:
- free_netdev(dev);
- goto out;
-}
-
-static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- rtl8168_driver_stop(tp);
- }
-
- cancel_work_sync(&tp->wk.work);
-
- unregister_netdev(dev);
-
- rtl_release_firmware(tp);
-
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
-
- /* restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
-
- rtl_disable_msi(pdev, tp);
- rtl8169_release_board(pdev, dev, tp->mmio_addr);
- pci_set_drvdata(pdev, NULL);
-}
-
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
struct rtl_fw *rtl_fw;
@@ -4335,88 +3957,6 @@
rtl_request_uncached_firmware(tp);
}
-static void rtl_task(struct work_struct *);
-
-static int rtl8169_open(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
- int retval = -ENOMEM;
-
- pm_runtime_get_sync(&pdev->dev);
-
- /*
- * Rx and Tx desscriptors needs 256 bytes alignment.
- * dma_alloc_coherent provides more.
- */
- tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
- &tp->TxPhyAddr, GFP_KERNEL);
- if (!tp->TxDescArray)
- goto err_pm_runtime_put;
-
- tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
- &tp->RxPhyAddr, GFP_KERNEL);
- if (!tp->RxDescArray)
- goto err_free_tx_0;
-
- retval = rtl8169_init_ring(dev);
- if (retval < 0)
- goto err_free_rx_1;
-
- INIT_WORK(&tp->wk.work, rtl_task);
-
- smp_mb();
-
- rtl_request_firmware(tp);
-
- retval = request_irq(dev->irq, rtl8169_interrupt,
- (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
- dev->name, dev);
- if (retval < 0)
- goto err_release_fw_2;
-
- rtl_lock_work(tp);
-
- set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
-
- napi_enable(&tp->napi);
-
- rtl8169_init_phy(dev, tp);
-
- __rtl8169_set_features(dev, dev->features);
-
- rtl_pll_power_up(tp);
-
- rtl_hw_start(dev);
-
- netif_start_queue(dev);
-
- rtl_unlock_work(tp);
-
- tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
-
- rtl8169_check_link_status(dev, tp, ioaddr);
-out:
- return retval;
-
-err_release_fw_2:
- rtl_release_firmware(tp);
- rtl8169_rx_clear(tp);
-err_free_rx_1:
- dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
- tp->RxDescArray = NULL;
-err_free_tx_0:
- dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
- tp->TxDescArray = NULL;
-err_pm_runtime_put:
- pm_runtime_put_noidle(&pdev->dev);
- goto out;
-}
-
static void rtl_rx_close(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -4524,6 +4064,56 @@
}
}
+static void rtl_set_rx_mode(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= (AcceptErr | AcceptRunt);
+
+ tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ u32 data = mc_filter[0];
+
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(data);
+ }
+
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+ RTL_W32(RxConfig, tmp);
+}
+
static void rtl_hw_start_8169(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
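
For context on the hash programmed just above: ether_crc() yields a 32-bit CRC of the multicast address, the top six bits (>> 26) select one of 64 filter bits, and bit_nr >> 5 / bit_nr & 31 pick the filter word and the bit within it. A standalone sketch follows; the CRC routine is a user-space re-implementation written to mirror the kernel's bitwise ether_crc() helper and is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC-32 over an Ethernet address (polynomial 0x04c11db7, data
 * bits consumed LSB first), shaped like the kernel's ether_crc() helper. */
static uint32_t ether_crc(int length, const unsigned char *data)
{
	int32_t crc = -1;

	while (--length >= 0) {
		unsigned char byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      (((crc < 0) ^ (byte & 1)) ? 0x04c11db7 : 0);
	}
	return (uint32_t)crc;
}

int main(void)
{
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc(6, mcast) >> 26;

	/* Same indexing as rtl_set_rx_mode() */
	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("hash bit %d -> mc_filter[%d], bit %d\n",
	       bit_nr, bit_nr >> 5, bit_nr & 31);
	return 0;
}
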
@@ -5564,6 +5154,8 @@
netdev_sent_queue(dev, skb->len);
+ skb_tx_timestamp(skb);
+
wmb();
/* Anti gcc 2.95.3 bugware (sic) */
@@ -6044,7 +5636,7 @@
rtl8169_down(dev);
rtl_unlock_work(tp);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -6058,54 +5650,93 @@
return 0;
}
-static void rtl_set_rx_mode(struct net_device *dev)
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void rtl8169_netpoll(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_interrupt(tp->pci_dev->irq, dev);
+}
+#endif
+
+static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp = 0;
+ struct pci_dev *pdev = tp->pci_dev;
+ int retval = -ENOMEM;
- if (dev->flags & IFF_PROMISC) {
- /* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else {
- struct netdev_hw_addr *ha;
+ pm_runtime_get_sync(&pdev->dev);
- rx_mode = AcceptBroadcast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- rx_mode |= AcceptMulticast;
- }
- }
+ /*
+	 * Rx and Tx descriptors need 256-byte alignment.
+ * dma_alloc_coherent provides more.
+ */
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
+ if (!tp->TxDescArray)
+ goto err_pm_runtime_put;
- if (dev->features & NETIF_F_RXALL)
- rx_mode |= (AcceptErr | AcceptRunt);
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
+ if (!tp->RxDescArray)
+ goto err_free_tx_0;
- tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+ retval = rtl8169_init_ring(dev);
+ if (retval < 0)
+ goto err_free_rx_1;
- if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
- u32 data = mc_filter[0];
+ INIT_WORK(&tp->wk.work, rtl_task);
- mc_filter[0] = swab32(mc_filter[1]);
- mc_filter[1] = swab32(data);
- }
+ smp_mb();
- RTL_W32(MAR0 + 4, mc_filter[1]);
- RTL_W32(MAR0 + 0, mc_filter[0]);
+ rtl_request_firmware(tp);
- RTL_W32(RxConfig, tmp);
+ retval = request_irq(pdev->irq, rtl8169_interrupt,
+ (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
+ dev->name, dev);
+ if (retval < 0)
+ goto err_release_fw_2;
+
+ rtl_lock_work(tp);
+
+ set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+
+ napi_enable(&tp->napi);
+
+ rtl8169_init_phy(dev, tp);
+
+ __rtl8169_set_features(dev, dev->features);
+
+ rtl_pll_power_up(tp);
+
+ rtl_hw_start(dev);
+
+ netif_start_queue(dev);
+
+ rtl_unlock_work(tp);
+
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
+ rtl8169_check_link_status(dev, tp, ioaddr);
+out:
+ return retval;
+
+err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+err_free_rx_1:
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
+err_free_tx_0:
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ goto out;
}
static struct rtnl_link_stats64 *
@@ -6292,6 +5923,9 @@
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &pdev->dev;
+
+ pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -6309,13 +5943,362 @@
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
+
+ pm_runtime_put_noidle(d);
+}
+
+static void __devexit rtl_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_stop(tp);
+ }
+
+ cancel_work_sync(&tp->wk.work);
+
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /* restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
+
+ rtl_disable_msi(pdev, tp);
+ rtl8169_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct net_device_ops rtl_netdev_ops = {
+ .ndo_open = rtl_open,
+ .ndo_stop = rtl8169_close,
+ .ndo_get_stats64 = rtl8169_get_stats64,
+ .ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_tx_timeout = rtl8169_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = rtl8169_change_mtu,
+ .ndo_fix_features = rtl8169_fix_features,
+ .ndo_set_features = rtl8169_set_features,
+ .ndo_set_mac_address = rtl_set_mac_address,
+ .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_set_rx_mode = rtl_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rtl8169_netpoll,
+#endif
+
+};
+
+static const struct rtl_cfg_info {
+ void (*hw_start)(struct net_device *);
+ unsigned int region;
+ unsigned int align;
+ u16 event_slow;
+ unsigned features;
+ u8 default_ver;
+} rtl_cfg_infos [] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+ .region = 1,
+ .align = 0,
+ .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+ .features = RTL_FEATURE_GMII,
+ .default_ver = RTL_GIGA_MAC_VER_01,
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+ .region = 2,
+ .align = 8,
+ .event_slow = SYSErr | LinkChg | RxOverflow,
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_11,
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+ .region = 2,
+ .align = 8,
+ .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
+ PCSTimeout,
+ .features = RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_13,
+ }
+};
+
+/* Cfg9346_Unlock assumed. */
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
+ const struct rtl_cfg_info *cfg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned msi = 0;
+ u8 cfg2;
+
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
+ } else {
+ cfg2 |= MSIEnable;
+ msi = RTL_FEATURE_MSI;
+ }
+ }
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
+ return msi;
+}
+
+static int __devinit
+rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
+ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int chipset, i;
+ int rc;
+
+ if (netif_msg_drv(&debug)) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ }
+
+ dev = alloc_etherdev(sizeof (*tp));
+ if (!dev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->netdev_ops = &rtl_netdev_ops;
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
+ mii = &tp->mii;
+ mii->dev = dev;
+ mii->mdio_read = rtl_mdio_read;
+ mii->mdio_write = rtl_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
+	/* disable ASPM completely as it causes random devices to stop working
+	 * as well as full system hangs for some PCIe device users */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "enable failure\n");
+ goto err_out_free_dev_1;
+ }
+
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "could not request regions\n");
+ goto err_out_mwi_2;
+ }
+
+ tp->cp_cmd = RxChkSum;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_free_res_3;
+ }
+ }
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ if (!ioaddr) {
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res_3;
+ }
+ tp->mmio_addr = ioaddr;
+
+ if (!pci_is_pcie(pdev))
+ netif_info(tp, probe, dev, "not PCI Express\n");
+
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+
+ rtl_init_rxcfg(tp);
+
+ rtl_irq_disable(tp);
+
+ rtl_hw_reset(tp);
+
+ rtl_ack_events(tp, 0xffff);
+
+ pci_set_master(pdev);
+
+ /*
+ * Pretend we are using VLANs; This bypasses a nasty bug where
+ * Interrupts stop flowing on high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ tp->cp_cmd |= RxVlan;
+
+ rtl_init_mdio_ops(tp);
+ rtl_init_pll_power_ops(tp);
+ rtl_init_jumbo_ops(tp);
+
+ rtl8169_print_mac_version(tp);
+
+ chipset = tp->mac_version;
+ tp->txd_version = rtl_chip_infos[chipset].txd_version;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ tp->features |= rtl_try_msi(tp, cfg);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ if (rtl_tbi_enabled(tp)) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+ tp->do_ioctl = rtl_tbi_ioctl;
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ tp->do_ioctl = rtl_xmii_ioctl;
+ }
+
+ mutex_init(&tp->wk.mutex);
+
+ /* Get MAC address */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
+
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
+
+ /* don't enable SG, IP_CSUM and TSO by default - it might not work
+ * properly for all devices */
+ dev->features |= NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features |= NETIF_F_RXALL;
+ dev->hw_features |= NETIF_F_RXFCS;
+
+ tp->hw_start = cfg->hw_start;
+ tp->event_slow = cfg->event_slow;
+
+ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+ ~(RxBOVF | RxFOVF) : ~0;
+
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long) dev;
+ tp->timer.function = rtl8169_phy_timer;
+
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_out_msi_4;
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
+ rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
+ if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
+ netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
+ "tx checksumming: %s]\n",
+ rtl_chip_infos[chipset].jumbo_max,
+ rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_start(tp);
+ }
+
+ device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ netif_carrier_off(dev);
+
+out:
+ return rc;
+
+err_out_msi_4:
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+err_out_free_res_3:
+ pci_release_regions(pdev);
+err_out_mwi_2:
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+err_out_free_dev_1:
+ free_netdev(dev);
+ goto out;
}
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
- .probe = rtl8169_init_one,
- .remove = __devexit_p(rtl8169_remove_one),
+ .probe = rtl_init_one,
+ .remove = __devexit_p(rtl_remove_one),
.shutdown = rtl_shutdown,
.driver.pm = RTL8169_PM_OPS,
};
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 1908ba7..3cbfbff 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -25,6 +25,7 @@
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
+#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"
@@ -163,12 +164,12 @@
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
-static unsigned irq_adapt_low_thresh = 10000;
+static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
"Threshold score for reducing IRQ moderation");
-static unsigned irq_adapt_high_thresh = 20000;
+static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
@@ -1564,8 +1565,9 @@
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
- /* Make sure the hardware monitor is stopped */
+ /* Make sure the hardware monitor and event self-test are stopped */
cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
@@ -1825,6 +1827,7 @@
efx_link_status_changed(efx);
efx_start_all(efx);
+ efx_selftest_async_start(efx);
return 0;
}
@@ -2375,6 +2378,7 @@
#endif
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
@@ -2493,6 +2497,57 @@
free_netdev(efx->net_dev);
};
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC. VPD is potentially very large but this should
+ * always appear within the first 512 bytes.
+ */
+#define SFC_VPD_LEN 512
+static void efx_print_product_vpd(struct efx_nic *efx)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ char vpd_data[SFC_VPD_LEN];
+ ssize_t vpd_size;
+ int i, j;
+
+ /* Get the vpd data from the device */
+ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ if (vpd_size <= 0) {
+ netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
+ return;
+ }
+
+ /* Get the Read only section */
+ i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
+ return;
+ }
+
+ j = pci_vpd_lrdt_size(&vpd_data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (i + j > vpd_size)
+ j = vpd_size - i;
+
+ /* Get the Part number */
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Part number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ return;
+ }
+
+ netif_info(efx, drv, efx->net_dev,
+ "Part Number : %.*s\n", j, &vpd_data[i]);
+}
+
+
/* Main body of NIC initialisation
* This is called at module load (or hotplug insertion, theoretically).
*/
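
For anyone cross-checking the part number that efx_print_product_vpd() now logs during probe: the same VPD area is exposed read-only by the PCI core, e.g. by dumping /sys/bus/pci/devices/<BDF>/vpd (lspci can decode the same data), so the driver output can be verified without instrumenting the probe path.
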
@@ -2582,6 +2637,8 @@
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
+ efx_print_product_vpd(efx);
+
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
if (rc)
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4debfe0..be8f915 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -148,7 +148,7 @@
static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
- channel->last_irq_cpu = raw_smp_processor_id();
+ channel->event_test_cpu = raw_smp_processor_id();
efx_schedule_channel(channel);
}
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 2084cc6..8687a6c 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -709,8 +709,6 @@
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
- .ref_model = "SFE4001",
- .gen_type = "10GBASE-T adapter",
.init = sfe4001_init,
.init_phy = efx_port_dummy_op_void,
.fini = sfe4001_fini,
@@ -719,8 +717,6 @@
},
{
.id = FALCON_BOARD_SFE4002,
- .ref_model = "SFE4002",
- .gen_type = "XFP adapter",
.init = sfe4002_init,
.init_phy = sfe4002_init_phy,
.fini = efx_fini_lm87,
@@ -729,8 +725,6 @@
},
{
.id = FALCON_BOARD_SFE4003,
- .ref_model = "SFE4003",
- .gen_type = "10GBASE-CX4 adapter",
.init = sfe4003_init,
.init_phy = sfe4003_init_phy,
.fini = efx_fini_lm87,
@@ -739,8 +733,6 @@
},
{
.id = FALCON_BOARD_SFN4112F,
- .ref_model = "SFN4112F",
- .gen_type = "SFP+ adapter",
.init = sfn4112f_init,
.init_phy = sfn4112f_init_phy,
.fini = efx_fini_lm87,
@@ -763,11 +755,6 @@
board->type = &board_types[i];
if (board->type) {
- netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
- (efx->pci_dev->subsystem_vendor ==
- PCI_VENDOR_ID_SOLARFLARE)
- ? board->type->ref_model : board->type->gen_type,
- 'A' + board->major, board->minor);
return 0;
} else {
netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0b95505..f0385e1 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -324,8 +324,7 @@
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
- * @last_eventq_read_ptr: Last event queue read pointer value.
- * @last_irq_cpu: Last CPU to handle interrupt for this channel
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -355,9 +354,8 @@
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
- unsigned int last_eventq_read_ptr;
+ int event_test_cpu;
- int last_irq_cpu;
unsigned int irq_count;
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
@@ -678,6 +676,7 @@
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
@@ -791,6 +790,7 @@
struct efx_buffer irq_status;
unsigned irq_zero_count;
unsigned irq_level;
+ struct delayed_work selftest_work;
#ifdef CONFIG_SFC_MTD
struct list_head mtd_list;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 2bf4283..4a9a5be 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -822,7 +822,6 @@
channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
tx_queue->ptr_mask);
- channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -1084,7 +1083,7 @@
code = _EFX_CHANNEL_MAGIC_CODE(magic);
if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
- /* ignore */
+ channel->event_test_cpu = raw_smp_processor_id();
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
@@ -1333,8 +1332,10 @@
}
-void efx_nic_generate_test_event(struct efx_channel *channel)
+void efx_nic_event_test_start(struct efx_channel *channel)
{
+ channel->event_test_cpu = -1;
+ smp_wmb();
efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}
@@ -1383,8 +1384,10 @@
* Interrupt must already have been enabled, otherwise nasty things
* may happen.
*/
-void efx_nic_generate_interrupt(struct efx_nic *efx)
+void efx_nic_irq_test_start(struct efx_nic *efx)
{
+ efx->last_irq_cpu = -1;
+ smp_wmb();
efx_nic_interrupts(efx, true, true);
}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 246c414..f48ccf6 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -35,10 +35,6 @@
extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
-static inline bool efx_nic_has_mc(struct efx_nic *efx)
-{
- return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
-}
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
@@ -73,8 +69,6 @@
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
- * @ref_model: Model number of Solarflare reference design
- * @gen_type: Generic board type description
* @init: Allocate resources and initialise peripheral hardware
* @init_phy: Do board-specific PHY initialisation
* @fini: Shut down hardware and free resources
@@ -83,8 +77,6 @@
*/
struct falcon_board_type {
u8 id;
- const char *ref_model;
- const char *gen_type;
int (*init) (struct efx_nic *nic);
void (*init_phy) (struct efx_nic *efx);
void (*fini) (struct efx_nic *nic);
@@ -305,14 +297,23 @@
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel);
-extern void efx_nic_generate_interrupt(struct efx_nic *efx);
+extern void efx_nic_event_test_start(struct efx_channel *channel);
+extern void efx_nic_irq_test_start(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
extern void falcon_irq_ack_a1(struct efx_nic *efx);
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+ return ACCESS_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+ return ACCESS_ONCE(efx->last_irq_cpu);
+}
+
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 1ba290d..763fa2f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -449,10 +449,8 @@
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through the generic GRO stack
- *
- * Handles driverlink veto, and passes the fragment up via
- * the appropriate GRO method
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
*/
static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
@@ -461,7 +459,6 @@
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
- /* Pass the skb/page into the GRO engine */
if (rx_buf->flags & EFX_RX_BUF_PAGE) {
struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->u.page;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index febe2a9..de4c006 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -25,6 +25,16 @@
#include "selftest.h"
#include "workarounds.h"
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ * slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ * tasklet or normal task to be given higher priority than our IRQ
+ * threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
/*
* Loopback test packet structure
*
@@ -77,6 +87,9 @@
struct efx_loopback_payload payload;
};
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
/**************************************************************************
*
* MII, NVRAM and register tests
@@ -130,23 +143,25 @@
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
+ unsigned long timeout, wait;
int cpu;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
- /* Reset interrupt flag */
- efx->last_irq_cpu = -1;
- smp_wmb();
-
- efx_nic_generate_interrupt(efx);
+ efx_nic_irq_test_start(efx);
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
/* Wait for arrival of test interrupt. */
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
- schedule_timeout_uninterruptible(HZ / 10);
- cpu = ACCESS_ONCE(efx->last_irq_cpu);
- if (cpu >= 0)
- goto success;
+ do {
+ schedule_timeout_uninterruptible(wait);
+ cpu = efx_nic_irq_test_irq_cpu(efx);
+ if (cpu >= 0)
+ goto success;
+ wait *= 2;
+ } while (time_before(jiffies, timeout));
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
@@ -159,61 +174,86 @@
}
/* Test generation and receipt of interrupting events */
-static int efx_test_eventq_irq(struct efx_channel *channel,
+static int efx_test_eventq_irq(struct efx_nic *efx,
struct efx_self_tests *tests)
{
- struct efx_nic *efx = channel->efx;
- unsigned int read_ptr;
- bool napi_ran, dma_seen, int_seen;
+ struct efx_channel *channel;
+ unsigned int read_ptr[EFX_MAX_CHANNELS];
+ unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+ unsigned long timeout, wait;
- read_ptr = channel->eventq_read_ptr;
- channel->last_irq_cpu = -1;
- smp_wmb();
+ BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
- efx_nic_generate_test_event(channel);
+ efx_for_each_channel(channel, efx) {
+ read_ptr[channel->channel] = channel->eventq_read_ptr;
+ set_bit(channel->channel, &dma_pend);
+ set_bit(channel->channel, &int_pend);
+ efx_nic_event_test_start(channel);
+ }
- /* Wait for arrival of interrupt. NAPI processing may or may
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of interrupts. NAPI processing may or may
* not complete in time, but we can cope in any case.
*/
- msleep(10);
- napi_disable(&channel->napi_str);
- if (channel->eventq_read_ptr != read_ptr) {
- napi_ran = true;
- dma_seen = true;
- int_seen = true;
- } else {
- napi_ran = false;
- dma_seen = efx_nic_event_present(channel);
- int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
- }
- napi_enable(&channel->napi_str);
- efx_nic_eventq_read_ack(channel);
+ do {
+ schedule_timeout_uninterruptible(wait);
- tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
- tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+ efx_for_each_channel(channel, efx) {
+ napi_disable(&channel->napi_str);
+ if (channel->eventq_read_ptr !=
+ read_ptr[channel->channel]) {
+ set_bit(channel->channel, &napi_ran);
+ clear_bit(channel->channel, &dma_pend);
+ clear_bit(channel->channel, &int_pend);
+ } else {
+ if (efx_nic_event_present(channel))
+ clear_bit(channel->channel, &dma_pend);
+ if (efx_nic_event_test_irq_cpu(channel) >= 0)
+ clear_bit(channel->channel, &int_pend);
+ }
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+ }
- if (dma_seen && int_seen) {
- netif_dbg(efx, drv, efx->net_dev,
- "channel %d event queue passed (with%s NAPI)\n",
- channel->channel, napi_ran ? "" : "out");
- return 0;
- } else {
- /* Report failure and whether either interrupt or DMA worked */
- netif_err(efx, drv, efx->net_dev,
- "channel %d timed out waiting for event queue\n",
- channel->channel);
- if (int_seen)
+ wait *= 2;
+ } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
+
+ efx_for_each_channel(channel, efx) {
+ bool dma_seen = !test_bit(channel->channel, &dma_pend);
+ bool int_seen = !test_bit(channel->channel, &int_pend);
+
+ tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+ tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+ if (dma_seen && int_seen) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "channel %d event queue passed (with%s NAPI)\n",
+ channel->channel,
+ test_bit(channel->channel, &napi_ran) ?
+ "" : "out");
+ } else {
+ /* Report failure and whether either interrupt or DMA
+ * worked
+ */
netif_err(efx, drv, efx->net_dev,
- "channel %d saw interrupt "
- "during event queue test\n",
+ "channel %d timed out waiting for event queue\n",
channel->channel);
- if (dma_seen)
- netif_err(efx, drv, efx->net_dev,
- "channel %d event was generated, but "
- "failed to trigger an interrupt\n",
- channel->channel);
- return -ETIMEDOUT;
+ if (int_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt "
+ "during event queue test\n",
+ channel->channel);
+ if (dma_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n",
+ channel->channel);
+ }
}
+
+ return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
@@ -516,10 +556,10 @@
begin_rc = efx_begin_loopback(tx_queue);
/* This will normally complete very quickly, but be
- * prepared to wait up to 100 ms. */
+ * prepared to wait much longer. */
msleep(1);
if (!efx_poll_loopback(efx)) {
- msleep(100);
+ msleep(LOOPBACK_TIMEOUT_MS);
efx_poll_loopback(efx);
}
@@ -660,9 +700,10 @@
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
enum reset_type reset_method = RESET_TYPE_INVISIBLE;
- struct efx_channel *channel;
int rc_test = 0, rc_reset = 0, rc;
+ efx_selftest_async_cancel(efx);
+
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
@@ -678,11 +719,9 @@
if (rc && !rc_test)
rc_test = rc;
- efx_for_each_channel(channel, efx) {
- rc = efx_test_eventq_irq(channel, tests);
- if (rc && !rc_test)
- rc_test = rc;
- }
+ rc = efx_test_eventq_irq(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
if (rc_test)
return rc_test;
@@ -757,3 +796,36 @@
return rc_test;
}
+void efx_selftest_async_start(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_nic_event_test_start(channel);
+ schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void efx_selftest_async_cancel(struct efx_nic *efx)
+{
+ cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+void efx_selftest_async_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ selftest_work.work);
+ struct efx_channel *channel;
+ int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = efx_nic_event_test_irq_cpu(channel);
+ if (cpu < 0)
+ netif_err(efx, ifup, efx->net_dev,
+ "channel %d failed to trigger an interrupt\n",
+ channel->channel);
+ else
+ netif_dbg(efx, ifup, efx->net_dev,
+ "channel %d triggered interrupt on CPU %d\n",
+ channel->channel, cpu);
+ }
+}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87abe2a..aed24b7 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -48,5 +48,8 @@
extern int efx_selftest(struct efx_nic *efx,
struct efx_self_tests *tests,
unsigned flags);
+extern void efx_selftest_async_start(struct efx_nic *efx);
+extern void efx_selftest_async_cancel(struct efx_nic *efx);
+extern void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 7bea7901..9f8d7ce 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -409,8 +409,7 @@
siena_reset_hw(efx, RESET_TYPE_ALL);
/* Relinquish the device back to the BMC */
- if (efx_nic_has_mc(efx))
- efx_mcdi_drv_attach(efx, false, NULL);
+ efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
kfree(efx->nic_data);
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 80976e8..9cb3b84 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -514,7 +514,7 @@
if (abs_index < EFX_VI_BASE)
return true;
- vf_i = (abs_index - EFX_VI_BASE) * efx_vf_size(efx);
+ vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
if (vf_i >= efx->vf_init_count)
return true;
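
The siena_sriov change replaces a multiply with a divide when mapping an absolute VI index back to a VF number: each VF owns efx_vf_size(efx) consecutive VIs starting at EFX_VI_BASE, so the owning VF is the quotient, not the product. Purely for illustration, assuming EFX_VI_BASE = 128 and efx_vf_size() = 4, an absolute index of 134 belongs to VF (134 - 128) / 4 = 1; the old expression would have produced 6 * 4 = 24 and compared that wrong value against vf_init_count.
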
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dec5836..c358245 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -49,6 +49,7 @@
struct hv_device *device;
bool is_data_pkt;
+ u16 vlan_tci;
/*
* Valid only for receives when we break a xfer page packet
@@ -926,9 +927,40 @@
struct rndis_per_packet_info {
u32 size;
u32 type;
- u32 per_pkt_info_offset;
+ u32 ppi_offset;
};
+enum ndis_per_pkt_info_type {
+ TCPIP_CHKSUM_PKTINFO,
+ IPSEC_PKTINFO,
+ TCP_LARGESEND_PKTINFO,
+ CLASSIFICATION_HANDLE_PKTINFO,
+ NDIS_RESERVED,
+ SG_LIST_PKTINFO,
+ IEEE_8021Q_INFO,
+ ORIGINAL_PKTINFO,
+ PACKET_CANCEL_ID,
+ ORIGINAL_NET_BUFLIST,
+ CACHED_NET_BUFLIST,
+ SHORT_PKT_PADINFO,
+ MAX_PER_PKT_INFO
+};
+
+struct ndis_pkt_8021q_info {
+ union {
+ struct {
+ u32 pri:3; /* User Priority */
+ u32 cfi:1; /* Canonical Format ID */
+ u32 vlanid:12; /* VLAN ID */
+ u32 reserved:16;
+ };
+ u32 value;
+ };
+};
+
+#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_pkt_8021q_info))
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8965b45..d025c83 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -300,6 +300,7 @@
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+ init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
@@ -341,7 +342,7 @@
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
- ndis_version = 0x00050000;
+ ndis_version = 0x00050001;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0ae7a1a..0f8e834 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -159,7 +159,8 @@
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_filter_packet), GFP_ATOMIC);
+ sizeof(struct rndis_filter_packet) +
+ NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -169,6 +170,8 @@
return NETDEV_TX_BUSY;
}
+ packet->vlan_tci = skb->vlan_tci;
+
packet->extension = (void *)(unsigned long)packet +
sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer));
@@ -293,6 +296,7 @@
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
+ skb->vlan_tci = packet->vlan_tci;
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
@@ -310,7 +314,7 @@
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "hv_netvsc");
+ strcpy(info->driver, KBUILD_MODNAME);
strcpy(info->version, HV_DRV_VERSION);
strcpy(info->fw_version, "N/A");
}
@@ -407,7 +411,7 @@
/* TODO: Add GSO and Checksum offload */
net->hw_features = NETIF_F_SG;
- net->features = NETIF_F_SG;
+ net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX;
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
@@ -482,7 +486,7 @@
/* The one and only one */
static struct hv_driver netvsc_drv = {
- .name = "netvsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 136efd8..d6be64b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include "hyperv_net.h"
@@ -303,12 +304,39 @@
}
}
+/*
+ * Get the Per-Packet-Info with the specified type;
+ * return NULL if not found.
+ */
+static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
+{
+ struct rndis_per_packet_info *ppi;
+ int len;
+
+ if (rpkt->per_pkt_info_offset == 0)
+ return NULL;
+
+ ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
+ rpkt->per_pkt_info_offset);
+ len = rpkt->per_pkt_info_len;
+
+ while (len > 0) {
+ if (ppi->type == type)
+ return (void *)((ulong)ppi + ppi->ppi_offset);
+ len -= ppi->size;
+ ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
+ }
+
+ return NULL;
+}
+
static void rndis_filter_receive_data(struct rndis_device *dev,
struct rndis_message *msg,
struct hv_netvsc_packet *pkt)
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
+ struct ndis_pkt_8021q_info *vlan;
rndis_pkt = &msg->msg.pkt;
@@ -344,6 +372,14 @@
pkt->is_data_pkt = true;
+ vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+ if (vlan) {
+ pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
+ (vlan->pri << VLAN_PRIO_SHIFT);
+ } else {
+ pkt->vlan_tci = 0;
+ }
+
netvsc_recv_callback(dev->net_dev->dev, pkt);
}
@@ -352,8 +388,7 @@
{
struct netvsc_device *net_dev = hv_get_drvdata(dev);
struct rndis_device *rndis_dev;
- struct rndis_message rndis_msg;
- struct rndis_message *rndis_hdr;
+ struct rndis_message *rndis_msg;
struct net_device *ndev;
if (!net_dev)
@@ -375,46 +410,32 @@
return -ENODEV;
}
- rndis_hdr = pkt->data;
+ rndis_msg = pkt->data;
- /* Make sure we got a valid rndis message */
- if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
- (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
- netdev_err(ndev, "incoming rndis message buffer overflow "
- "detected (got %u, max %zu)..marking it an error!\n",
- rndis_hdr->msg_len,
- sizeof(struct rndis_message));
- }
+ dump_rndis_message(dev, rndis_msg);
- memcpy(&rndis_msg, rndis_hdr,
- (rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
- sizeof(struct rndis_message) :
- rndis_hdr->msg_len);
-
- dump_rndis_message(dev, &rndis_msg);
-
- switch (rndis_msg.ndis_msg_type) {
+ switch (rndis_msg->ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
/* data msg */
- rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
+ rndis_filter_receive_data(rndis_dev, rndis_msg, pkt);
break;
case REMOTE_NDIS_INITIALIZE_CMPLT:
case REMOTE_NDIS_QUERY_CMPLT:
case REMOTE_NDIS_SET_CMPLT:
/* completion msgs */
- rndis_filter_receive_response(rndis_dev, &rndis_msg);
+ rndis_filter_receive_response(rndis_dev, rndis_msg);
break;
case REMOTE_NDIS_INDICATE_STATUS_MSG:
/* notification msgs */
- rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
+ rndis_filter_receive_indicate_status(rndis_dev, rndis_msg);
break;
default:
netdev_err(ndev,
"unhandled rndis message (type %u len %u)\n",
- rndis_msg.ndis_msg_type,
- rndis_msg.msg_len);
+ rndis_msg->ndis_msg_type,
+ rndis_msg->msg_len);
break;
}
@@ -774,12 +795,15 @@
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
+ bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
/* Add the rndis header */
filter_pkt = (struct rndis_filter_packet *)pkt->extension;
rndis_msg = &filter_pkt->msg;
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+ if (isvlan)
+ rndis_msg_size += NDIS_VLAN_PPI_SIZE;
rndis_msg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
rndis_msg->msg_len = pkt->total_data_buflen +
@@ -787,8 +811,29 @@
rndis_pkt = &rndis_msg->msg.pkt;
rndis_pkt->data_offset = sizeof(struct rndis_packet);
+ if (isvlan)
+ rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
rndis_pkt->data_len = pkt->total_data_buflen;
+ if (isvlan) {
+ struct rndis_per_packet_info *ppi;
+ struct ndis_pkt_8021q_info *vlan;
+
+ rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+ rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
+
+ ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
+ rndis_pkt->per_pkt_info_offset);
+ ppi->size = NDIS_VLAN_PPI_SIZE;
+ ppi->type = IEEE_8021Q_INFO;
+ ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+ vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
+ ppi->ppi_offset);
+ vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
+ vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
+
pkt->is_data_pkt = true;
pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
pkt->page_buf[0].offset =
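The VLAN plumbing above hinges on the RNDIS per-packet-info (PPI) layout: each record begins with a small header giving its total size, its type, and the offset of its payload, and records are simply concatenated after the fixed rndis_packet header. The following standalone sketch is illustrative only (userspace C, not part of the patch): the struct names and the IEEE_8021Q_INFO value follow the enum order in the patch, while the helpers add_vlan_ppi()/get_ppi() and the buffer handling are hypothetical stand-ins for rndis_filter_send() and rndis_get_ppi().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the patch: a PPI record header followed by its payload. */
struct rndis_per_packet_info {
	uint32_t size;       /* header + payload */
	uint32_t type;       /* e.g. IEEE_8021Q_INFO */
	uint32_t ppi_offset; /* payload offset from start of this header */
};

struct ndis_pkt_8021q_info {
	uint32_t pri:3, cfi:1, vlanid:12, reserved:16;
};

#define IEEE_8021Q_INFO 6	/* 7th entry of ndis_per_pkt_info_type */
#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
			    sizeof(struct ndis_pkt_8021q_info))

/* Append one 802.1Q record at 'off' inside 'buf'; returns bytes written. */
static size_t add_vlan_ppi(uint8_t *buf, size_t off, uint16_t vid, uint8_t pri)
{
	struct rndis_per_packet_info ppi = {
		.size = NDIS_VLAN_PPI_SIZE,
		.type = IEEE_8021Q_INFO,
		.ppi_offset = sizeof(ppi),
	};
	struct ndis_pkt_8021q_info vlan = { .pri = pri, .vlanid = vid };

	memcpy(buf + off, &ppi, sizeof(ppi));
	memcpy(buf + off + ppi.ppi_offset, &vlan, sizeof(vlan));
	return NDIS_VLAN_PPI_SIZE;
}

/* Walk the PPI area, as rndis_get_ppi() does, until 'type' is found. */
static void *get_ppi(uint8_t *buf, size_t ppi_len, uint32_t type)
{
	size_t off = 0;

	while (off + sizeof(struct rndis_per_packet_info) <= ppi_len) {
		struct rndis_per_packet_info *ppi = (void *)(buf + off);

		if (ppi->type == type)
			return (uint8_t *)ppi + ppi->ppi_offset;
		off += ppi->size;
	}
	return NULL;
}

int main(void)
{
	uint8_t buf[64];
	size_t len = add_vlan_ppi(buf, 0, 100, 5);
	struct ndis_pkt_8021q_info *v = get_ppi(buf, len, IEEE_8021Q_INFO);

	if (v)
		printf("vlan id %u, priority %u\n",
		       (unsigned)v->vlanid, (unsigned)v->pri);
	return 0;
}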
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 963067d..dcc80d6 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1368,7 +1368,7 @@
IRDA_WARNING("%s, unable to allocate dma=%d\n",
ALI_IRCC_DRIVER_NAME,
self->io.dma);
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
return -EAGAIN;
}
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2d456dd..1a89fd4 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1495,14 +1495,14 @@
if (request_dma(self->io.dma, dev->name)) {
IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
self->io.dma);
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
return -EAGAIN;
}
if (self->io.dma2 != self->io.dma) {
if (request_dma(self->io.dma2, dev->name)) {
IRDA_WARNING("%s, unable to allocate dma2=%d\n",
driver_name, self->io.dma2);
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
free_dma(self->io.dma);
return -EAGAIN;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 7d43506..f5bb92f 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -1172,7 +1172,7 @@
* and clean up on failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
return -EAGAIN;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b924f46..83dcc53 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -589,6 +589,7 @@
entry = (struct skb_data *) skb->cb;
urb = entry->urb;
+ spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
@@ -596,6 +597,7 @@
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
+ spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags);
return count;
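The usbnet hunk above releases q->lock around usb_unlink_urb() and re-takes it afterwards, presumably because, as the in-code comment notes, the unlink can complete synchronously during PM-driven resume and the URB completion path then needs the same queue lock, so holding it across the call would recurse on the lock. A minimal userspace sketch of that pattern follows (hypothetical names; a pthread mutex stands in for the kernel spinlock, and unlink_urb()/complete_cb() stand in for usb_unlink_urb() and the completion handler).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the URB completion handler: it needs the queue lock too. */
static void complete_cb(int id)
{
	pthread_mutex_lock(&q_lock);
	printf("urb %d moved to done list\n", id);
	pthread_mutex_unlock(&q_lock);
}

/* Stand-in for usb_unlink_urb(): may run the completion synchronously. */
static int unlink_urb(int id)
{
	complete_cb(id);
	return 0;
}

static void unlink_queued_urbs(void)
{
	pthread_mutex_lock(&q_lock);
	for (int id = 0; id < 3; id++) {
		/*
		 * Drop the lock around the unlink call, exactly as the patch
		 * does: if the completion runs immediately it takes the lock
		 * itself, and still holding it here would deadlock.
		 */
		pthread_mutex_unlock(&q_lock);
		unlink_urb(id);
		pthread_mutex_lock(&q_lock);
	}
	pthread_mutex_unlock(&q_lock);
}

int main(void)
{
	unlink_queued_urbs();
	return 0;
}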
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1c008c6..ddc061d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1869,7 +1869,7 @@
static void try_auto_wep(struct airo_info *ai)
{
- if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) {
+ if (auto_wep && !test_bit(FLAG_RADIO_DOWN, &ai->flags)) {
ai->expires = RUN_AT(3*HZ);
wake_up_interruptible(&ai->thr_wait);
}
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6640326..8d434b8f 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1320,6 +1320,7 @@
struct ieee80211_vif *bslot[ATH_BCBUF];
u16 num_ap_vifs;
u16 num_adhoc_vifs;
+ u16 num_mesh_vifs;
unsigned int bhalq, /* SW q for outgoing beacons */
bmisscount, /* missed beacon transmits */
bintval, /* beacon interval in TU */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a339693..0e643b0 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1867,7 +1867,8 @@
ah->bmisscount = 0;
}
- if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) ||
+ if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
+ ah->num_mesh_vifs > 1) ||
ah->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
@@ -1952,7 +1953,8 @@
u64 hw_tsf;
intval = ah->bintval & AR5K_BEACON_PERIOD;
- if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) {
+ if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
+ + ah->num_mesh_vifs > 1) {
intval /= ATH_BCBUF; /* staggered multi-bss beacons */
if (intval < 15)
ATH5K_WARN(ah, "intval %u is too low, min 15\n",
@@ -2330,15 +2332,6 @@
"got new rfgain, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
}
-
- /* TODO: On full calibration we should stop TX here,
- * so that it doesn't interfere (mostly due to gain_f
- * calibration that messes with tx packets -see phy.c).
- *
- * NOTE: Stopping the queues from above is not enough
- * to stop TX but saves us from disconecting (at least
- * we don't lose packets). */
- ieee80211_stop_queues(ah->hw);
} else
ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
@@ -2353,10 +2346,9 @@
ah->curchan->center_freq));
/* Clear calibration flags */
- if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
- ieee80211_wake_queues(ah->hw);
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
- } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index af4c7ec..5c53299 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -134,6 +134,8 @@
ah->num_ap_vifs++;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs++;
+ else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
+ ah->num_mesh_vifs++;
}
/* Any MAC address is fine, all others are included through the
@@ -175,6 +177,8 @@
ah->num_ap_vifs--;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs--;
+ else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
+ ah->num_mesh_vifs--;
ath5k_update_bssid_mask_and_opmode(ah, NULL);
mutex_unlock(&ah->lock);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index e1f8613..3a28454 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1871,31 +1871,15 @@
ret = 0;
}
- /* On full calibration do an AGC calibration and
- * request a PAPD probe for gainf calibration if
- * needed */
- if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+ /* On full calibration request a PAPD probe for
+ * gainf calibration if needed */
+ if ((ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ (ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112) &&
+ channel->hw_value != AR5K_MODE_11B)
+ ath5k_hw_request_rfgain_probe(ah);
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
- AR5K_PHY_AGCCTL_CAL);
-
- ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
- AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
- 0, false);
- if (ret) {
- ATH5K_ERR(ah,
- "gain calibration timeout (%uMHz)\n",
- channel->center_freq);
- }
-
- if ((ah->ah_radio == AR5K_RF5111 ||
- ah->ah_radio == AR5K_RF5112)
- && (channel->hw_value != AR5K_MODE_11B))
- ath5k_hw_request_rfgain_probe(ah);
- }
-
- /* Update noise floor
- * XXX: Only do this after AGC calibration */
+ /* Update noise floor */
if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
ath5k_hw_update_noise_floor(ah);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 18fa9aa..97abf46 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -556,7 +556,8 @@
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -595,7 +596,8 @@
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ ev->data, dlen, GFP_ATOMIC);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 595a272..e507e78 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -81,6 +81,14 @@
developed. At this point enabling this option won't do anything
except increase code size.
+config ATH9K_MAC_DEBUG
+ bool "Atheros MAC statistics"
+ depends on ATH9K_DEBUGFS
+ default y
+ ---help---
+ This option enables collection of statistics for Rx/Tx status
+ data and some other MAC-related statistics.
+
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 8d1bca0..a66a13b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -326,7 +326,6 @@
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts)
{
- struct ar9003_txc *txc = (struct ar9003_txc *) ds;
struct ar9003_txs *ads;
u32 status;
@@ -336,11 +335,7 @@
if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
- ts->qid = MS(ads->ds_info, AR_TxQcuNum);
- if (!txc || (MS(txc->info, AR_TxQcuNum) == ts->qid))
- ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
- else
- return -ENOENT;
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
@@ -354,6 +349,7 @@
ts->ts_seqnum = MS(status, AR_SeqNum);
ts->tid = MS(status, AR_TxTid);
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
ts->desc_id = MS(ads->status1, AR_TxDescId);
ts->ts_tstamp = ads->status4;
ts->ts_status = 0;
@@ -440,20 +436,14 @@
struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
unsigned int phyerr;
- /* TODO: byte swap on big endian for ar9300_10 */
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
- if (!rxs) {
- if ((rxsp->status11 & AR_RxDone) == 0)
- return -EINPROGRESS;
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
- if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
- return -EINVAL;
-
- if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
- return -EINPROGRESS;
-
- return 0;
- }
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
rxs->rs_status = 0;
rxs->rs_flags = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c2ccba6..3d8e51c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -299,7 +299,6 @@
struct ath_rx_edma {
struct sk_buff_head rx_fifo;
- struct sk_buff_head rx_buffers;
u32 rx_fifo_hwsize;
};
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b8967e4..43882f9 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -91,7 +91,7 @@
info.txpower = MAX_RATE_POWER;
info.keyix = ATH9K_TXKEYIX_INVALID;
info.keytype = ATH9K_KEY_TYPE_CLEAR;
- info.flags = ATH9K_TXDESC_NOACK;
+ info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_INTREQ;
info.buf_addr[0] = bf->bf_buf_addr;
info.buf_len[0] = roundup(skb->len, 4);
@@ -355,7 +355,6 @@
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
- struct ath_tx_status ts;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
u32 bfaddr, bc = 0;
@@ -462,11 +461,6 @@
ath9k_hw_txstart(ah, sc->beacon.beaconq);
sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
- if (edma) {
- spin_lock_bh(&sc->sc_pcu_lock);
- ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
- spin_unlock_bh(&sc->sc_pcu_lock);
- }
}
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 228c181..c2edf68 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -818,6 +818,7 @@
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(qnum, delim_underrun);
+#ifdef CONFIG_ATH9K_MAC_DEBUG
spin_lock(&sc->debug.samp_lock);
TX_SAMP_DBG(jiffies) = jiffies;
TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
@@ -844,6 +845,7 @@
sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
spin_unlock(&sc->debug.samp_lock);
+#endif
#undef TX_SAMP_DBG
}
@@ -942,27 +944,6 @@
PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "RSSI-CTL0",
- sc->debug.stats.rxstats.rs_rssi_ctl0);
- len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "RSSI-CTL1",
- sc->debug.stats.rxstats.rs_rssi_ctl1);
- len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "RSSI-CTL2",
- sc->debug.stats.rxstats.rs_rssi_ctl2);
- len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "RSSI-EXT0",
- sc->debug.stats.rxstats.rs_rssi_ext0);
- len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "RSSI-EXT1",
- sc->debug.stats.rxstats.rs_rssi_ext1);
- len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "RSSI-EXT2",
- sc->debug.stats.rxstats.rs_rssi_ext2);
- len += snprintf(buf + len, size - len,
- "%22s : %10d\n", "Rx Antenna",
- sc->debug.stats.rxstats.rs_antenna);
- len += snprintf(buf + len, size - len,
"%22s : %10u\n", "RX-Pkts-All",
sc->debug.stats.rxstats.rx_pkts_all);
len += snprintf(buf + len, size - len,
@@ -1009,16 +990,7 @@
RX_PHY_ERR_INC(rs->rs_phyerr);
}
- sc->debug.stats.rxstats.rs_rssi_ctl0 = rs->rs_rssi_ctl0;
- sc->debug.stats.rxstats.rs_rssi_ctl1 = rs->rs_rssi_ctl1;
- sc->debug.stats.rxstats.rs_rssi_ctl2 = rs->rs_rssi_ctl2;
-
- sc->debug.stats.rxstats.rs_rssi_ext0 = rs->rs_rssi_ext0;
- sc->debug.stats.rxstats.rs_rssi_ext1 = rs->rs_rssi_ext1;
- sc->debug.stats.rxstats.rs_rssi_ext2 = rs->rs_rssi_ext2;
-
- sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
-
+#ifdef CONFIG_ATH9K_MAC_DEBUG
spin_lock(&sc->debug.samp_lock);
RX_SAMP_DBG(jiffies) = jiffies;
RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
@@ -1035,6 +1007,8 @@
sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
spin_unlock(&sc->debug.samp_lock);
+#endif
+
#undef RX_STAT_INC
#undef RX_PHY_ERR_INC
#undef RX_SAMP_DBG
@@ -1278,6 +1252,8 @@
.llseek = default_llseek,
};
+#ifdef CONFIG_ATH9K_MAC_DEBUG
+
void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
{
#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
@@ -1551,6 +1527,7 @@
.llseek = default_llseek,
};
+#endif
int ath9k_init_debug(struct ath_hw *ah)
{
@@ -1604,8 +1581,10 @@
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
+#ifdef CONFIG_ATH9K_MAC_DEBUG
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_samps);
+#endif
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 776a24a..64fcfad4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -165,13 +165,6 @@
u32 post_delim_crc_err;
u32 decrypt_busy_err;
u32 phy_err_stats[ATH9K_PHYERR_MAX];
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
- u8 rs_antenna;
};
enum ath_reset_type {
@@ -235,16 +228,17 @@
struct dentry *debugfs_phy;
u32 regidx;
struct ath_stats stats;
+#ifdef CONFIG_ATH9K_MAC_DEBUG
spinlock_t samp_lock;
struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
u8 sampidx;
u8 tsidx;
u8 rsidx;
+#endif
};
int ath9k_init_debug(struct ath_hw *ah);
-void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq,
@@ -258,10 +252,6 @@
return 0;
}
-static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
-{
-}
-
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
@@ -282,4 +272,17 @@
#endif /* CONFIG_ATH9K_DEBUGFS */
+#ifdef CONFIG_ATH9K_MAC_DEBUG
+
+void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
+
+#else
+
+static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
+{
+}
+
+#endif
+
+
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5c57568..2eb0ef3 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1386,10 +1386,16 @@
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
- if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
- return false;
- } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
+ int reset_type = ATH9K_RESET_WARM;
+
+ if (AR_SREV_9280(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ reset_type = ATH9K_RESET_POWER_ON;
+ else
+ reset_type = ATH9K_RESET_COLD;
+ }
+
+ if (!ath9k_hw_set_reset_reg(ah, reset_type))
return false;
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d8b0596..944e9b5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -555,9 +555,11 @@
mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
spin_lock_init(&sc->nodes_lock);
- spin_lock_init(&sc->debug.samp_lock);
INIT_LIST_HEAD(&sc->nodes);
#endif
+#ifdef CONFIG_ATH9K_MAC_DEBUG
+ spin_lock_init(&sc->debug.samp_lock);
+#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
(unsigned long)sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index e196aba..5f4ae6c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -745,7 +745,11 @@
qi.tqi_aifs = 1;
qi.tqi_cwmin = 0;
qi.tqi_cwmax = 0;
- /* NB: don't enable any interrupts */
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
+ TXQ_FLAG_TXERRINT_ENABLE;
+
return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 02e95c8..cc2535c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2300,6 +2300,7 @@
struct ath_vif *avp;
struct ath_buf *bf;
struct ath_tx_status ts;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int status;
vif = sc->beacon.bslot[0];
@@ -2310,7 +2311,7 @@
if (!avp->is_bslot_active)
return 0;
- if (!sc->beacon.tx_processed) {
+ if (!sc->beacon.tx_processed && !edma) {
tasklet_disable(&sc->bcon_tasklet);
bf = avp->av_bcbuf;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index b8c6c38..6407af2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1226,7 +1226,7 @@
ath_rc_init_valid_rate_idx(ath_rc_priv);
for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < MAX_TX_RATE_PHY; j++)
+ for (j = 0; j < RATE_TABLE_SIZE; j++)
ath_rc_priv->valid_phy_rateidx[i][j] = 0;
ath_rc_priv->valid_phy_ratecnt[i] = 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index b7a4bcd..75f8e9b 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -25,8 +25,6 @@
#define ATH_RATE_MAX 30
#define RATE_TABLE_SIZE 72
-#define MAX_TX_RATE_PHY 48
-
#define RC_INVALID 0x0000
#define RC_LEGACY 0x0001
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 7e1a91a..1b1b279 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -169,22 +169,17 @@
enum ath9k_rx_qtype qtype, int size)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u32 nbuf = 0;
+ struct ath_buf *bf, *tbf;
if (list_empty(&sc->rx.rxbuf)) {
ath_dbg(common, QUEUE, "No free rx buf available\n");
return;
}
- while (!list_empty(&sc->rx.rxbuf)) {
- nbuf++;
-
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
if (!ath_rx_edma_buf_link(sc, qtype))
break;
- if (nbuf >= size)
- break;
- }
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
@@ -232,7 +227,6 @@
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
skb_queue_head_init(&rx_edma->rx_fifo);
- skb_queue_head_init(&rx_edma->rx_buffers);
rx_edma->rx_fifo_hwsize = size;
}
@@ -658,7 +652,9 @@
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
- enum ath9k_rx_qtype qtype)
+ enum ath9k_rx_qtype qtype,
+ struct ath_rx_status *rs,
+ struct ath_buf **dest)
{
struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
struct ath_hw *ah = sc->sc_ah;
@@ -677,7 +673,7 @@
dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
common->rx_bufsize, DMA_FROM_DEVICE);
- ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+ ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
if (ret == -EINPROGRESS) {
/*let device gain the buffer again*/
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
@@ -690,20 +686,21 @@
/* corrupt descriptor, skip this one and the following one */
list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
+
skb = skb_peek(&rx_edma->rx_fifo);
- if (!skb)
- return true;
+ if (skb) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
- bf = SKB_CB_ATHBUF(skb);
- BUG_ON(!bf);
-
- __skb_unlink(skb, &rx_edma->rx_fifo);
- list_add_tail(&bf->list, &sc->rx.rxbuf);
- ath_rx_edma_buf_link(sc, qtype);
- return true;
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+ bf = NULL;
+ }
}
- skb_queue_tail(&rx_edma->rx_buffers, skb);
+ *dest = bf;
return true;
}
@@ -711,18 +708,15 @@
struct ath_rx_status *rs,
enum ath9k_rx_qtype qtype)
{
- struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
- struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_buf *bf = NULL;
- while (ath_edma_get_buffers(sc, qtype));
- skb = __skb_dequeue(&rx_edma->rx_buffers);
- if (!skb)
- return NULL;
+ while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
+ if (!bf)
+ continue;
- bf = SKB_CB_ATHBUF(skb);
- ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
- return bf;
+ return bf;
+ }
+ return NULL;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
@@ -954,6 +948,7 @@
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
+ int rssi = rx_stats->rs_rssi;
if (!rx_stats->is_mybeacon ||
((ah->opmode != NL80211_IFTYPE_STATION) &&
@@ -965,13 +960,12 @@
last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
- ATH_RSSI_EP_MULTIPLIER);
- if (rx_stats->rs_rssi < 0)
- rx_stats->rs_rssi = 0;
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
/* Update Beacon RSSI, this is used by ANI. */
- ah->stats.avgbrssi = rx_stats->rs_rssi;
+ ah->stats.avgbrssi = rssi;
}
/*
@@ -1011,6 +1005,8 @@
rx_status->signal = ah->noise + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ if (rx_stats->rs_moreaggr)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5dd27d2..9f78501 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2296,9 +2296,12 @@
break;
}
- /* Skip beacon completions */
- if (ts.qid == sc->beacon.beaconq)
+ /* Process beacon completions separately */
+ if (ts.qid == sc->beacon.beaconq) {
+ sc->beacon.tx_processed = true;
+ sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
continue;
+ }
txq = &sc->tx.txq[ts.qid];
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5189cf3..1d633f3 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2706,6 +2706,8 @@
mask |= 0x0060;
set |= 0x0060;
}
+ if (dev->dev->chip_id == 0x5354)
+ set &= 0xff02;
if (0 /* FIXME: conditional unknown */ ) {
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 96faaef..9503341 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -1860,7 +1860,7 @@
* which accounts for the factor of 4 */
#define REG_MAX_PWR 20
max_pwr = min(REG_MAX_PWR * 4
- - dev->dev->bus->sprom.antenna_gain.ghz24.a0
+ - dev->dev->bus->sprom.antenna_gain.a0
- 0x6, max_pwr);
/* find the desired power in Q5.2 - power_level is in dBm
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 83ca3cc..4688904 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -604,7 +604,7 @@
sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-int brcmf_sdio_init(void)
+void brcmf_sdio_init(void)
{
int ret;
@@ -614,6 +614,4 @@
if (ret)
brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
-
- return ret;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index b7671b3..3669164 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -108,11 +108,11 @@
#ifdef CONFIG_BRCMFMAC_SDIO
extern void brcmf_sdio_exit(void);
-extern int brcmf_sdio_init(void);
+extern void brcmf_sdio_init(void);
#endif
#ifdef CONFIG_BRCMFMAC_USB
extern void brcmf_usb_exit(void);
-extern int brcmf_usb_init(void);
+extern void brcmf_usb_init(void);
#endif
#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index c4da058..2a1e5ae 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1181,27 +1181,29 @@
}
#endif /* DEBUG */
-static int __init brcmfmac_init(void)
+static void brcmf_driver_init(struct work_struct *work)
{
- int ret = 0;
-
#ifdef CONFIG_BRCMFMAC_SDIO
- ret = brcmf_sdio_init();
- if (ret)
- goto fail;
+ brcmf_sdio_init();
#endif
#ifdef CONFIG_BRCMFMAC_USB
- ret = brcmf_usb_init();
- if (ret)
- goto fail;
+ brcmf_usb_init();
#endif
+}
+static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
-fail:
- return ret;
+static int __init brcmfmac_module_init(void)
+{
+ if (!schedule_work(&brcmf_driver_work))
+ return -EBUSY;
+
+ return 0;
}
-static void __exit brcmfmac_exit(void)
+static void __exit brcmfmac_module_exit(void)
{
+ cancel_work_sync(&brcmf_driver_work);
+
#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_exit();
#endif
@@ -1210,5 +1212,5 @@
#endif
}
-module_init(brcmfmac_init);
-module_exit(brcmfmac_exit);
+module_init(brcmfmac_module_init);
+module_exit(brcmfmac_module_exit);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index d4a9e8e..8236422 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -514,9 +514,9 @@
brcmf_usb_del_fromq(devinfo, req);
if (urb->status == 0)
- devinfo->bus_pub.stats.tx_packets++;
+ devinfo->bus_pub.bus->dstats.tx_packets++;
else
- devinfo->bus_pub.stats.tx_errors++;
+ devinfo->bus_pub.bus->dstats.tx_errors++;
dev_kfree_skb(req->skb);
req->skb = NULL;
@@ -536,9 +536,9 @@
req->skb = NULL;
if (urb->status == 0) {
- devinfo->bus_pub.stats.rx_packets++;
+ devinfo->bus_pub.bus->dstats.rx_packets++;
} else {
- devinfo->bus_pub.stats.rx_errors++;
+ devinfo->bus_pub.bus->dstats.rx_errors++;
dev_kfree_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
return;
@@ -712,9 +712,6 @@
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
u16 ifnum;
- if (devinfo == NULL)
- return -EINVAL;
-
if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
return 0;
@@ -900,8 +897,8 @@
sizeof(struct bootrom_id_le));
return false;
} else {
- devinfo->bus_pub.attrib.devid = chipid;
- devinfo->bus_pub.attrib.chiprev = chiprev;
+ devinfo->bus_pub.devid = chipid;
+ devinfo->bus_pub.chiprev = chiprev;
}
return true;
}
@@ -1067,7 +1064,7 @@
if (devinfo == NULL)
return -EINVAL;
- if (devinfo->bus_pub.attrib.devid == 0xDEAD)
+ if (devinfo->bus_pub.devid == 0xDEAD)
return -EINVAL;
err = brcmf_usb_dl_writeimage(devinfo, fw, len);
@@ -1088,7 +1085,7 @@
if (!devinfo)
return -EINVAL;
- if (devinfo->bus_pub.attrib.devid == 0xDEAD)
+ if (devinfo->bus_pub.devid == 0xDEAD)
return -EINVAL;
/* Check we are runnable */
@@ -1127,18 +1124,19 @@
static int
brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
{
- struct brcmf_usb_attrib *attr;
+ int devid, chiprev;
int err;
brcmf_dbg(TRACE, "enter\n");
if (devinfo == NULL)
return -ENODEV;
- attr = &devinfo->bus_pub.attrib;
+ devid = devinfo->bus_pub.devid;
+ chiprev = devinfo->bus_pub.chiprev;
- if (!brcmf_usb_chip_support(attr->devid, attr->chiprev)) {
+ if (!brcmf_usb_chip_support(devid, chiprev)) {
brcmf_dbg(ERROR, "unsupported chip %d rev %d\n",
- attr->devid, attr->chiprev);
+ devid, chiprev);
return -EINVAL;
}
@@ -1617,7 +1615,7 @@
g_image.len = 0;
}
-int brcmf_usb_init(void)
+void brcmf_usb_init(void)
{
- return usb_register(&brcmf_usbdrvr);
+ usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
index b31da7b..acfa5e8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
@@ -33,36 +33,12 @@
};
struct brcmf_stats {
- u32 tx_errors;
- u32 tx_packets;
- u32 tx_multicast;
u32 tx_ctlpkts;
u32 tx_ctlerrs;
- u32 tx_dropped;
- u32 tx_flushed;
- u32 rx_errors;
- u32 rx_packets;
- u32 rx_multicast;
u32 rx_ctlpkts;
u32 rx_ctlerrs;
- u32 rx_dropped;
- u32 rx_flushed;
-
};
-struct brcmf_usb_attrib {
- int bustype;
- int vid;
- int pid;
- int devid;
- int chiprev; /* chip revsion number */
- int mtu;
- int nchan; /* Data Channels */
- int has_2nd_bulk_in_ep;
-};
-
-struct brcmf_usbdev_info;
-
struct brcmf_usbdev {
struct brcmf_bus *bus;
struct brcmf_usbdev_info *devinfo;
@@ -70,7 +46,8 @@
struct brcmf_stats stats;
int ntxq, nrxq, rxsize;
u32 bus_mtu;
- struct brcmf_usb_attrib attrib;
+ int devid;
+ int chiprev; /* chip revision number */
};
/* IO Request Block (IRB) */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index dbee696..95b5902 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -959,14 +959,13 @@
if (supr_status) {
update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
- wiphy_err(wiphy, "%s: Pkt tx suppressed, "
- "illegal channel possibly %d\n",
+ wiphy_err(wiphy,
+ "%s: Pkt tx suppressed, illegal channel possibly %d\n",
__func__, CHSPEC_CHANNEL(
wlc->default_bss->chanspec));
} else {
if (supr_status != TX_STATUS_SUPR_FRAG)
- wiphy_err(wiphy, "%s:"
- "supr_status 0x%x\n",
+ wiphy_err(wiphy, "%s: supr_status 0x%x\n",
__func__, supr_status);
}
/* no need to retry for badch; will fail again */
@@ -988,9 +987,8 @@
}
} else if (txs->phyerr) {
update_rate = false;
- wiphy_err(wiphy, "wl%d: ampdu tx phy "
- "error (0x%x)\n", wlc->pub->unit,
- txs->phyerr);
+ wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n",
+ __func__, txs->phyerr);
if (brcm_msg_level & LOG_ERROR_VAL) {
brcmu_prpkt("txpkt (AMPDU)", p);
@@ -1018,10 +1016,10 @@
ack_recd = false;
if (ba_recd) {
bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
- BCMMSG(wlc->wiphy, "tid %d seq %d,"
- " start_seq %d, bindex %d set %d, index %d\n",
- tid, seq, start_seq, bindex,
- isset(bitmap, bindex), index);
+ BCMMSG(wiphy,
+ "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
+ tid, seq, start_seq, bindex,
+ isset(bitmap, bindex), index);
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
&& isset(bitmap, bindex)) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index fec0f10..569ab8a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1169,25 +1169,31 @@
/**
* This is the main entry point for the brcmsmac driver.
*
- * This function determines if a device pointed to by pdev is a WL device,
- * and if so, performs a brcms_attach() on it.
- *
+ * This function is scheduled upon module initialization and
+ * does the driver registration, which results in a brcms_bcma_probe()
+ * call that brings up the driver.
*/
+static void brcms_driver_init(struct work_struct *work)
+{
+ int error;
+
+ error = bcma_driver_register(&brcms_bcma_driver);
+ if (error)
+ pr_err("%s: register returned %d\n", __func__, error);
+}
+
+static DECLARE_WORK(brcms_driver_work, brcms_driver_init);
+
static int __init brcms_module_init(void)
{
- int error = -ENODEV;
-
#ifdef DEBUG
if (msglevel != 0xdeadbeef)
brcm_msg_level = msglevel;
-#endif /* DEBUG */
+#endif
+ if (!schedule_work(&brcms_driver_work))
+ return -EBUSY;
- error = bcma_driver_register(&brcms_bcma_driver);
- pr_err("%s: register returned %d\n", __func__, error);
- if (!error)
- return 0;
-
- return error;
+ return 0;
}
/**
@@ -1199,6 +1205,7 @@
*/
static void __exit brcms_module_exit(void)
{
+ cancel_work_sync(&brcms_driver_work);
bcma_driver_unregister(&brcms_bcma_driver);
}
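Both brcmfmac and brcmsmac above move the actual driver registration out of module_init() and into a scheduled work item, so module load returns quickly and registration (which may trigger probe and firmware loading) runs in process context afterwards; module_exit() must then cancel_work_sync() before unregistering. A skeletal module sketch of that pattern is shown below (hypothetical example_* names, not part of the patch; the real registration call, e.g. bcma_driver_register(), would replace the pr_info()).

#include <linux/module.h>
#include <linux/workqueue.h>

/* Runs from the system workqueue; the real (potentially slow) bus/driver
 * registration would go here. */
static void example_driver_init(struct work_struct *work)
{
	pr_info("example: registering driver from work item\n");
	/* error handling of the registration call would go here */
}

static DECLARE_WORK(example_driver_work, example_driver_init);

static int __init example_module_init(void)
{
	/* schedule_work() only returns false if the work is already queued */
	if (!schedule_work(&example_driver_work))
		return -EBUSY;
	return 0;
}

static void __exit example_module_exit(void)
{
	/* Ensure the deferred registration has finished (or never ran)
	 * before tearing anything down. */
	cancel_work_sync(&example_driver_work);
	/* driver unregistration would go here */
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");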
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 5fd56ad..f0551f8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -298,8 +298,6 @@
};
#endif
-#define WEXT_USECHANNELS 1
-
static const long ipw2100_frequencies[] = {
2412, 2417, 2422, 2427,
2432, 2437, 2442, 2447,
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index ecb561d..66e84ec 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -27,8 +27,6 @@
#ifndef __ipw2200_h__
#define __ipw2200_h__
-#define WEXT_USECHANNELS 1
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ae08498..2fe6273 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
- depends on PCI && MAC80211
+ depends on PCI && MAC80211 && HAS_IOMEM
select FW_LOADER
select NEW_LEDS
select LEDS_CLASS
@@ -127,3 +127,12 @@
support when it is loaded.
Say Y only if you want to experiment with P2P.
+
+config IWLWIFI_EXPERIMENTAL_MFP
+ bool "support MFP (802.11w) even if uCode doesn't advertise"
+ depends on IWLWIFI
+ help
+ This option enables experimental MFP (802.11w) support
+ even if the microcode doesn't advertise it.
+
+ Say Y only if you want to experiment with MFP.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index afa0bb8..85d163e 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -14,7 +14,7 @@
iwlwifi-objs += iwl-2000.o
iwlwifi-objs += iwl-pci.o
iwlwifi-objs += iwl-drv.o
-iwlwifi-objs += iwl-trans.o
+iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 605ee3d..5b0d888 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -43,6 +43,7 @@
#include "iwl-agn-hw.h"
#include "iwl-shared.h"
#include "iwl-cfg.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 6
@@ -95,9 +96,8 @@
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
-static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
.min_nrg_cck = 95,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 120,
@@ -122,23 +122,15 @@
static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl1000_set_ct_threshold(priv);
@@ -163,7 +155,7 @@
.temperature = iwlagn_temperature,
};
-static struct iwl_base_params iwl1000_base_params = {
+static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
@@ -178,7 +170,8 @@
.max_event_log_size = 128,
.wd_disable = true,
};
-static struct iwl_ht_params iwl1000_ht_params = {
+
+static const struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
.smps_mode = IEEE80211_SMPS_DYNAMIC,
@@ -197,13 +190,13 @@
.base_params = &iwl1000_base_params, \
.led_mode = IWL_LED_BLINK
-struct iwl_cfg iwl1000_bgn_cfg = {
+const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
IWL_DEVICE_1000,
.ht_params = &iwl1000_ht_params,
};
-struct iwl_cfg iwl1000_bg_cfg = {
+const struct iwl_cfg iwl1000_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
IWL_DEVICE_1000,
};
@@ -222,13 +215,13 @@
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-struct iwl_cfg iwl100_bgn_cfg = {
+const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
IWL_DEVICE_100,
.ht_params = &iwl1000_ht_params,
};
-struct iwl_cfg iwl100_bg_cfg = {
+const struct iwl_cfg iwl100_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
IWL_DEVICE_100,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index e6e8c79..5635b9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -91,9 +91,8 @@
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
-static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
.min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 80,
.auto_corr_min_ofdm_mrc = 128,
.auto_corr_min_ofdm_x1 = 105,
@@ -118,23 +117,15 @@
static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl2000_set_ct_threshold(priv);
@@ -155,16 +146,13 @@
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
static struct iwl_lib_ops iwl2030_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
- .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
- .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
- .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
.nic_config = iwl2000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
@@ -176,12 +164,12 @@
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
-static struct iwl_base_params iwl2000_base_params = {
+static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -200,7 +188,7 @@
};
-static struct iwl_base_params iwl2030_base_params = {
+static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -218,12 +206,12 @@
.hd_v2 = true,
};
-static struct iwl_ht_params iwl2000_ht_params = {
+static const struct iwl_ht_params iwl2000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
-static struct iwl_bt_params iwl2030_bt_params = {
+static const struct iwl_bt_params iwl2030_bt_params = {
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.advanced_bt_coexist = true,
.agg_time_limit = BT_AGG_THRESHOLD_DEF,
@@ -249,13 +237,13 @@
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
-struct iwl_cfg iwl2000_2bgn_cfg = {
+const struct iwl_cfg iwl2000_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2000_2bgn_d_cfg = {
+const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
@@ -279,7 +267,7 @@
.adv_pm = true, \
.iq_invert = true \
-struct iwl_cfg iwl2030_2bgn_cfg = {
+const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
IWL_DEVICE_2030,
.ht_params = &iwl2000_ht_params,
@@ -303,13 +291,13 @@
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl105_bgn_cfg = {
+const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl105_bgn_d_cfg = {
+const struct iwl_cfg iwl105_bgn_d_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
@@ -334,7 +322,7 @@
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl135_bgn_cfg = {
+const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
IWL_DEVICE_135,
.ht_params = &iwl2000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 1527dec..a805e97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -45,6 +45,7 @@
#include "iwl-trans.h"
#include "iwl-shared.h"
#include "iwl-cfg.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -63,12 +64,8 @@
/* NIC configuration for 5000 series */
static void iwl5000_nic_config(struct iwl_priv *priv)
{
- unsigned long flags;
-
iwl_rf_config(priv);
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
/* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
@@ -76,14 +73,10 @@
iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
-
-
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
-static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 100,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 105,
@@ -108,7 +101,6 @@
static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.min_nrg_cck = 95,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 105,
@@ -164,20 +156,13 @@
static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
+ hw_params(priv).rx_chains_num =
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl5000_set_ct_threshold(priv);
@@ -187,20 +172,13 @@
static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
+ hw_params(priv).rx_chains_num =
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl5150_set_ct_threshold(priv);
@@ -288,7 +266,7 @@
return -EFAULT;
}
- return iwl_trans_send_cmd(trans(priv), &hcmd);
+ return iwl_dvm_send_cmd(priv, &hcmd);
}
static struct iwl_lib_ops iwl5000_lib = {
@@ -327,7 +305,7 @@
.temperature = iwl5150_temperature,
};
-static struct iwl_base_params iwl5000_base_params = {
+static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -340,7 +318,8 @@
.no_idle_support = true,
.wd_disable = true,
};
-static struct iwl_ht_params iwl5000_ht_params = {
+
+static const struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
};
@@ -356,7 +335,7 @@
.base_params = &iwl5000_base_params, \
.led_mode = IWL_LED_BLINK
-struct iwl_cfg iwl5300_agn_cfg = {
+const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
IWL_DEVICE_5000,
/* at least EEPROM 0x11A has wrong info */
@@ -365,7 +344,7 @@
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5100_bgn_cfg = {
+const struct iwl_cfg iwl5100_bgn_cfg = {
.name = "Intel(R) WiFi Link 5100 BGN",
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
@@ -373,14 +352,14 @@
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5100_abg_cfg = {
+const struct iwl_cfg iwl5100_abg_cfg = {
.name = "Intel(R) WiFi Link 5100 ABG",
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
.valid_rx_ant = ANT_AB, /* .cfg overwrite */
};
-struct iwl_cfg iwl5100_agn_cfg = {
+const struct iwl_cfg iwl5100_agn_cfg = {
.name = "Intel(R) WiFi Link 5100 AGN",
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
@@ -388,7 +367,7 @@
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5350_agn_cfg = {
+const struct iwl_cfg iwl5350_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
@@ -418,14 +397,14 @@
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-struct iwl_cfg iwl5150_agn_cfg = {
+const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
IWL_DEVICE_5150,
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5150_abg_cfg = {
+const struct iwl_cfg iwl5150_abg_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
IWL_DEVICE_5150,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 223e60a..64060cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -96,25 +96,25 @@
CSR_GP_DRIVER_REG_BIT_6050_1x2);
}
+static void iwl6000i_additional_nic_config(struct iwl_priv *priv)
+{
+ /* 2x2 IPA phy type */
+ iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
+}
+
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
iwl_rf_config(priv);
- /* no locking required for register write */
- if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
- /* 2x2 IPA phy type */
- iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
- CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
- }
/* do additional nic configuration if needed */
if (cfg(priv)->additional_nic_config)
- cfg(priv)->additional_nic_config(priv);
+ cfg(priv)->additional_nic_config(priv);
}
-static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.min_nrg_cck = 110,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 80,
.auto_corr_min_ofdm_mrc = 128,
.auto_corr_min_ofdm_x1 = 105,
@@ -139,24 +139,16 @@
static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl6000_set_ct_threshold(priv);
@@ -233,7 +225,7 @@
return -EFAULT;
}
- return iwl_trans_send_cmd(trans(priv), &hcmd);
+ return iwl_dvm_send_cmd(priv, &hcmd);
}
static struct iwl_lib_ops iwl6000_lib = {
@@ -250,16 +242,13 @@
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
static struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
- .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
- .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
.eeprom_ops = {
@@ -272,12 +261,12 @@
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
-static struct iwl_base_params iwl6000_base_params = {
+static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -294,7 +283,7 @@
.shadow_reg_enable = true,
};
-static struct iwl_base_params iwl6050_base_params = {
+static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -310,7 +299,8 @@
.max_event_log_size = 1024,
.shadow_reg_enable = true,
};
-static struct iwl_base_params iwl6000_g2_base_params = {
+
+static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -327,12 +317,12 @@
.shadow_reg_enable = true,
};
-static struct iwl_ht_params iwl6000_ht_params = {
+static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
-static struct iwl_bt_params iwl6000_bt_params = {
+static const struct iwl_bt_params iwl6000_bt_params = {
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.advanced_bt_coexist = true,
.agg_time_limit = BT_AGG_THRESHOLD_DEF,
@@ -355,40 +345,41 @@
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
-struct iwl_cfg iwl6005_2agn_cfg = {
+const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6005_2abg_cfg = {
+const struct iwl_cfg iwl6005_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
IWL_DEVICE_6005,
};
-struct iwl_cfg iwl6005_2bg_cfg = {
+const struct iwl_cfg iwl6005_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
IWL_DEVICE_6005,
};
-struct iwl_cfg iwl6005_2agn_sff_cfg = {
+const struct iwl_cfg iwl6005_2agn_sff_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6005_2agn_d_cfg = {
+const struct iwl_cfg iwl6005_2agn_d_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6005_2agn_mow1_cfg = {
+const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6005_2agn_mow2_cfg = {
+
+const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
@@ -410,53 +401,53 @@
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
-struct iwl_cfg iwl6030_2agn_cfg = {
+const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6030_2abg_cfg = {
+const struct iwl_cfg iwl6030_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
IWL_DEVICE_6030,
};
-struct iwl_cfg iwl6030_2bgn_cfg = {
+const struct iwl_cfg iwl6030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6030_2bg_cfg = {
+const struct iwl_cfg iwl6030_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
IWL_DEVICE_6030,
};
-struct iwl_cfg iwl6035_2agn_cfg = {
+const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl1030_bgn_cfg = {
+const struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl1030_bg_cfg = {
+const struct iwl_cfg iwl1030_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
IWL_DEVICE_6030,
};
-struct iwl_cfg iwl130_bgn_cfg = {
+const struct iwl_cfg iwl130_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
.rx_with_siso_diversity = true,
};
-struct iwl_cfg iwl130_bg_cfg = {
+const struct iwl_cfg iwl130_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 130 BG",
IWL_DEVICE_6030,
.rx_with_siso_diversity = true,
@@ -477,22 +468,22 @@
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
+ .additional_nic_config = iwl6000i_additional_nic_config,\
.base_params = &iwl6000_base_params, \
- .pa_type = IWL_PA_INTERNAL, \
.led_mode = IWL_LED_BLINK
-struct iwl_cfg iwl6000i_2agn_cfg = {
+const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
IWL_DEVICE_6000i,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6000i_2abg_cfg = {
+const struct iwl_cfg iwl6000i_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
IWL_DEVICE_6000i,
};
-struct iwl_cfg iwl6000i_2bg_cfg = {
+const struct iwl_cfg iwl6000i_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
IWL_DEVICE_6000i,
};
@@ -513,13 +504,13 @@
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-struct iwl_cfg iwl6050_2agn_cfg = {
+const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
IWL_DEVICE_6050,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6050_2abg_cfg = {
+const struct iwl_cfg iwl6050_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
IWL_DEVICE_6050,
};
@@ -538,18 +529,18 @@
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-struct iwl_cfg iwl6150_bgn_cfg = {
+const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
IWL_DEVICE_6150,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6150_bg_cfg = {
+const struct iwl_cfg iwl6150_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
IWL_DEVICE_6150,
};
-struct iwl_cfg iwl6000_3agn_cfg = {
+const struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 988ee45..84cbe7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -68,13 +68,19 @@
#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-agn.h"
-#include "iwl-wifi.h"
-#include "iwl-ucode.h"
/*****************************************************************************
* INIT calibrations framework
*****************************************************************************/
+/* Opaque calibration results */
+struct iwl_calib_result {
+ struct list_head list;
+ size_t cmd_len;
+ struct iwl_calib_hdr hdr;
+ /* data follows */
+};
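
A sketch of the "data follows" allocation this comment implies: the header is the last member and the payload lives in the same allocation directly after it. The names and the allocation helper below are illustrative; the real allocation site is outside this hunk.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_hdr {
	u8 op_code;
	u8 first_group;
	u8 groups_num;
	u8 data_valid;
};

struct demo_result {
	struct list_head list;
	size_t cmd_len;
	struct demo_hdr hdr;
	/* cmd_len - sizeof(hdr) bytes of payload follow 'hdr' */
};

static struct demo_result *demo_result_alloc(const struct demo_hdr *cmd,
					      size_t len)
{
	struct demo_result *res;

	/* 'len' covers the header plus its payload, so over-allocate
	 * by the payload size and copy header and payload in one go */
	res = kzalloc(sizeof(*res) + len - sizeof(res->hdr), GFP_ATOMIC);
	if (!res)
		return NULL;
	memcpy(&res->hdr, cmd, len);
	res->cmd_len = len;
	return res;
}
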
+
struct statistics_general_data {
u32 beacon_silence_rssi_a;
u32 beacon_silence_rssi_b;
@@ -84,7 +90,7 @@
u32 beacon_energy_c;
};
-int iwl_send_calib_results(struct iwl_trans *trans)
+int iwl_send_calib_results(struct iwl_priv *priv)
{
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
@@ -92,15 +98,15 @@
};
struct iwl_calib_result *res;
- list_for_each_entry(res, &trans->calib_results, list) {
+ list_for_each_entry(res, &priv->calib_results, list) {
int ret;
hcmd.len[0] = res->cmd_len;
hcmd.data[0] = &res->hdr;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_trans_send_cmd(trans, &hcmd);
+ ret = iwl_dvm_send_cmd(priv, &hcmd);
if (ret) {
- IWL_ERR(trans, "Error %d on calib cmd %d\n",
+ IWL_ERR(priv, "Error %d on calib cmd %d\n",
ret, res->hdr.op_code);
return ret;
}
@@ -109,7 +115,7 @@
return 0;
}
-int iwl_calib_set(struct iwl_trans *trans,
+int iwl_calib_set(struct iwl_priv *priv,
const struct iwl_calib_hdr *cmd, int len)
{
struct iwl_calib_result *res, *tmp;
@@ -121,7 +127,7 @@
memcpy(&res->hdr, cmd, len);
res->cmd_len = len;
- list_for_each_entry(tmp, &trans->calib_results, list) {
+ list_for_each_entry(tmp, &priv->calib_results, list) {
if (tmp->hdr.op_code == res->hdr.op_code) {
list_replace(&tmp->list, &res->list);
kfree(tmp);
@@ -130,16 +136,16 @@
}
/* wasn't in list already */
- list_add_tail(&res->list, &trans->calib_results);
+ list_add_tail(&res->list, &priv->calib_results);
return 0;
}
-void iwl_calib_free_results(struct iwl_trans *trans)
+void iwl_calib_free_results(struct iwl_priv *priv)
{
struct iwl_calib_result *res, *tmp;
- list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+ list_for_each_entry_safe(res, tmp, &priv->calib_results, list) {
list_del(&res->list);
kfree(res);
}
@@ -494,7 +500,7 @@
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- return iwl_trans_send_cmd(trans(priv), &cmd_out);
+ return iwl_dvm_send_cmd(priv, &cmd_out);
}
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -583,7 +589,7 @@
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
- return iwl_trans_send_cmd(trans(priv), &cmd_out);
+ return iwl_dvm_send_cmd(priv, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -636,7 +642,7 @@
data->last_bad_plcp_cnt_cck = 0;
data->last_fa_cnt_cck = 0;
- if (nic(priv)->fw.enhance_sensitivity_table)
+ if (priv->fw->enhance_sensitivity_table)
ret |= iwl_enhance_sensitivity_write(priv);
else
ret |= iwl_sensitivity_write(priv);
@@ -655,7 +661,6 @@
struct iwl_sensitivity_data *data = NULL;
struct statistics_rx_non_phy *rx_info;
struct statistics_rx_phy *ofdm, *cck;
- unsigned long flags;
struct statistics_general_data statis;
if (priv->disable_sens_cal)
@@ -668,13 +673,13 @@
return;
}
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ spin_lock_bh(&priv->statistics.lock);
rx_info = &priv->statistics.rx_non_phy;
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
return;
}
@@ -698,7 +703,7 @@
statis.beacon_energy_c =
le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
@@ -747,7 +752,7 @@
iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
- if (nic(priv)->fw.enhance_sensitivity_table)
+ if (priv->fw->enhance_sensitivity_table)
iwl_enhance_sensitivity_write(priv);
else
iwl_sensitivity_write(priv);
@@ -849,7 +854,7 @@
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(cfg(priv)->valid_tx_ant);
+ find_first_chain(hw_params(priv).valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -874,10 +879,8 @@
}
static void iwlagn_gain_computation(struct iwl_priv *priv,
- u32 average_noise[NUM_RX_CHAINS],
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
+ u32 average_noise[NUM_RX_CHAINS],
+ u8 default_chain)
{
int i;
s32 delta_g;
@@ -925,7 +928,7 @@
priv->phy_calib_chain_noise_gain_cmd);
cmd.delta_gain_1 = data->delta_gain_code[1];
cmd.delta_gain_2 = data->delta_gain_code[2];
- iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
+ iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
CMD_ASYNC, sizeof(cmd), &cmd);
data->radio_write = 1;
@@ -958,7 +961,6 @@
u16 stat_chnum = INITIALIZATION_VALUE;
u8 rxon_band24;
u8 stat_band24;
- unsigned long flags;
struct statistics_rx_non_phy *rx_info;
/*
@@ -983,13 +985,13 @@
return;
}
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ spin_lock_bh(&priv->statistics.lock);
rx_info = &priv->statistics.rx_non_phy;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
return;
}
@@ -1004,7 +1006,7 @@
if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
return;
}
@@ -1023,7 +1025,7 @@
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
data->beacon_count++;
@@ -1083,8 +1085,7 @@
min_average_noise, min_average_noise_antenna_i);
iwlagn_gain_computation(priv, average_noise,
- min_average_noise_antenna_i, min_average_noise,
- find_first_chain(cfg(priv)->valid_rx_ant));
+ find_first_chain(hw_params(priv).valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 5be0d36..f98baab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/sched.h>
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -52,7 +51,7 @@
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
"TX Power requested while scanning!\n"))
return -EAGAIN;
@@ -77,17 +76,19 @@
tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
- if (IWL_UCODE_API(nic(priv)->fw.ucode_ver) == 1)
+ if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
sizeof(tx_power_cmd), &tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
+ lockdep_assert_held(&priv->statistics.lock);
+
/* store temperature from correct statistics (in Celsius) */
priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
iwl_tt_handler(priv);
@@ -233,19 +234,19 @@
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
ieee80211_stop_queues(priv->hw);
if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
@@ -255,7 +256,7 @@
iwl_trans_wait_tx_queue_empty(trans(priv));
done:
ieee80211_wake_queues(priv->hw);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/*
@@ -434,12 +435,12 @@
if (cfg(priv)->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
@@ -452,7 +453,7 @@
struct iwl_rxon_context *ctx, *found_ctx = NULL;
bool found_ap = false;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* Check whether AP or GO mode is active. */
if (rssi_ena) {
@@ -565,7 +566,7 @@
break;
}
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
/*
* We can not send command to firmware while scanning. When the scan
@@ -574,7 +575,7 @@
* STATUS_SCANNING to avoid race when queue_work two times from
* different notifications, but quit and not perform any work at all.
*/
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
+ if (test_bit(STATUS_SCAN_HW, &priv->status))
goto out;
iwl_update_chain_flags(priv);
@@ -593,7 +594,7 @@
*/
iwlagn_bt_coex_rssi_monitor(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/*
@@ -705,12 +706,11 @@
}
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
- unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
+ struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
@@ -754,9 +754,7 @@
/* FIXME: based on notification, adjust the prio_boost */
- spin_lock_irqsave(&priv->shrd->lock, flags);
priv->bt_ci_compliance = coex->bt_ci_compliance;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
return 0;
}
@@ -971,7 +969,7 @@
u16 p1k[IWLAGN_P1K_SIZE];
int ret, i;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
@@ -1077,7 +1075,7 @@
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
int iwlagn_send_patterns(struct iwl_priv *priv,
@@ -1117,13 +1115,12 @@
}
cmd.data[0] = pattern_cmd;
- err = iwl_trans_send_cmd(trans(priv), &cmd);
+ err = iwl_dvm_send_cmd(priv, &cmd);
kfree(pattern_cmd);
return err;
}
-int iwlagn_suspend(struct iwl_priv *priv,
- struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
{
struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
struct iwl_rxon_cmd rxon;
@@ -1194,9 +1191,9 @@
iwl_trans_stop_device(trans(priv));
- priv->shrd->wowlan = true;
+ priv->wowlan = true;
- ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
+ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
if (ret)
goto out;
@@ -1224,11 +1221,11 @@
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
ieee80211_iter_keys(priv->hw, ctx->vif,
iwlagn_wowlan_program_keys,
&key_data);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (key_data.error) {
ret = -EIO;
goto out;
@@ -1240,16 +1237,16 @@
.flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .len[0] = sizeof(key_data.rsc_tsc),
+ .len[0] = sizeof(*key_data.rsc_tsc),
};
- ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+ ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
if (ret)
goto out;
}
if (key_data.use_tkip) {
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_TKIP_PARAMS,
CMD_SYNC, sizeof(tkip_cmd),
&tkip_cmd);
@@ -1265,7 +1262,7 @@
kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
kek_kck_cmd.replay_ctr = priv->replay_ctr;
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_KEK_KCK_MATERIAL,
CMD_SYNC, sizeof(kek_kck_cmd),
&kek_kck_cmd);
@@ -1274,12 +1271,12 @@
}
}
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
if (ret)
goto out;
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
CMD_SYNC, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
@@ -1291,3 +1288,41 @@
return ret;
}
#endif
+
+int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+ IWL_WARN(priv, "Not sending command - %s KILL\n",
+ iwl_is_rfkill(priv) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ /*
+ * Synchronous commands from this op-mode must hold
+ * the mutex; this ensures we don't try to send two
+ * (or more) synchronous commands at a time.
+ */
+ if (cmd->flags & CMD_SYNC)
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
+ !(cmd->flags & CMD_ON_DEMAND)) {
+ IWL_DEBUG_HC(priv, "tm owns the uCode, no regular hcmd send\n");
+ return -EIO;
+ }
+
+ return iwl_trans_send_cmd(trans(priv), cmd);
+}
+
+int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+ u32 flags, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_dvm_send_cmd(priv, &cmd);
+}
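
A hypothetical caller of the new wrapper; DEMO_CMD and struct demo_cmd are stand-ins, but the calling convention matches the call sites converted throughout this patch:

/* DEMO_CMD and struct demo_cmd are illustrative, not real host commands. */
#define DEMO_CMD 0x42

struct demo_cmd {
	__le32 flags;
} __packed;

static int demo_send_config(struct iwl_priv *priv)
{
	struct demo_cmd cmd = {
		.flags = cpu_to_le32(0),
	};

	/* CMD_SYNC callers must hold priv->mutex (enforced by the
	 * lockdep assertion in iwl_dvm_send_cmd above) */
	return iwl_dvm_send_cmd_pdu(priv, DEMO_CMD, CMD_SYNC,
				    sizeof(cmd), &cmd);
}
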
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index a7d6713..53f8c51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -870,19 +870,16 @@
{
struct iwl_scale_tbl_info *tbl;
bool full_concurrent = priv->bt_full_concurrent;
- unsigned long flags;
if (priv->bt_ant_couple_ok) {
/*
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- spin_lock_irqsave(&priv->shrd->lock, flags);
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
@@ -2680,7 +2677,6 @@
* which requires station table entry to exist).
*/
static void rs_initialize_lq(struct iwl_priv *priv,
- struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta)
{
@@ -2915,7 +2911,7 @@
lq_sta->dbg_fixed_rate = 0;
#endif
- rs_initialize_lq(priv, conf, sta, lq_sta);
+ rs_initialize_lq(priv, sta, lq_sta);
}
static void rs_fill_link_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 30bb5bb..cc0227c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -131,26 +131,27 @@
******************************************************************************/
static int iwlagn_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_error_resp *err_resp = (void *)pkt->data;
IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
"seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
+ le32_to_cpu(err_resp->error_type),
+ get_cmd_string(err_resp->cmd_id),
+ err_resp->cmd_id,
+ le16_to_cpu(err_resp->bad_cmd_seq_num),
+ le32_to_cpu(err_resp->error_info));
return 0;
}
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+ struct iwl_csa_notification *csa = (void *)pkt->data;
/*
* MULTI-FIXME
* See iwlagn_mac_channel_switch.
@@ -158,7 +159,7 @@
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return 0;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
@@ -177,11 +178,11 @@
static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+ struct iwl_spectrum_notification *report = (void *)pkt->data;
if (!report->state) {
IWL_DEBUG_11H(priv,
@@ -195,12 +196,12 @@
}
static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ struct iwl_sleep_notification *sleep = (void *)pkt->data;
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
@@ -208,7 +209,7 @@
}
static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -217,16 +218,16 @@
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for %s:\n", len,
get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
return 0;
}
static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
+ struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
#ifdef CONFIG_IWLWIFI_DEBUG
u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -266,6 +267,8 @@
if (priv->agg_tids_count)
return true;
+ lockdep_assert_held(&priv->statistics.lock);
+
old = &priv->statistics.tx;
actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
@@ -318,7 +321,7 @@
unsigned int msecs)
{
int delta;
- int threshold = cfg(priv)->base_params->plcp_delta_threshold;
+ int threshold = priv->plcp_delta_threshold;
if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
@@ -352,7 +355,7 @@
{
unsigned int msecs;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
@@ -487,7 +490,7 @@
#endif
static int iwlagn_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
unsigned long stamp = jiffies;
@@ -509,9 +512,11 @@
IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
len);
+ spin_lock(&priv->statistics.lock);
+
if (len == sizeof(struct iwl_bt_notif_statistics)) {
struct iwl_bt_notif_statistics *stats;
- stats = &pkt->u.stats_bt;
+ stats = (void *)&pkt->data;
flag = &stats->flag;
common = &stats->general.common;
rx_non_phy = &stats->rx.general.common;
@@ -529,7 +534,7 @@
#endif
} else if (len == sizeof(struct iwl_notif_statistics)) {
struct iwl_notif_statistics *stats;
- stats = &pkt->u.stats;
+ stats = (void *)&pkt->data;
flag = &stats->flag;
common = &stats->general.common;
rx_non_phy = &stats->rx.general;
@@ -542,6 +547,7 @@
WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
len, sizeof(struct iwl_bt_notif_statistics),
sizeof(struct iwl_notif_statistics));
+ spin_unlock(&priv->statistics.lock);
return 0;
}
@@ -569,7 +575,7 @@
priv->rx_statistics_jiffies = stamp;
- set_bit(STATUS_STATISTICS, &priv->shrd->status);
+ set_bit(STATUS_STATISTICS, &priv->status);
/* Reschedule the statistics timer to occur in
* reg_recalib_period seconds to ensure we get a
@@ -578,23 +584,27 @@
mod_timer(&priv->statistics_periodic, jiffies +
msecs_to_jiffies(reg_recalib_period * 1000));
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->shrd->status)) &&
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
iwlagn_rx_calc_noise(priv);
queue_work(priv->workqueue, &priv->run_time_calib_work);
}
if (cfg(priv)->lib->temperature && change)
cfg(priv)->lib->temperature(priv);
+
+ spin_unlock(&priv->statistics.lock);
+
return 0;
}
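
The statistics handler now takes priv->statistics.lock around the whole update, and helpers such as iwlagn_temperature document that requirement with lockdep_assert_held. A minimal sketch of the caller/helper contract, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_stats {
	spinlock_t lock;
	int temperature;
};

/* Relies on the caller already holding the lock; lockdep documents
 * and enforces that contract at run time. */
static void demo_update_temperature(struct demo_stats *s, int t)
{
	lockdep_assert_held(&s->lock);
	s->temperature = t;
}

static void demo_handle_statistics(struct demo_stats *s, int t)
{
	spin_lock(&s->lock);		/* handler runs in softirq context */
	demo_update_temperature(s, t);
	spin_unlock(&s->lock);
}
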
static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_notif_statistics *stats = (void *)pkt->data;
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+ if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
memset(&priv->accum_stats, 0,
sizeof(priv->accum_stats));
@@ -612,12 +622,13 @@
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->shrd->status;
+ struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+ u32 flags = le32_to_cpu(card_state_notif->flags);
+ unsigned long status = priv->status;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
@@ -647,32 +658,31 @@
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
else
- clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)))
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
else
wake_up(&priv->shrd->wait_command_queue);
return 0;
}
static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
+ struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data;
- missed_beacon = &pkt->u.missed_beacon;
if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv,
@@ -681,7 +691,7 @@
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->shrd->status))
+ if (!test_bit(STATUS_SCANNING, &priv->status))
iwl_init_sensitivity(priv);
}
return 0;
@@ -690,13 +700,13 @@
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->last_phy_res_valid = true;
- memcpy(&priv->last_phy_res, pkt->u.raw,
+ memcpy(&priv->last_phy_res, pkt->data,
sizeof(struct iwl_rx_phy_res));
return 0;
}
@@ -757,12 +767,14 @@
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
struct iwl_rxon_context *ctx;
+ struct page *p;
+ int offset;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
@@ -782,7 +794,9 @@
return;
}
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+ offset = (void *)hdr - rxb_addr(rxb);
+ p = rxb_steal_page(rxb);
+ skb_add_rx_frag(skb, 0, p, offset, len);
iwl_update_stats(priv, false, fc, len);
@@ -793,23 +807,18 @@
* sometimes even after already having transmitted frames for the
* association because the new RXON may reset the information.
*/
- if (unlikely(ieee80211_is_beacon(fc))) {
+ if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
for_each_context(priv, ctx) {
- if (!ctx->last_tx_rejected)
- continue;
if (compare_ether_addr(hdr->addr3,
ctx->active.bssid_addr))
continue;
- ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
- "channel got active");
+ iwlagn_lift_passive_no_rx(priv);
}
}
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
- rxb->page = NULL;
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -915,7 +924,7 @@
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *header;
@@ -938,12 +947,12 @@
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res = (struct iwl_rx_phy_res *)pkt->data;
+ header = (struct ieee80211_hdr *)(pkt->data + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
@@ -952,10 +961,10 @@
return 0;
}
phy_res = &priv->last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
ampdu_status = iwlagn_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
@@ -1035,12 +1044,12 @@
}
static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_wipan_noa_data *new_data, *old_data;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+ struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data;
/* no condition -- we're in softirq */
old_data = rcu_dereference_protected(priv->noa_data, true);
@@ -1086,7 +1095,7 @@
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
- int (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
handlers = priv->rx_handlers;
@@ -1131,21 +1140,20 @@
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
/* set up notification wait support */
- spin_lock_init(&priv->shrd->notif_wait_lock);
- INIT_LIST_HEAD(&priv->shrd->notif_waits);
- init_waitqueue_head(&priv->shrd->notif_waitq);
+ iwl_notification_wait_init(&priv->notif_wait);
/* Set up BT Rx handlers */
- if (cfg(priv)->lib->bt_rx_handler_setup)
- cfg(priv)->lib->bt_rx_handler_setup(priv);
-
+ if (cfg(priv)->bt_params)
+ iwlagn_bt_rx_handler_setup(priv);
}
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_mem_buffer *rxb,
- struct iwl_device_cmd *cmd)
+int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ void (*pre_rx_handler)(struct iwl_priv *,
+ struct iwl_rx_cmd_buffer *);
int err = 0;
/*
@@ -1153,30 +1161,22 @@
* even if the RX handler consumes the RXB we have
* access to it in the notification wait entry.
*/
- if (!list_empty(&priv->shrd->notif_waits)) {
- struct iwl_notification_wait *w;
+ iwl_notification_wait_notify(&priv->notif_wait, pkt);
- spin_lock(&priv->shrd->notif_wait_lock);
- list_for_each_entry(w, &priv->shrd->notif_waits, list) {
- if (w->cmd != pkt->hdr.cmd)
- continue;
- IWL_DEBUG_RX(priv,
- "Notif: %s, 0x%02x - wake the callers up\n",
- get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- w->triggered = true;
- if (w->fn)
- w->fn(trans(priv), pkt, w->fn_data);
- }
- spin_unlock(&priv->shrd->notif_wait_lock);
-
- wake_up_all(&priv->shrd->notif_waitq);
- }
-
- if (priv->pre_rx_handler &&
- priv->shrd->ucode_owner == IWL_OWNERSHIP_TM)
- priv->pre_rx_handler(priv, rxb);
- else {
+ /* RX data may be forwarded to userspace (via pre_rx_handler) in one
+ * of two cases: either the user owns the uCode through testmode, in
+ * which case pre_rx_handler is set and no further processing takes
+ * place, or the user wants to monitor the RX without affecting the
+ * regular flow, in which case pre_rx_handler is set but the ownership
+ * flag is not IWL_OWNERSHIP_TM and the regular flow continues.
+ * ACCESS_ONCE is needed here to prevent the handler from changing
+ * between the check and the call.
+ */
+ pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler);
+ if (pre_rx_handler)
+ pre_rx_handler(priv, rxb);
+ if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
/* Based on type of command response or notification,
* handle those that need handling via function in
* rx_handlers table. See iwl_setup_rx_handlers() */
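
The ACCESS_ONCE idiom used in the dispatch path above, reduced to a minimal sketch (struct demo and its handler pointer are hypothetical):

#include <linux/compiler.h>

struct demo {
	void (*handler)(struct demo *d);	/* may change concurrently */
};

static void demo_dispatch(struct demo *d)
{
	void (*h)(struct demo *d);

	/* read the pointer exactly once so the NULL check and the call
	 * use the same value even if another context rewrites it */
	h = ACCESS_ONCE(d->handler);
	if (h)
		h(d);
}
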
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 73653a6..3690907 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -39,7 +39,7 @@
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -60,13 +60,13 @@
u8 old_dev_type = send->dev_type;
int ret;
- iwl_init_notification_wait(priv->shrd, &disable_wait,
- REPLY_WIPAN_DEACTIVATION_COMPLETE,
- NULL, NULL);
+ iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE,
+ NULL, NULL);
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -74,9 +74,10 @@
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
- iwl_remove_notification(priv->shrd, &disable_wait);
+ iwl_remove_notification(&priv->notif_wait, &disable_wait);
} else {
- ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
+ ret = iwl_wait_notification(&priv->notif_wait,
+ &disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
@@ -92,7 +93,7 @@
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
sizeof(*send), send);
send->filter_flags = old_filter;
@@ -121,7 +122,7 @@
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -131,7 +132,7 @@
static int iwlagn_update_beacon(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
@@ -180,7 +181,7 @@
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
@@ -266,7 +267,7 @@
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -274,8 +275,6 @@
}
memcpy(active, &ctx->staging, sizeof(*active));
- iwl_reprogram_ap_sta(priv, ctx);
-
/* IBSS beacon needs to be sent after setting assoc */
if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
if (iwlagn_update_beacon(priv, ctx->vif))
@@ -315,7 +314,7 @@
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
@@ -362,7 +361,7 @@
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
+ if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
@@ -378,7 +377,7 @@
ctx_pan->beacon_int;
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
}
@@ -387,7 +386,7 @@
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -420,12 +419,9 @@
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- return -EINVAL;
-
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EBUSY;
/* This function hardcodes a bunch of dual-mode assumptions */
@@ -434,10 +430,6 @@
if (!ctx->is_active)
return 0;
- /* override BSSID if necessary due to preauth */
- if (ctx->preauth_bssid)
- memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
-
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
@@ -445,8 +437,7 @@
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
- if (!(cfg(priv)->ht_params &&
- cfg(priv)->ht_params->use_rts_for_aggregation))
+ if (!hw_params(priv).use_rts_for_aggregation)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -466,7 +457,7 @@
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_channel));
@@ -558,17 +549,14 @@
IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- goto out;
-
- if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
}
- if (!iwl_is_ready(priv->shrd)) {
+ if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
@@ -590,8 +578,6 @@
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- unsigned long flags;
-
ch_info = iwl_get_channel_info(priv, channel->band,
channel->hw_value);
if (!is_channel_valid(ch_info)) {
@@ -600,8 +586,6 @@
goto out;
}
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
for_each_context(priv, ctx) {
/* Configure HT40 channels */
if (ctx->ht.enabled != conf_is_ht(conf))
@@ -636,8 +620,6 @@
ctx->vif);
}
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
iwl_update_bcast_stations(priv);
/*
@@ -668,7 +650,7 @@
iwlagn_commit_rxon(priv, ctx);
}
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -685,7 +667,7 @@
struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -789,7 +771,7 @@
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr,
priv->phy_calib_chain_noise_reset_cmd);
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_PHY_CALIBRATION_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
@@ -810,17 +792,17 @@
int ret;
bool force = false;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (unlikely(!iwl_is_ready(priv->shrd))) {
+ if (unlikely(!iwl_is_ready(priv))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return;
}
if (unlikely(!ctx->vif)) {
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return;
}
@@ -851,12 +833,8 @@
* not get stuck in this case either since it
* can happen if userspace gets confused.
*/
- if (ctx->last_tx_rejected) {
- ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv),
- ctx->ctxid,
- "Disassoc: flush queue");
- }
+ iwlagn_lift_passive_no_rx(priv);
+
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
if (ctx->ctxid == IWL_RXON_CTX_BSS)
@@ -932,7 +910,6 @@
if (!priv->disable_chain_noise_cal)
iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
- WARN_ON(ctx->preauth_bssid);
}
if (changes & BSS_CHANGED_IBSS) {
@@ -950,7 +927,7 @@
IWL_ERR(priv, "Error sending IBSS beacon\n");
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
void iwlagn_post_scan(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index f1298cd..c417560 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -26,7 +26,7 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
-
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-dev.h"
@@ -34,9 +34,10 @@
#include "iwl-agn.h"
#include "iwl-trans.h"
-/* priv->shrd->sta_lock must be held */
static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
+ lockdep_assert_held(&priv->sta_lock);
+
if (sta_id >= IWLAGN_STATION_COUNT) {
IWL_ERR(priv, "invalid sta_id %u", sta_id);
return -EINVAL;
@@ -63,8 +64,8 @@
struct iwl_addsta_cmd *addsta,
struct iwl_rx_packet *pkt)
{
+ struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
int ret = -EIO;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
@@ -76,9 +77,9 @@
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock(&priv->sta_lock);
- switch (pkt->u.add_sta.status) {
+ switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
ret = iwl_sta_ucode_activate(priv, sta_id);
@@ -97,7 +98,7 @@
break;
default:
IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
+ add_sta_resp->status);
break;
}
@@ -118,12 +119,12 @@
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
return ret;
}
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -153,14 +154,14 @@
might_sleep();
}
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret || (flags & CMD_ASYNC))
return ret;
/*else the command was successfully sent in SYNC mode, need to free
* the reply page */
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
if (cmd.handler_status)
IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
@@ -169,34 +170,38 @@
return cmd.handler_status;
}
-static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
+static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx,
+ __le32 *flags, __le32 *mask)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
u8 mimo_ps_mode;
+ *mask = STA_FLG_RTS_MIMO_PROT_MSK |
+ STA_FLG_MIMO_DIS_MSK |
+ STA_FLG_HT40_EN_MSK |
+ STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENSITY_MSK;
+ *flags = 0;
+
if (!sta || !sta_ht_inf->ht_supported)
- goto done;
+ return;
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+
+ IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
"static" :
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
"dynamic" : "disabled");
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
switch (mimo_ps_mode) {
case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ *flags |= STA_FLG_MIMO_DIS_MSK;
break;
case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ *flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
case WLAN_HT_CAP_SM_PS_DISABLED:
break;
@@ -205,20 +210,53 @@
break;
}
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+ *flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+ *flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
+ *flags |= STA_FLG_HT40_EN_MSK;
+}
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_sta *sta)
+{
+ u8 sta_id = iwl_sta_id(sta);
+ __le32 flags, mask;
+ struct iwl_addsta_cmd cmd;
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].sta.station_flags &= ~mask;
+ priv->stations[sta_id].sta.station_flags |= flags;
+ spin_unlock_bh(&priv->sta_lock);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mode = STA_CONTROL_MODIFY_MSK;
+ cmd.station_flags_msk = mask;
+ cmd.station_flags = flags;
+ cmd.sta.sta_id = sta_id;
+
+ return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
+}
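
The flags/mask split above lets one helper compute both the bits to set and the bits it is allowed to touch; an illustrative masked update (names are placeholders):

#include <linux/types.h>

/* Apply 'flags' to the bits covered by 'mask', leaving the rest intact. */
static inline __le32 demo_apply_masked(__le32 cur, __le32 mask, __le32 flags)
{
	return (cur & ~mask) | flags;
}
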
+
+static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ __le32 flags, mask;
+
+ iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+ lockdep_assert_held(&priv->sta_lock);
+ priv->stations[index].sta.station_flags &= ~mask;
+ priv->stations[index].sta.station_flags |= flags;
}
/**
@@ -317,18 +355,17 @@
const u8 *addr, bool is_ap,
struct ieee80211_sta *sta, u8 *sta_id_r)
{
- unsigned long flags_spin;
int ret = 0;
u8 sta_id;
struct iwl_addsta_cmd sta_cmd;
*sta_id_r = 0;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
addr);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
@@ -340,7 +377,7 @@
if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
IWL_DEBUG_INFO(priv, "STA %d already in process of being "
"added.\n", sta_id);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EEXIST;
}
@@ -348,24 +385,24 @@
(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
"adding again.\n", sta_id, addr);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EEXIST;
}
priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
/* Add station to device's station table */
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
if (ret) {
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
priv->stations[sta_id].sta.sta.addr);
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
*sta_id_r = sta_id;
return ret;
@@ -373,11 +410,11 @@
/**
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->shrd->sta_lock must be held
*/
static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
+ lockdep_assert_held(&priv->sta_lock);
+
/* Ucode must be active and driver must be non active */
if ((priv->stations[sta_id].used &
(IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
@@ -396,8 +433,6 @@
{
struct iwl_rx_packet *pkt;
int ret;
-
- unsigned long flags_spin;
struct iwl_rem_sta_cmd rm_sta_cmd;
struct iwl_host_cmd cmd = {
@@ -413,12 +448,12 @@
cmd.flags |= CMD_WANT_SKB;
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret)
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
pkt->hdr.flags);
@@ -426,14 +461,13 @@
}
if (!ret) {
- switch (pkt->u.rem_sta.status) {
+ struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
+ switch (rem_sta_resp->status) {
case REM_STA_SUCCESS_MSK:
if (!temporary) {
- spin_lock_irqsave(&priv->shrd->sta_lock,
- flags_spin);
+ spin_lock_bh(&priv->sta_lock);
iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->shrd->sta_lock,
- flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
@@ -443,7 +477,7 @@
break;
}
}
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
return ret;
}
@@ -454,10 +488,9 @@
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr)
{
- unsigned long flags;
u8 tid;
- if (!iwl_is_ready(priv->shrd)) {
+ if (!iwl_is_ready(priv)) {
IWL_DEBUG_INFO(priv,
"Unable to remove station %pM, device not ready.\n",
addr);
@@ -475,7 +508,7 @@
if (WARN_ON(sta_id == IWL_INVALID_STATION))
return -EINVAL;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
@@ -505,14 +538,49 @@
if (WARN_ON(priv->num_stations < 0))
priv->num_stations = 0;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_remove_station(priv, addr, sta_id, false);
out_err:
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ u8 tid;
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id);
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return;
+
+ spin_lock_bh(&priv->sta_lock);
+
+ WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE));
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ if (WARN_ON_ONCE(priv->num_stations < 0))
+ priv->num_stations = 0;
+
+ spin_unlock_bh(&priv->sta_lock);
+}
+
/**
* iwl_clear_ucode_stations - clear ucode station table bits
*
@@ -525,12 +593,11 @@
struct iwl_rxon_context *ctx)
{
int i;
- unsigned long flags_spin;
bool cleared = false;
IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
if (ctx && ctx->ctxid != priv->stations[i].ctxid)
continue;
@@ -542,7 +609,7 @@
cleared = true;
}
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
if (!cleared)
IWL_DEBUG_INFO(priv,
@@ -561,20 +628,19 @@
{
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
int i;
bool found = false;
int ret;
bool send_lq;
- if (!iwl_is_ready(priv->shrd)) {
+ if (!iwl_is_ready(priv)) {
IWL_DEBUG_INFO(priv,
"Not ready yet, not restoring any stations.\n");
return;
}
IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
if (ctx->ctxid != priv->stations[i].ctxid)
continue;
@@ -594,27 +660,24 @@
sizeof(struct iwl_addsta_cmd));
send_lq = false;
if (priv->stations[i].lq) {
- if (priv->shrd->wowlan)
+ if (priv->wowlan)
iwl_sta_fill_lq(priv, ctx, i, &lq);
else
memcpy(&lq, priv->stations[i].lq,
sizeof(struct iwl_link_quality_cmd));
send_lq = true;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock,
- flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
if (ret) {
- spin_lock_irqsave(&priv->shrd->sta_lock,
- flags_spin);
+ spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
priv->stations[i].sta.sta.addr);
priv->stations[i].used &=
~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &=
~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->shrd->sta_lock,
- flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
/*
* Rate scaling has already been initialized, send
@@ -623,12 +686,12 @@
if (send_lq)
iwl_send_lq_cmd(priv, ctx, &lq,
CMD_SYNC, true);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
}
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
if (!found)
IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
"no stations to be restored.\n");
@@ -637,52 +700,6 @@
"complete.\n");
}
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- int sta_id = ctx->ap_sta_id;
- int ret;
- struct iwl_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- bool active, have_lq = false;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
- return;
- }
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- sta_cmd.mode = 0;
- if (priv->stations[sta_id].lq) {
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
- have_lq = true;
- }
-
- active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
- if (active) {
- ret = iwl_send_remove_station(
- priv, priv->stations[sta_id].sta.sta.addr,
- sta_id, true);
- if (ret)
- IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- }
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret)
- IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- if (have_lq)
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
-}
-
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
{
int i;
@@ -696,10 +713,9 @@
void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
{
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
if (!(priv->stations[i].used & IWL_STA_BCAST))
continue;
@@ -711,7 +727,7 @@
kfree(priv->stations[i].lq);
priv->stations[i].lq = NULL;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -783,8 +799,6 @@
struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
int ret = 0;
- unsigned long flags_spin;
-
struct iwl_host_cmd cmd = {
.id = REPLY_TX_LINK_QUALITY_CMD,
.len = { sizeof(struct iwl_link_quality_cmd), },
@@ -796,19 +810,19 @@
return -EINVAL;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
iwl_dump_lq_cmd(priv, lq);
if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
return -EINVAL;
if (is_lq_table_valid(priv, ctx, lq))
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
else
ret = -EINVAL;
@@ -819,9 +833,9 @@
IWL_DEBUG_INFO(priv, "init LQ command complete, "
"clearing sta addition status for sta %d\n",
lq->sta_id);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
return ret;
}
@@ -834,7 +848,7 @@
u32 rate_flags = 0;
__le32 rate_n_flags;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
memset(link_cmd, 0, sizeof(*link_cmd));
@@ -906,7 +920,6 @@
int ret;
u8 sta_id;
struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
if (sta_id_r)
*sta_id_r = IWL_INVALID_STATION;
@@ -920,9 +933,9 @@
if (sta_id_r)
*sta_id_r = sta_id;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
/* Set up default rate scaling table in device's station table */
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
@@ -937,9 +950,9 @@
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -994,7 +1007,7 @@
cmd.len[0] = cmd_size;
if (not_empty || send_if_empty)
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
else
return 0;
}
@@ -1002,7 +1015,7 @@
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
@@ -1013,13 +1026,13 @@
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_is_rfkill(priv->shrd)) {
+ if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv,
"Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
@@ -1038,7 +1051,7 @@
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
@@ -1080,32 +1093,19 @@
struct ieee80211_sta *sta)
{
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- u8 sta_id = IWL_INVALID_STATION;
if (sta)
- sta_id = iwl_sta_id(sta);
+ return iwl_sta_id(sta);
/*
* The device expects GTKs for station interfaces to be
* installed as GTKs for the AP station. If we have no
* station ID, then use the ap_sta_id in that case.
*/
- if (!sta && vif && vif_priv->ctx) {
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- sta_id = vif_priv->ctx->ap_sta_id;
- break;
- default:
- /*
- * In all other cases, the key will be
- * used either for TX only or is bound
- * to a station already.
- */
- break;
- }
- }
+ if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx)
+ return vif_priv->ctx->ap_sta_id;
- return sta_id;
+ return IWL_INVALID_STATION;
}
static int iwlagn_send_sta_key(struct iwl_priv *priv,
@@ -1113,14 +1113,13 @@
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
u32 cmd_flags)
{
- unsigned long flags;
__le16 key_flags;
struct iwl_addsta_cmd sta_cmd;
int i;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
@@ -1187,7 +1186,6 @@
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta)
{
- unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
__le16 key_flags;
@@ -1196,16 +1194,16 @@
if (sta_id == IWL_INVALID_STATION)
return -ENOENT;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
sta_id = IWL_INVALID_STATION;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
if (sta_id == IWL_INVALID_STATION)
return 0;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
ctx->key_mapping_keys--;
@@ -1245,7 +1243,7 @@
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
@@ -1300,21 +1298,20 @@
struct iwl_rxon_context *ctx)
{
struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
u8 sta_id;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
@@ -1323,9 +1320,9 @@
return -ENOMEM;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1339,7 +1336,6 @@
int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- unsigned long flags;
struct iwl_link_quality_cmd *link_cmd;
u8 sta_id = ctx->bcast_sta_id;
@@ -1349,13 +1345,13 @@
return -ENOMEM;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
if (priv->stations[sta_id].lq)
kfree(priv->stations[sta_id].lq);
else
IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1379,18 +1375,17 @@
*/
int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
- unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -1398,24 +1393,23 @@
int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid, u16 ssn)
{
- unsigned long flags;
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -1423,11 +1417,10 @@
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid)
{
- unsigned long flags;
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -1435,13 +1428,13 @@
return -ENXIO;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -1450,16 +1443,14 @@
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
- unsigned long flags;
+ struct iwl_addsta_cmd cmd = {
+ .mode = STA_CONTROL_MODIFY_MSK,
+ .station_flags = STA_FLG_PWR_SAVE_MSK,
+ .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+ .sta.sta_id = sta_id,
+ .sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ };
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask =
- STA_MODIFY_SLEEP_TX_COUNT_MSK;
- priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
+ iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index 56d7c0e..baaf5ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -34,6 +34,7 @@
#include <net/mac80211.h>
+#include "iwl-agn.h"
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
@@ -173,7 +174,7 @@
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (tt->state == IWL_TI_CT_KILL) {
@@ -188,7 +189,7 @@
}
iwl_read32(trans(priv), CSR_UCODE_DRV_GP1);
spin_lock_irqsave(&trans(priv)->reg_lock, flags);
- if (!iwl_grab_nic_access(trans(priv)))
+ if (likely(iwl_grab_nic_access(trans(priv))))
iwl_release_nic_access(trans(priv));
spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
@@ -224,7 +225,7 @@
struct iwl_priv *priv = (struct iwl_priv *)data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* temperature timer expired, ready to go into CT_KILL state */
@@ -232,7 +233,7 @@
IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
"temperature timer expired\n");
tt->state = IWL_TI_CT_KILL;
- set_bit(STATUS_CT_KILL, &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
}
}
@@ -310,24 +311,23 @@
tt->tt_power_mode = IWL_POWER_INDEX_5;
break;
}
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->shrd->status);
+ clear_bit(STATUS_CT_KILL, &priv->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
* try again during next temperature read
*/
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
tt->state = old_state;
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
} else {
if (tt->state == IWL_TI_CT_KILL) {
if (force) {
- set_bit(STATUS_CT_KILL,
- &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -341,7 +341,7 @@
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
tt->tt_power_mode);
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
}
@@ -451,9 +451,9 @@
* in case get disabled before */
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->shrd->status);
+ clear_bit(STATUS_CT_KILL, &priv->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
@@ -462,7 +462,7 @@
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
tt->state = old_state;
} else {
IWL_DEBUG_TEMP(priv,
@@ -473,8 +473,7 @@
if (force) {
IWL_DEBUG_TEMP(priv,
"Enter IWL_TI_CT_KILL\n");
- set_bit(STATUS_CT_KILL,
- &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -486,7 +485,7 @@
iwl_perform_ct_kill_task(priv, false);
}
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
}
@@ -505,10 +504,10 @@
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!iwl_is_ready(priv->shrd))
+ if (!iwl_is_ready(priv))
return;
if (tt->state != IWL_TI_CT_KILL) {
@@ -534,10 +533,10 @@
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!iwl_is_ready(priv->shrd))
+ if (!iwl_is_ready(priv))
return;
/* stop ct_kill_exit_tm timer */
@@ -564,7 +563,7 @@
void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
@@ -573,7 +572,7 @@
void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
@@ -585,7 +584,7 @@
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
s32 temp = priv->temperature; /* degrees CELSIUS except specified */
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (!priv->thermal_throttle.advanced_tt)
@@ -596,7 +595,7 @@
void iwl_tt_handler(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 5f78567..34adedc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -126,7 +126,7 @@
u8 data_retry_limit;
u8 rate_plcp;
- if (priv->shrd->wowlan) {
+ if (priv->wowlan) {
rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
} else {
@@ -208,10 +208,9 @@
}
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -249,6 +248,35 @@
}
}
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * helper wraps that pattern.
+ */
+static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
+
/*
* start REPLY_TX command process
*/
@@ -260,19 +288,16 @@
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_device_cmd *dev_cmd = NULL;
struct iwl_tx_cmd *tx_cmd;
-
__le16 fc;
u8 hdr_len;
u16 len, seq_number = 0;
u8 sta_id, tid = IWL_MAX_TID_COUNT;
- unsigned long flags;
bool is_agg = false;
if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
- spin_lock_irqsave(&priv->shrd->lock, flags);
- if (iwl_is_rfkill(priv->shrd)) {
+ if (iwl_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock_priv;
}
@@ -308,7 +333,7 @@
sta_id = ctx->bcast_sta_id;
else {
/* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+ sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -342,13 +367,10 @@
if (info->flags & IEEE80211_TX_CTL_AMPDU)
is_agg = true;
- /* irqs already disabled/saved above when locking priv->shrd->lock */
- spin_lock(&priv->shrd->sta_lock);
-
- dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);
+ dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);
if (unlikely(!dev_cmd))
- goto drop_unlock_sta;
+ goto drop_unlock_priv;
memset(dev_cmd, 0, sizeof(*dev_cmd));
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
@@ -358,7 +380,7 @@
tx_cmd->len = cpu_to_le16(len);
if (info->control.hw_key)
- iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+ iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);
/* TODO need this for burst mode later on */
iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
@@ -373,6 +395,8 @@
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
+ spin_lock(&priv->sta_lock);
+
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
u8 *qc = NULL;
struct iwl_tid_data *tid_data;
@@ -418,8 +442,7 @@
!ieee80211_has_morefrags(fc))
priv->tid_data[sta_id][tid].seq_number = seq_number;
- spin_unlock(&priv->shrd->sta_lock);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock(&priv->sta_lock);
/*
* Avoid atomic ops if it isn't an associated client.
@@ -435,10 +458,9 @@
drop_unlock_sta:
if (dev_cmd)
- kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
- spin_unlock(&priv->shrd->sta_lock);
+ kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
+ spin_unlock(&priv->sta_lock);
drop_unlock_priv:
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
return -1;
}
@@ -446,7 +468,6 @@
struct ieee80211_sta *sta, u16 tid)
{
struct iwl_tid_data *tid_data;
- unsigned long flags;
int sta_id;
sta_id = iwl_sta_id(sta);
@@ -456,7 +477,7 @@
return -ENXIO;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
@@ -476,7 +497,7 @@
IWL_WARN(priv, "Stopping AGG while state not ON "
"or starting for %d on %d (%d)\n", sta_id, tid,
priv->tid_data[sta_id][tid].agg.state);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -490,7 +511,7 @@
tid_data->next_reclaimed);
priv->tid_data[sta_id][tid].agg.state =
IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -499,14 +520,10 @@
turn_off:
priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
- /* do not restore/save irqs */
- spin_unlock(&priv->shrd->sta_lock);
- spin_lock(&priv->shrd->lock);
+ spin_unlock_bh(&priv->sta_lock);
iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
return 0;
@@ -516,7 +533,6 @@
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_tid_data *tid_data;
- unsigned long flags;
int sta_id;
int ret;
@@ -540,7 +556,7 @@
if (ret)
return ret;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
@@ -549,7 +565,7 @@
ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
if (ret) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -566,7 +582,7 @@
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -576,14 +592,13 @@
{
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- unsigned long flags;
u16 ssn;
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
buf_size, ssn);
@@ -608,8 +623,7 @@
sta_priv->max_agg_bufsize =
min(sta_priv->max_agg_bufsize, buf_size);
- if (cfg(priv)->ht_params &&
- cfg(priv)->ht_params->use_rts_for_aggregation) {
+ if (hw_params(priv).use_rts_for_aggregation) {
/*
* switch to RTS/CTS if it is the prefer protection
* method for HT traffic
@@ -639,7 +653,7 @@
struct ieee80211_vif *vif;
u8 *addr;
- lockdep_assert_held(&priv->shrd->sta_lock);
+ lockdep_assert_held(&priv->sta_lock);
addr = priv->stations[sta_id].sta.sta.addr;
ctx = priv->stations[sta_id].ctxid;
@@ -986,19 +1000,19 @@
{
if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
queue_work(priv->workqueue, &priv->tx_flush);
}
}
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
- struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
struct ieee80211_hdr *hdr;
u32 status = le16_to_cpu(tx_resp->status.status);
u16 ssn = iwlagn_get_scd_ssn(tx_resp);
@@ -1006,7 +1020,6 @@
int sta_id;
int freed;
struct ieee80211_tx_info *info;
- unsigned long flags;
struct sk_buff_head skbs;
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
@@ -1017,11 +1030,13 @@
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock(&priv->sta_lock);
if (is_agg)
iwl_rx_reply_tx_agg(priv, tx_resp);
+ __skb_queue_head_init(&skbs);
+
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1041,8 +1056,6 @@
next_reclaimed = ssn;
}
- __skb_queue_head_init(&skbs);
-
if (tid != IWL_TID_NON_QOS) {
priv->tid_data[sta_id][tid].next_reclaimed =
next_reclaimed;
@@ -1051,12 +1064,13 @@
}
/*we can free until ssn % q.n_bd not inclusive */
- WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
- ssn, status, &skbs));
+ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid,
+ txq_id, ssn, &skbs));
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
- while (!skb_queue_empty(&skbs)) {
- skb = __skb_dequeue(&skbs);
+
+ /* process frames */
+ skb_queue_walk(&skbs, skb) {
hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data_qos(hdr->frame_control))
@@ -1064,7 +1078,7 @@
info = IEEE80211_SKB_CB(skb);
ctx = info->driver_data[0];
- kmem_cache_free(priv->tx_cmd_pool,
+ kmem_cache_free(iwl_tx_cmd_pool,
(info->driver_data[1]));
memset(&info->status, 0, sizeof(info->status));
@@ -1072,9 +1086,11 @@
if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
- ctx->last_tx_rejected = true;
- iwl_trans_stop_queue(trans(priv), txq_id,
- "Tx on passive channel");
+ /* block and stop all queues */
+ priv->passive_no_rx = true;
+ IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
+ "passive channel");
+ ieee80211_stop_queues(priv->hw);
IWL_DEBUG_TX_REPLY(priv,
"TXQ %d status %s (0x%08x) "
@@ -1098,8 +1114,6 @@
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
- ieee80211_tx_status_irqsafe(priv->hw, skb);
-
freed++;
}
@@ -1107,7 +1121,13 @@
}
iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
+
+ while (!skb_queue_empty(&skbs)) {
+ skb = __skb_dequeue(&skbs);
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
return 0;
}
@@ -1118,17 +1138,16 @@
* of frames sent via aggregation.
*/
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
struct iwl_ht_agg *agg;
struct sk_buff_head reclaimed_skbs;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
- unsigned long flags;
int sta_id;
int tid;
int freed;
@@ -1140,7 +1159,7 @@
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
- if (scd_flow >= hw_params(priv).max_txq_num) {
+ if (scd_flow >= cfg(priv)->base_params->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
return 0;
@@ -1150,12 +1169,12 @@
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
return 0;
}
@@ -1165,8 +1184,8 @@
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway). */
if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
- ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ ba_resp_scd_ssn, &reclaimed_skbs)) {
+ spin_unlock(&priv->sta_lock);
return 0;
}
@@ -1202,9 +1221,8 @@
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
- while (!skb_queue_empty(&reclaimed_skbs)) {
- skb = __skb_dequeue(&reclaimed_skbs);
+ skb_queue_walk(&reclaimed_skbs, skb) {
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
@@ -1213,7 +1231,7 @@
WARN_ON_ONCE(1);
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+ kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
if (freed == 1) {
/* this is the first skb we deliver in this batch */
@@ -1227,10 +1245,14 @@
iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
info);
}
-
- ieee80211_tx_status_irqsafe(priv->hw, skb);
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ skb = __skb_dequeue(&reclaimed_skbs);
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e0fef9f..28422c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -41,9 +41,7 @@
#include <asm/div64.h>
-#include "iwl-ucode.h"
#include "iwl-eeprom.h"
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -134,7 +132,7 @@
* beacon contents.
*/
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
@@ -199,7 +197,7 @@
cmd.data[1] = priv->beacon_skb->data;
cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -208,7 +206,7 @@
container_of(work, struct iwl_priv, beacon_update);
struct sk_buff *beacon;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "updating beacon w/o beacon context!\n");
goto out;
@@ -238,7 +236,7 @@
iwlagn_send_beacon_cmd(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_bt_runtime_config(struct work_struct *work)
@@ -246,11 +244,11 @@
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
iwlagn_send_advance_bt_config(priv);
}
@@ -261,13 +259,13 @@
container_of(work, struct iwl_priv, bt_full_concurrency);
struct iwl_rxon_context *ctx;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
goto out;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
goto out;
IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
@@ -285,7 +283,7 @@
iwlagn_send_advance_bt_config(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/**
@@ -302,11 +300,11 @@
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
iwl_send_statistics_request(priv, CMD_ASYNC, false);
@@ -329,14 +327,13 @@
/* Make sure device is powered up for SRAM reads */
spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags);
- if (iwl_grab_nic_access(trans(priv))) {
+ if (unlikely(!iwl_grab_nic_access(trans(priv)))) {
spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
return;
}
/* Set starting address; reads will auto-increment */
iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr);
- rmb();
/*
* Refuse to read more than would have fit into the log from
@@ -355,11 +352,12 @@
ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
if (mode == 0) {
- trace_iwlwifi_dev_ucode_cont_event(priv, 0, time, ev);
+ trace_iwlwifi_dev_ucode_cont_event(
+ trans(priv)->dev, 0, time, ev);
} else {
data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
- trace_iwlwifi_dev_ucode_cont_event(priv, time,
- data, ev);
+ trace_iwlwifi_dev_ucode_cont_event(
+ trans(priv)->dev, time, data, ev);
}
}
/* Allow device to power down */
@@ -424,7 +422,7 @@
else
priv->event_log.wraps_once_count++;
- trace_iwlwifi_dev_ucode_wrap_event(priv,
+ trace_iwlwifi_dev_ucode_wrap_event(trans(priv)->dev,
num_wraps - priv->event_log.num_wraps,
next_entry, priv->event_log.next_entry);
@@ -463,7 +461,7 @@
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (priv->event_log.ucode_trace) {
@@ -479,18 +477,18 @@
struct iwl_priv *priv =
container_of(work, struct iwl_priv, tx_flush);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* do nothing if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
-void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
int i;
@@ -552,13 +550,11 @@
{
struct iwl_ct_kill_config cmd;
struct iwl_ct_kill_throttling_config adv_cmd;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&priv->shrd->lock, flags);
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
priv->thermal_throttle.ct_kill_toggle = false;
if (cfg(priv)->base_params->support_ct_kill_exit) {
@@ -567,7 +563,7 @@
adv_cmd.critical_temperature_exit =
cpu_to_le32(hw_params(priv).ct_kill_exit_threshold);
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
if (ret)
@@ -582,7 +578,7 @@
cmd.critical_temperature_R =
cpu_to_le32(hw_params(priv).ct_kill_threshold);
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
@@ -608,7 +604,7 @@
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
@@ -618,9 +614,9 @@
.valid = cpu_to_le32(valid_tx_ant),
};
- if (IWL_UCODE_API(nic(priv)->fw.ucode_ver) > 1) {
+ if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_trans_send_cmd_pdu(trans(priv),
+ return iwl_dvm_send_cmd_pdu(priv,
TX_ANT_CONFIGURATION_CMD,
CMD_SYNC,
sizeof(struct iwl_tx_ant_config_cmd),
@@ -644,12 +640,12 @@
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
/* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->shrd->status);
+ set_bit(STATUS_ALIVE, &priv->status);
/* Enable watchdog to monitor the driver tx queues */
iwl_setup_watchdog(priv);
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
return -ERFKILL;
if (priv->event_log.ucode_trace) {
@@ -673,14 +669,14 @@
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
priv->cur_rssi_ctx = NULL;
- iwl_send_prio_tbl(trans(priv));
+ iwl_send_prio_tbl(priv);
/* FIXME: w/a to force change uCode BT state machine */
- ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
+ ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
@@ -701,9 +697,9 @@
priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, hw_params(priv).valid_tx_ant);
- if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
+ if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
struct iwl_rxon_cmd *active_rxon =
(struct iwl_rxon_cmd *)&ctx->active;
/* apply any changes in staging */
@@ -718,12 +714,12 @@
iwlagn_set_rxon_chain(priv, ctx);
}
- if (!priv->shrd->wowlan) {
+ if (!priv->wowlan) {
/* WoWLAN ucode will not reply in the same way, skip it */
iwl_reset_run_time_calib(priv);
}
- set_bit(STATUS_READY, &priv->shrd->status);
+ set_bit(STATUS_READY, &priv->status);
/* Configure the adapter for unassociated operation */
ret = iwlagn_commit_rxon(priv, ctx);
@@ -738,13 +734,47 @@
return iwl_power_update_mode(priv, true);
}
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that in the case
+ * we're coming there from a hardware restart mac80211 will be
+ * able to reconfigure stations -- if we're getting there in the
+ * normal down flow then the stations will already be cleared.
+ */
+static void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+
+ spin_lock_bh(&priv->sta_lock);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+
+ priv->ucode_key_table = 0;
+
+ for_each_context(priv, ctx) {
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When the device is
+ * reconfigured by mac80211 after an error, all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+ }
+
+ spin_unlock_bh(&priv->sta_lock);
+}
+
void iwl_down(struct iwl_priv *priv)
{
int exit_pending;
IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
iwl_scan_cancel_timeout(priv, 200);
@@ -756,7 +786,7 @@
ieee80211_remain_on_channel_expired(priv->hw);
exit_pending =
- test_and_set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
* to prevent rearm timer */
@@ -781,7 +811,7 @@
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
@@ -789,15 +819,15 @@
iwl_trans_stop_device(trans(priv));
/* Clear out all status bits but a few that are stable across reset */
- priv->shrd->status &=
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status) <<
+ priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) <<
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) <<
+ test_bit(STATUS_EXIT_PENDING, &priv->status) <<
STATUS_EXIT_PENDING;
+ priv->shrd->status &=
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
+ STATUS_FW_ERROR;
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = NULL;
@@ -814,11 +844,11 @@
struct iwl_priv *priv = container_of(work, struct iwl_priv,
run_time_calib_work);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status)) {
- mutex_unlock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ mutex_unlock(&priv->mutex);
return;
}
@@ -827,7 +857,7 @@
iwl_sensitivity_calibration(priv);
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
void iwlagn_prepare_restart(struct iwl_priv *priv)
@@ -839,7 +869,7 @@
u8 bt_status;
bool bt_is_sco;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
for_each_context(priv, ctx)
ctx->vif = NULL;
@@ -873,13 +903,13 @@
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwlagn_prepare_restart(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
iwl_cancel_deferred_work(priv);
ieee80211_restart_hw(priv->hw);
} else {
@@ -894,7 +924,7 @@
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (!priv->hw_roc_setup)
return;
@@ -917,9 +947,9 @@
struct iwl_priv *priv = container_of(work, struct iwl_priv,
hw_roc_disable_work.work);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/*****************************************************************************
@@ -945,8 +975,8 @@
iwl_setup_scan_deferred_work(priv);
- if (cfg(priv)->lib->bt_setup_deferred_work)
- cfg(priv)->lib->bt_setup_deferred_work(priv);
+ if (cfg(priv)->bt_params)
+ iwlagn_bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
priv->statistics_periodic.data = (unsigned long)priv;
@@ -963,8 +993,8 @@
void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (cfg(priv)->lib->cancel_deferred_work)
- cfg(priv)->lib->cancel_deferred_work(priv);
+ if (cfg(priv)->bt_params)
+ iwlagn_bt_cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
@@ -979,8 +1009,7 @@
del_timer_sync(&priv->ucode_trace);
}
-static void iwl_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
+static void iwl_init_hw_rates(struct ieee80211_rate *rates)
{
int i;
@@ -1004,21 +1033,26 @@
{
int ret;
- spin_lock_init(&priv->shrd->sta_lock);
+ spin_lock_init(&priv->sta_lock);
- mutex_init(&priv->shrd->mutex);
+ mutex_init(&priv->mutex);
- INIT_LIST_HEAD(&trans(priv)->calib_results);
+ INIT_LIST_HEAD(&priv->calib_results);
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
+ priv->plcp_delta_threshold =
+ cfg(priv)->base_params->plcp_delta_threshold;
+
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
priv->agg_tids_count = 0;
+ priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
+
/* initialize force reset */
priv->force_reset[IWL_RF_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_RF_RESET;
@@ -1054,7 +1088,7 @@
IWL_ERR(priv, "initializing geos failed: %d\n", ret);
goto err_free_channel_map;
}
- iwl_init_hw_rates(priv, priv->ieee_rates);
+ iwl_init_hw_rates(priv->ieee_rates);
return 0;
@@ -1068,11 +1102,10 @@
{
iwl_free_geos(priv);
iwl_free_channel_map(priv);
- if (priv->tx_cmd_pool)
- kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
kfree(rcu_dereference_raw(priv->noa_data));
+ iwl_calib_free_results(priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
kfree(priv->wowlan_sram);
#endif
@@ -1084,6 +1117,10 @@
static void iwl_set_hw_params(struct iwl_priv *priv)
{
+ if (cfg(priv)->ht_params)
+ hw_params(priv).use_rts_for_aggregation =
+ cfg(priv)->ht_params->use_rts_for_aggregation;
+
if (iwlagn_mod_params.amsdu_size_8K)
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_8K);
@@ -1092,13 +1129,10 @@
get_order(IWL_RX_BUF_SIZE_4K);
if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
- cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
hw_params(priv).num_ampdu_queues =
cfg(priv)->base_params->num_of_ampdu_queues;
- hw_params(priv).shadow_reg_enable =
- cfg(priv)->base_params->shadow_reg_enable;
- hw_params(priv).sku = cfg(priv)->sku;
hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
/* Device-specific setup */
@@ -1142,15 +1176,16 @@
#endif
}
-static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans)
+static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
+ const struct iwl_fw *fw)
{
- struct iwl_fw *fw = &nic(trans)->fw;
int err = 0;
struct iwl_priv *priv;
struct ieee80211_hw *hw;
struct iwl_op_mode *op_mode;
u16 num_mac;
u32 ucode_flags;
+ struct iwl_trans_config trans_cfg;
/************************
* 1. Allocating HW data
@@ -1167,9 +1202,32 @@
op_mode->ops = &iwl_dvm_ops;
priv = IWL_OP_MODE_GET_DVM(op_mode);
priv->shrd = trans->shrd;
- priv->shrd->priv = priv;
+ priv->fw = fw;
+ /* TODO: remove fw from shared data later */
+ priv->shrd->fw = fw;
- iwl_trans_configure(trans(priv), op_mode);
+ /*
+ * Populate the state variables that the transport layer needs
+ * to know about.
+ */
+ trans_cfg.op_mode = op_mode;
+
+ ucode_flags = fw->ucode_capa.flags;
+
+#ifndef CONFIG_IWLWIFI_P2P
+ ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
+
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
+ priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
+ trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ } else {
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+ trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ }
+
+ /* Configure transport layer */
+ iwl_trans_configure(trans(priv), &trans_cfg);
/* At this point both hw and priv are allocated. */
@@ -1198,10 +1256,10 @@
* we should init now
*/
spin_lock_init(&trans(priv)->reg_lock);
- spin_lock_init(&priv->shrd->lock);
+ spin_lock_init(&priv->statistics.lock);
/***********************
- * 3. Read REV register
+ * 2. Read REV register
***********************/
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
cfg(priv)->name, trans(priv)->hw_rev);
@@ -1211,9 +1269,8 @@
goto out_free_traffic_mem;
/*****************
- * 4. Read EEPROM
+ * 3. Read EEPROM
*****************/
- /* Read the EEPROM */
err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev);
/* Reset chip to save power until we load uCode during "up". */
iwl_trans_stop_hw(trans(priv));
@@ -1225,7 +1282,7 @@
if (err)
goto out_free_eeprom;
- err = iwl_eeprom_check_sku(priv);
+ err = iwl_eeprom_init_hw_params(priv);
if (err)
goto out_free_eeprom;
@@ -1243,28 +1300,27 @@
}
/************************
- * 5. Setup HW constants
+ * 4. Setup HW constants
************************/
iwl_set_hw_params(priv);
- ucode_flags = fw->ucode_capa.flags;
-
-#ifndef CONFIG_IWLWIFI_P2P
- ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
-#endif
- if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+ if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
+ IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
-
- /*
- * if not PAN, then don't support P2P -- might be a uCode
- * packaging bug or due to the eeprom check above
- */
- if (!(ucode_flags & IWL_UCODE_TLV_FLAGS_PAN))
+ /*
+ * if not PAN, then don't support P2P -- might be a uCode
+ * packaging bug or due to the eeprom check above
+ */
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+ trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ /* Configure transport layer again */
+ iwl_trans_configure(trans(priv), &trans_cfg);
+ }
/*******************
- * 6. Setup priv
+ * 5. Setup priv
*******************/
err = iwl_init_drv(priv);
@@ -1273,7 +1329,7 @@
/* At this point both hw and priv are initialized. */
/********************
- * 7. Setup services
+ * 6. Setup services
********************/
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
@@ -1289,14 +1345,6 @@
priv->new_scan_threshold_behaviour =
!!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
- priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- } else {
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
- priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- }
-
priv->phy_calib_chain_noise_reset_cmd =
fw->ucode_capa.standard_phy_calibration_size;
priv->phy_calib_chain_noise_gain_cmd =
@@ -1308,7 +1356,7 @@
/**************************************************
* This is still part of probe() in a sense...
*
- * 9. Setup and register with mac80211 and debugfs
+ * 7. Setup and register with mac80211 and debugfs
**************************************************/
err = iwlagn_mac_setup_register(priv, &fw->ucode_capa);
if (err)
@@ -1340,18 +1388,10 @@
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- wait_for_completion(&nic(priv)->request_firmware_complete);
-
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
- /* ieee80211_unregister_hw call wil cause iwlagn_mac_stop to
- * to be called and iwl_down since we are removing the device
- * we need to set STATUS_EXIT_PENDING bit.
- */
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
iwl_testmode_cleanup(priv);
iwlagn_mac_unregister(priv);
@@ -1360,8 +1400,6 @@
/*This will stop the queues, move the device to low power state */
iwl_trans_stop_device(trans(priv));
- iwl_dealloc_ucode(nic(priv));
-
iwl_eeprom_free(priv->shrd);
/*netif_stop_queue(dev); */
@@ -1381,6 +1419,60 @@
ieee80211_free_hw(priv->hw);
}
+static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ if (!iwl_check_for_ct_kill(priv)) {
+ IWL_ERR(priv, "Restarting adapter queue is full\n");
+ iwl_nic_error(op_mode);
+ }
+}
+
+static void iwl_nic_config(struct iwl_op_mode *op_mode)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ cfg(priv)->lib->nic_config(priv);
+}
+
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ set_bit(ac, &priv->transport_queue_stop);
+ ieee80211_stop_queue(priv->hw, ac);
+}
+
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ clear_bit(ac, &priv->transport_queue_stop);
+
+ if (!priv->passive_no_rx)
+ ieee80211_wake_queue(priv->hw, ac);
+}
+
+void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
+{
+ int ac;
+
+ if (!priv->passive_no_rx)
+ return;
+
+ for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) {
+ if (!test_bit(ac, &priv->transport_queue_stop)) {
+ IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d");
+ ieee80211_wake_queue(priv->hw, ac);
+ } else {
+ IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d");
+ }
+ }
+
+ priv->passive_no_rx = false;
+}
+
const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@@ -1390,6 +1482,8 @@
.hw_rf_kill = iwl_set_hw_rfkill_state,
.free_skb = iwl_free_skb,
.nic_error = iwl_nic_error,
+ .cmd_queue_full = iwl_cmd_queue_full,
+ .nic_config = iwl_nic_config,
};
/*****************************************************************************
@@ -1397,6 +1491,9 @@
* driver and module entry point
*
*****************************************************************************/
+
+struct kmem_cache *iwl_tx_cmd_pool;
+
static int __init iwl_init(void)
{
@@ -1404,20 +1501,27 @@
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
pr_info(DRV_COPYRIGHT "\n");
+ iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
+ sizeof(struct iwl_device_cmd),
+ sizeof(void *), 0, NULL);
+ if (!iwl_tx_cmd_pool)
+ return -ENOMEM;
+
ret = iwlagn_rate_control_register();
if (ret) {
pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
+ goto error_rc_register;
}
ret = iwl_pci_register_driver();
-
if (ret)
- goto error_register;
+ goto error_pci_register;
return ret;
-error_register:
+error_pci_register:
iwlagn_rate_control_unregister();
+error_rc_register:
+ kmem_cache_destroy(iwl_tx_cmd_pool);
return ret;
}
@@ -1425,6 +1529,7 @@
{
iwl_pci_unregister_driver();
iwlagn_rate_control_unregister();
+ kmem_cache_destroy(iwl_tx_cmd_pool);
}
module_exit(iwl_exit);
@@ -1438,8 +1543,6 @@
module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index cb484e2..3780a03 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -82,20 +82,26 @@
void iwlagn_prepare_restart(struct iwl_priv *priv);
void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac);
-void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac);
void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac);
void iwl_nic_error(struct iwl_op_mode *op_mode);
+bool iwl_check_for_ct_kill(struct iwl_priv *priv);
+
+void iwlagn_lift_passive_no_rx(struct iwl_priv *priv);
+
/* MAC80211 */
struct ieee80211_hw *iwl_alloc_all(void);
int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwl_ucode_capabilities *capa);
+ const struct iwl_ucode_capabilities *capa);
void iwlagn_mac_unregister(struct iwl_priv *priv);
+/* commands */
+int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+ u32 flags, u16 len, const void *data);
+
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -110,9 +116,18 @@
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_priv *priv);
+int iwl_init_alive_start(struct iwl_priv *priv);
+int iwl_run_init_ucode(struct iwl_priv *priv);
+int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
+ enum iwl_ucode_type ucode_type);
+int iwl_send_calib_results(struct iwl_priv *priv);
+int iwl_calib_set(struct iwl_priv *priv,
+ const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_priv *priv);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
@@ -124,8 +139,7 @@
#ifdef CONFIG_PM_SLEEP
int iwlagn_send_patterns(struct iwl_priv *priv,
struct cfg80211_wowlan *wowlan);
-int iwlagn_suspend(struct iwl_priv *priv,
- struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
#endif
/* rx */
@@ -142,9 +156,9 @@
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
@@ -179,7 +193,7 @@
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
@@ -220,6 +234,8 @@
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
@@ -227,46 +243,12 @@
u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_sta *sta);
-/**
- * iwl_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that in the case
- * we're coming there from a hardware restart mac80211 will be
- * able to reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-}
-
static inline int iwl_sta_id(struct ieee80211_sta *sta)
{
if (WARN_ON(!sta))
@@ -275,37 +257,6 @@
return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
}
-/**
- * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-
int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -355,7 +306,6 @@
}
/* eeprom */
-void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
extern int iwl_alive_start(struct iwl_priv *priv);
@@ -402,4 +352,58 @@
}
#endif
+/* status checks */
+
+static inline int iwl_is_ready(struct iwl_priv *priv)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(STATUS_READY, &priv->status) &&
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
+ !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_is_alive(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_is_rfkill(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_is_ctkill(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_is_ready_rf(struct iwl_priv *priv)
+{
+ if (iwl_is_rfkill(priv))
+ return 0;
+
+ return iwl_is_ready(priv);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill((m))) \
+ IWL_ERR(m, fmt, ##args); \
+ else \
+ __iwl_err(trans(m)->dev, true, \
+ !iwl_have_debug_level(IWL_DL_RADIO), \
+ fmt, ##args); \
+} while (0)
+#else
+#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill((m))) \
+ IWL_ERR(m, fmt, ##args); \
+ else \
+ __iwl_err(trans(m)->dev, true, true, fmt, ##args); \
+} while (0)
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
#endif /* __iwl_agn_h__ */
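The new inline helpers above (iwl_is_ready(), iwl_is_rfkill(), and friends) all reduce to testing individual bits of the per-device priv->status word. A rough, self-contained illustration of that flag-word pattern, using plain C bit operations in place of the kernel's set_bit()/test_bit():

#include <stdbool.h>
#include <stdio.h>

enum { ST_READY, ST_GEO_CONFIGURED, ST_EXIT_PENDING, ST_RF_KILL_HW };

static unsigned long status;

static void flag_set(int bit)   { status |= 1UL << bit; }
static void flag_clear(int bit) { status &= ~(1UL << bit); }
static bool flag_test(int bit)  { return status & (1UL << bit); }

/* mirrors the logic of iwl_is_ready(): READY and GEO_CONFIGURED set,
 * EXIT_PENDING clear */
static bool dev_is_ready(void)
{
	return flag_test(ST_READY) &&
	       flag_test(ST_GEO_CONFIGURED) &&
	       !flag_test(ST_EXIT_PENDING);
}

int main(void)
{
	flag_set(ST_READY);
	flag_set(ST_GEO_CONFIGURED);
	printf("ready: %d\n", dev_is_ready());	/* 1 */
	flag_set(ST_EXIT_PENDING);
	printf("ready: %d\n", dev_is_ready());	/* 0 */
	flag_clear(ST_EXIT_PENDING);
	return 0;
}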
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index 1ad14bb..82152311 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -68,46 +68,46 @@
* This file declares the config structures for all devices.
*/
-extern struct iwl_cfg iwl5300_agn_cfg;
-extern struct iwl_cfg iwl5100_agn_cfg;
-extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bgn_cfg;
-extern struct iwl_cfg iwl5100_abg_cfg;
-extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6005_2agn_cfg;
-extern struct iwl_cfg iwl6005_2abg_cfg;
-extern struct iwl_cfg iwl6005_2bg_cfg;
-extern struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern struct iwl_cfg iwl6005_2agn_d_cfg;
-extern struct iwl_cfg iwl6005_2agn_mow1_cfg;
-extern struct iwl_cfg iwl6005_2agn_mow2_cfg;
-extern struct iwl_cfg iwl1030_bgn_cfg;
-extern struct iwl_cfg iwl1030_bg_cfg;
-extern struct iwl_cfg iwl6030_2agn_cfg;
-extern struct iwl_cfg iwl6030_2abg_cfg;
-extern struct iwl_cfg iwl6030_2bgn_cfg;
-extern struct iwl_cfg iwl6030_2bg_cfg;
-extern struct iwl_cfg iwl6000i_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2abg_cfg;
-extern struct iwl_cfg iwl6000i_2bg_cfg;
-extern struct iwl_cfg iwl6000_3agn_cfg;
-extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl6150_bgn_cfg;
-extern struct iwl_cfg iwl6150_bg_cfg;
-extern struct iwl_cfg iwl1000_bgn_cfg;
-extern struct iwl_cfg iwl1000_bg_cfg;
-extern struct iwl_cfg iwl100_bgn_cfg;
-extern struct iwl_cfg iwl100_bg_cfg;
-extern struct iwl_cfg iwl130_bgn_cfg;
-extern struct iwl_cfg iwl130_bg_cfg;
-extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl105_bgn_cfg;
-extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_cfg iwl5300_agn_cfg;
+extern const struct iwl_cfg iwl5100_agn_cfg;
+extern const struct iwl_cfg iwl5350_agn_cfg;
+extern const struct iwl_cfg iwl5100_bgn_cfg;
+extern const struct iwl_cfg iwl5100_abg_cfg;
+extern const struct iwl_cfg iwl5150_agn_cfg;
+extern const struct iwl_cfg iwl5150_abg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_cfg;
+extern const struct iwl_cfg iwl6005_2abg_cfg;
+extern const struct iwl_cfg iwl6005_2bg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern const struct iwl_cfg iwl6005_2agn_d_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
+extern const struct iwl_cfg iwl1030_bgn_cfg;
+extern const struct iwl_cfg iwl1030_bg_cfg;
+extern const struct iwl_cfg iwl6030_2agn_cfg;
+extern const struct iwl_cfg iwl6030_2abg_cfg;
+extern const struct iwl_cfg iwl6030_2bgn_cfg;
+extern const struct iwl_cfg iwl6030_2bg_cfg;
+extern const struct iwl_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_cfg iwl6000i_2abg_cfg;
+extern const struct iwl_cfg iwl6000i_2bg_cfg;
+extern const struct iwl_cfg iwl6000_3agn_cfg;
+extern const struct iwl_cfg iwl6050_2agn_cfg;
+extern const struct iwl_cfg iwl6050_2abg_cfg;
+extern const struct iwl_cfg iwl6150_bgn_cfg;
+extern const struct iwl_cfg iwl6150_bg_cfg;
+extern const struct iwl_cfg iwl1000_bgn_cfg;
+extern const struct iwl_cfg iwl1000_bg_cfg;
+extern const struct iwl_cfg iwl100_bgn_cfg;
+extern const struct iwl_cfg iwl100_bg_cfg;
+extern const struct iwl_cfg iwl130_bgn_cfg;
+extern const struct iwl_cfg iwl130_bg_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern const struct iwl_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl105_bgn_cfg;
+extern const struct iwl_cfg iwl105_bgn_d_cfg;
+extern const struct iwl_cfg iwl135_bgn_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index c20618d..9ed73e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -69,22 +69,9 @@
#ifndef __iwl_commands_h__
#define __iwl_commands_h__
-#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
+#include <linux/types.h>
-struct iwl_priv;
-
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
-
-
-/* Tx rates */
-#define IWL_CCK_RATES 4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
enum {
REPLY_ALIVE = 0x1,
@@ -213,48 +200,6 @@
/* iwl_cmd_header flags value */
#define IWL_CMD_FAILED_MSK 0x40
-#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
-#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
-#define SEQ_RX_FRAME cpu_to_le16(0x8000)
-
-/**
- * struct iwl_cmd_header
- *
- * This header format appears in the beginning of each command sent from the
- * driver, and each response/notification received from uCode.
- */
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
- /*
- * The driver sets up the sequence number to values of its choosing.
- * uCode does not use this value, but passes it back to the driver
- * when sending the response to each driver-originated command, so
- * the driver can match the response to the command. Since the values
- * don't get used by uCode, the driver may set up an arbitrary format.
- *
- * There is one exception: uCode sets bit 15 when it originates
- * the response/notification, i.e. when the response/notification
- * is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_RX when it sends a received frame
- * to the driver; it is not a direct response to any driver command.
- *
- * The Linux driver uses the following format:
- *
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13:14 reserved
- * 15 unsolicited RX or uCode-originated notification
- */
- __le16 sequence;
-
- /* command or response/notification data follows immediately */
- u8 data[0];
-} __packed;
-
-
/**
* iwlagn rate_n_flags bit fields
*
@@ -3151,8 +3096,6 @@
*/
/* Phy calibration command for series */
-/* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
IWL_PHY_CALIBRATE_DC_CMD = 8,
IWL_PHY_CALIBRATE_LO_CMD = 9,
@@ -3161,11 +3104,8 @@
IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18,
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
};
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-
/* This enum defines the bitmap of various calibrations to enable in both
* init ucode and runtime ucode through CALIBRATION_CFG_CMD.
*/
@@ -3905,50 +3845,6 @@
__le64 replay_ctr;
} __packed;
-/******************************************************************************
- * (13)
- * Union of all expected notifications/responses:
- *
- *****************************************************************************/
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
-
-struct iwl_rx_packet {
- /*
- * The first 4 bytes of the RX frame header contain both the RX frame
- * size and some flags.
- * Bit fields:
- * 31: flag flush RB request
- * 30: flag ignore TC (terminal counter) request
- * 29: flag fast IRQ request
- * 28-14: Reserved
- * 13-00: RX frame size
- */
- __le32 len_n_flags;
- struct iwl_cmd_header hdr;
- union {
- struct iwl_alive_resp alive_frame;
- struct iwl_spectrum_notification spectrum_notif;
- struct iwl_csa_notification csa_notif;
- struct iwl_error_resp err_resp;
- struct iwl_card_state_notif card_state_notif;
- struct iwl_add_sta_resp add_sta;
- struct iwl_rem_sta_resp rem_sta;
- struct iwl_sleep_notification sleep_notif;
- struct iwl_spectrum_resp spectrum;
- struct iwl_notif_statistics stats;
- struct iwl_bt_notif_statistics stats_bt;
- struct iwl_compressed_ba_resp compressed_ba;
- struct iwl_missed_beacon_notif missed_beacon;
- struct iwl_coex_medium_notification coex_medium_notif;
- struct iwl_coex_event_resp coex_event;
- struct iwl_bt_coex_profile_notif bt_coex_profile_notif;
- __le32 status;
- u8 raw[0];
- } u;
-} __packed;
-
-int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
-
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
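The hunk above drops the SEQ_TO_QUEUE()/SEQ_TO_INDEX() helpers and struct iwl_cmd_header from this header. The packing those macros describe puts the 8-bit TFD index in bits 0..7 of the 16-bit sequence field and the 5-bit TX queue id in bits 8..12, with bit 15 reserved for uCode-originated notifications. A small self-contained sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* same layout as the removed macros: bits 0..7 TFD index,
 * bits 8..12 TX queue id, bit 15 set by uCode for unsolicited frames */
static uint16_t seq_build(unsigned queue, unsigned index)
{
	return (uint16_t)(((queue & 0x1f) << 8) | (index & 0xff));
}

static unsigned seq_to_queue(uint16_t seq) { return (seq >> 8) & 0x1f; }
static unsigned seq_to_index(uint16_t seq) { return seq & 0xff; }

int main(void)
{
	uint16_t seq = seq_build(4, 200);

	printf("seq=0x%04x queue=%u index=%u\n",
	       seq, seq_to_queue(seq), seq_to_index(seq));
	return 0;
}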
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 275e089..8b85940 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -41,7 +41,6 @@
#include "iwl-shared.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-#include "iwl-wifi.h"
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -114,7 +113,7 @@
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
return 0;
}
@@ -137,7 +136,7 @@
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
@@ -147,7 +146,7 @@
sband->bitrates = rates;
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
@@ -202,18 +201,18 @@
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+ hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n",
trans(priv)->hw_id_str);
- cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+ hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
priv->bands[IEEE80211_BAND_2GHZ].n_channels,
priv->bands[IEEE80211_BAND_5GHZ].n_channels);
- set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
return 0;
}
@@ -225,7 +224,7 @@
{
kfree(priv->ieee_channels);
kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
+ clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
static bool iwl_is_channel_extension(struct iwl_priv *priv,
@@ -317,7 +316,7 @@
conf = &priv->hw->conf;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
@@ -370,7 +369,7 @@
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));
- return iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_timing_cmd,
+ return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
@@ -799,11 +798,10 @@
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
- &priv->shrd->status))
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
}
@@ -836,7 +834,7 @@
unsigned long reload_jiffies;
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv->shrd) & IWL_DL_FW_ERRORS)
+ if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
#endif
@@ -846,11 +844,11 @@
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
- iwl_abort_notification_waits(priv->shrd);
+ iwl_abort_notification_waits(&priv->notif_wait);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
- clear_bit(STATUS_READY, &priv->shrd->status);
+ clear_bit(STATUS_READY, &priv->status);
wake_up(&priv->shrd->wait_command_queue);
@@ -875,7 +873,7 @@
priv->reload_count = 0;
}
- if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
if (iwlagn_mod_params.restart_fw) {
IWL_DEBUG_FW_ERRORS(priv,
"Restarting adapter due to uCode error.\n");
@@ -893,7 +891,7 @@
bool defer;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (priv->tx_power_user_lmt == tx_power && !force)
return 0;
@@ -913,7 +911,7 @@
return -EINVAL;
}
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return -EIO;
/* scan complete and commit_rxon use tx_power_next value,
@@ -921,7 +919,7 @@
priv->tx_power_next = tx_power;
/* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
if (defer && !force) {
IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
@@ -959,7 +957,7 @@
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
+ if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -972,12 +970,12 @@
};
if (flags & CMD_ASYNC)
- return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
CMD_ASYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
- return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
CMD_SYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
@@ -1004,7 +1002,7 @@
{
u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
- if (iwl_get_debug_level(priv->shrd) & IWL_DL_TX) {
+ if (iwl_have_debug_level(IWL_DL_TX)) {
if (!priv->tx_traffic) {
priv->tx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1012,7 +1010,7 @@
return -ENOMEM;
}
}
- if (iwl_get_debug_level(priv->shrd) & IWL_DL_RX) {
+ if (iwl_have_debug_level(IWL_DL_RX)) {
if (!priv->rx_traffic) {
priv->rx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1039,7 +1037,7 @@
__le16 fc;
u16 len;
- if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_TX)))
+ if (likely(!iwl_have_debug_level(IWL_DL_TX)))
return;
if (!priv->tx_traffic)
@@ -1063,7 +1061,7 @@
__le16 fc;
u16 len;
- if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_RX)))
+ if (likely(!iwl_have_debug_level(IWL_DL_RX)))
return;
if (!priv->rx_traffic)
@@ -1220,7 +1218,7 @@
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (!iwl_is_any_associated(priv)) {
@@ -1245,7 +1243,7 @@
{
struct iwl_force_reset *force_reset;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
if (mode >= IWL_MAX_FORCE_RESET) {
@@ -1301,7 +1299,7 @@
.flags = CMD_SYNC,
};
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret)
IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
else
@@ -1335,30 +1333,20 @@
int cnt;
unsigned long timeout;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
return;
- timeout = cfg(priv)->base_params->wd_timeout;
+ timeout = hw_params(priv).wd_timeout;
if (timeout == 0)
return;
- /* monitor and check for stuck cmd queue */
- if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
- return;
-
- /* monitor and check for other stuck queues */
- if (iwl_is_any_associated(priv)) {
- for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == priv->shrd->cmd_queue)
- continue;
- if (iwl_check_stuck_queue(priv, cnt))
- return;
- }
- }
+ /* monitor and check for stuck queues */
+ for (cnt = 0; cnt < cfg(priv)->base_params->num_of_queues; cnt++)
+ if (iwl_check_stuck_queue(priv, cnt))
+ return;
mod_timer(&priv->watchdog, jiffies +
msecs_to_jiffies(IWL_WD_TICK(timeout)));
@@ -1366,7 +1354,7 @@
void iwl_setup_watchdog(struct iwl_priv *priv)
{
- unsigned int timeout = cfg(priv)->base_params->wd_timeout;
+ unsigned int timeout = hw_params(priv).wd_timeout;
if (!iwlagn_mod_params.wd_disable) {
/* use system default */
@@ -1471,34 +1459,19 @@
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
-}
+ if (state)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
-void iwl_nic_config(struct iwl_priv *priv)
-{
- cfg(priv)->lib->nic_config(priv);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
}
void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info;
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+ kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
dev_kfree_skb_any(skb);
}
-
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
- ieee80211_wake_queue(priv->hw, ac);
-}
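The watchdog change above replaces the "check the command queue, then the other queues only while associated" logic with a single loop over all configured queues, re-arming the timer only when nothing is stuck. A rough self-contained sketch of that tick; check_queue_stuck() and rearm_timer() are made-up stand-ins for iwl_check_stuck_queue() and mod_timer():

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 20

static bool check_queue_stuck(int q) { return false; }	/* pretend all healthy */
static void rearm_timer(void)        { printf("timer re-armed\n"); }

static void watchdog_tick(void)
{
	int q;

	for (q = 0; q < NUM_QUEUES; q++)
		if (check_queue_stuck(q))
			return;	/* recovery runs elsewhere, do not re-arm */

	rearm_timer();
}

int main(void)
{
	watchdog_tick();
	return 0;
}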
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 42630f5e..635eb68 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -77,12 +77,6 @@
struct iwl_lib_ops {
/* set hw dependent parameters */
void (*set_hw_params)(struct iwl_priv *priv);
- /* setup BT Rx handler */
- void (*bt_rx_handler_setup)(struct iwl_priv *priv);
- /* setup BT related deferred work */
- void (*bt_setup_deferred_work)(struct iwl_priv *priv);
- /* cancel deferred work */
- void (*cancel_deferred_work)(struct iwl_priv *priv);
int (*set_channel_switch)(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch);
/* device specific configuration */
@@ -95,72 +89,6 @@
void (*temperature)(struct iwl_priv *priv);
};
-/*
- * @max_ll_items: max number of OTP blocks
- * @shadow_ram_support: shadow support for OTP memory
- * @led_compensation: compensate on the led on/off time per HW according
- * to the deviation to achieve the desired led frequency.
- * The detail algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @adv_thermal_throttle: support advance thermal throttle
- * @support_ct_kill_exit: support ct kill exit condition
- * @support_wimax_coexist: support wimax/wifi co-exist
- * @plcp_delta_threshold: plcp error rate threshold used to trigger
- * radio tuning when there is a high receiving plcp error rate
- * @chain_noise_scale: default chain noise scale used for gain computation
- * @wd_timeout: TX queues watchdog timeout
- * @max_event_log_size: size of event log buffer size for ucode event logging
- * @shadow_reg_enable: HW shadhow register bit
- * @no_idle_support: do not support idle mode
- * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
- * wd_disable: disable watchdog timer
- */
-struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues;/* def: HW dependent */
- /* for iwl_apm_init() */
- u32 pll_cfg_val;
-
- const u16 max_ll_items;
- const bool shadow_ram_support;
- u16 led_compensation;
- bool adv_thermal_throttle;
- bool support_ct_kill_exit;
- const bool support_wimax_coexist;
- u8 plcp_delta_threshold;
- s32 chain_noise_scale;
- unsigned int wd_timeout;
- u32 max_event_log_size;
- const bool shadow_reg_enable;
- const bool no_idle_support;
- const bool hd_v2;
- const bool wd_disable;
-};
-/*
- * @advanced_bt_coexist: support advanced bt coexist
- * @bt_init_traffic_load: specify initial bt traffic load
- * @bt_prio_boost: default bt priority boost value
- * @agg_time_limit: maximum number of uSec in aggregation
- * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
- */
-struct iwl_bt_params {
- bool advanced_bt_coexist;
- u8 bt_init_traffic_load;
- u8 bt_prio_boost;
- u16 agg_time_limit;
- bool bt_sco_disable;
- bool bt_session_2;
-};
-/*
- * @use_rts_for_aggregation: use rts/cts protection for HT traffic
- */
-struct iwl_ht_params {
- const bool ht_greenfield_support; /* if used set to true */
- bool use_rts_for_aggregation;
- enum ieee80211_smps_mode smps_mode;
-};
-
/***************************
* L i b *
***************************/
@@ -244,8 +172,6 @@
void iwl_force_scan_end(struct iwl_priv *priv);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
-u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
@@ -265,6 +191,10 @@
#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
+/* traffic log definitions */
+#define IWL_TRAFFIC_ENTRIES (256)
+#define IWL_TRAFFIC_ENTRY_SIZE (64)
+
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 4bc2e70..059efab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -104,7 +104,7 @@
}
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
-void __iwl_dbg(struct iwl_shared *shared, struct device *dev,
+void __iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...)
{
@@ -116,7 +116,7 @@
va_start(args, fmt);
vaf.va = &args;
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(shared) & level &&
+ if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
function, &vaf);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 01b2330..a6b32a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,12 +47,12 @@
#define IWL_CRIT(m, f, a...) __iwl_crit(trans(m)->dev, f, ## a)
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
-void __iwl_dbg(struct iwl_shared *shared, struct device *dev,
+void __iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...);
#else
static inline void
-__iwl_dbg(struct iwl_shared *shared, struct device *dev,
+__iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...)
{}
@@ -65,35 +65,19 @@
} while (0)
#define IWL_DEBUG(m, level, fmt, args...) \
- __iwl_dbg((m)->shrd, trans(m)->dev, level, false, __func__, fmt, ##args)
+ __iwl_dbg(trans(m)->dev, level, false, __func__, fmt, ##args)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
- __iwl_dbg((m)->shrd, trans(m)->dev, level, true, __func__, fmt, ##args)
+ __iwl_dbg(trans(m)->dev, level, true, __func__, fmt, ##args)
#ifdef CONFIG_IWLWIFI_DEBUG
#define iwl_print_hex_dump(m, level, p, len) \
do { \
- if (iwl_get_debug_level((m)->shrd) & level) \
+ if (iwl_have_debug_level(level)) \
print_hex_dump(KERN_DEBUG, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
-do { \
- if (!iwl_is_rfkill((m)->shrd)) \
- IWL_ERR(m, fmt, ##args); \
- else \
- __iwl_err(trans(m)->dev, true, \
- !(iwl_get_debug_level((m)->shrd) & IWL_DL_RADIO),\
- fmt, ##args); \
-} while (0)
#else
#define iwl_print_hex_dump(m, level, p, len)
-#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
-do { \
- if (!iwl_is_rfkill((m)->shrd)) \
- IWL_ERR(m, fmt, ##args); \
- else \
- __iwl_err(trans(m)->dev, true, true, fmt, ##args); \
-} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
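IWL_DEBUG_QUIET_RFKILL, now moved into iwl-agn.h above, and the other logging macros wrap their bodies in do { ... } while (0). A tiny standalone example (not from the patch) of why that idiom matters: it lets a multi-statement macro sit safely in an unbraced if/else, and the ## before __VA_ARGS__ (a gcc/clang extension the kernel relies on) swallows the comma when no extra arguments follow the format string:

#include <stdio.h>

/* multi-statement body wrapped in do { } while (0) so the macro
 * expands to exactly one statement */
#define LOG_IF(cond, fmt, ...)				\
do {							\
	if (cond)					\
		fprintf(stderr, fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	int errors = 1;

	if (errors)
		LOG_IF(errors > 0, "saw %d error(s)\n", errors);
	else
		LOG_IF(1, "clean run\n");
	return 0;
}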
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ad74138e..9b71c87 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -40,7 +40,6 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn.h"
-#include "iwl-wifi.h"
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -235,12 +234,11 @@
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
- struct iwl_nic *nic = nic(priv);
priv->dbgfs_sram_offset = 0x800000;
- if (nic->shrd->ucode_type == IWL_UCODE_INIT)
- priv->dbgfs_sram_len = nic->fw.ucode_init.data.len;
+ if (priv->shrd->ucode_type == IWL_UCODE_INIT)
+ priv->dbgfs_sram_len = priv->fw->ucode_init.data.len;
else
- priv->dbgfs_sram_len = nic->fw.ucode_rt.data.len;
+ priv->dbgfs_sram_len = priv->fw->ucode_rt.data.len;
}
len = priv->dbgfs_sram_len;
@@ -343,7 +341,7 @@
return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
- nic(priv)->fw.ucode_wowlan.data.len);
+ priv->fw->ucode_wowlan.data.len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -456,7 +454,7 @@
char *buf;
ssize_t ret;
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status))
+ if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -527,32 +525,26 @@
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->shrd->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->shrd->status));
+ test_bit(STATUS_CT_KILL, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->shrd->status));
+ test_bit(STATUS_ALIVE, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->shrd->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->shrd->status));
+ test_bit(STATUS_READY, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status));
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->shrd->status));
+ test_bit(STATUS_EXIT_PENDING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->shrd->status));
+ test_bit(STATUS_STATISTICS, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->shrd->status));
+ test_bit(STATUS_SCANNING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status));
+ test_bit(STATUS_SCAN_ABORTING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->shrd->status));
+ test_bit(STATUS_SCAN_HW, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
test_bit(STATUS_POWER_PMI, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
@@ -758,14 +750,14 @@
if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
return -EINVAL;
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return -EAGAIN;
priv->power_data.debug_sleep_level_override = value;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_power_update_mode(priv, true);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -836,7 +828,7 @@
char *buf;
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (hw_params(priv).max_txq_num * 32 * 8) + 400;
+ (cfg(priv)->base_params->num_of_queues * 32 * 8) + 400;
const u8 *ptr;
ssize_t ret;
@@ -845,8 +837,7 @@
IWL_ERR(priv, "Can not allocate buffer\n");
return -ENOMEM;
}
- if (priv->tx_traffic &&
- (iwl_get_debug_level(priv->shrd) & IWL_DL_TX)) {
+ if (priv->tx_traffic && iwl_have_debug_level(IWL_DL_TX)) {
ptr = priv->tx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
@@ -864,8 +855,7 @@
}
}
- if (priv->rx_traffic &&
- (iwl_get_debug_level(priv->shrd) & IWL_DL_RX)) {
+ if (priv->rx_traffic && iwl_have_debug_level(IWL_DL_RX)) {
ptr = priv->rx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
@@ -920,6 +910,8 @@
int p = 0;
u32 flag;
+ lockdep_assert_held(&priv->statistics.lock);
+
flag = le32_to_cpu(priv->statistics.flag);
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
@@ -953,7 +945,7 @@
struct statistics_rx_non_phy *delta_general, *max_general;
struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -967,6 +959,7 @@
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+ spin_lock_bh(&priv->statistics.lock);
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
general = &priv->statistics.rx_non_phy;
@@ -1363,6 +1356,8 @@
accum_ht->unsupport_mcs,
delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1379,7 +1374,7 @@
ssize_t ret;
struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1392,6 +1387,8 @@
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+ spin_lock_bh(&priv->statistics.lock);
+
tx = &priv->statistics.tx;
accum_tx = &priv->accum_stats.tx;
delta_tx = &priv->delta_stats.tx;
@@ -1541,19 +1538,25 @@
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ if ((hw_params(priv).valid_tx_ant & ANT_A) &&
+ tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ if ((hw_params(priv).valid_tx_ant & ANT_B) &&
+ tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ if ((hw_params(priv).valid_tx_ant & ANT_C) &&
+ tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
tx->tx_power.ant_c);
}
+
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1573,7 +1576,7 @@
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct statistics_div *div, *accum_div, *delta_div, *max_div;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1586,6 +1589,9 @@
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+
+ spin_lock_bh(&priv->statistics.lock);
+
general = &priv->statistics.common;
dbg = &priv->statistics.common.dbg;
div = &priv->statistics.common.div;
@@ -1670,6 +1676,9 @@
accum_general->num_of_sos_states,
delta_general->num_of_sos_states,
max_general->num_of_sos_states);
+
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1686,16 +1695,16 @@
ssize_t ret;
struct statistics_bt_activity *bt, *accum_bt;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
if (!priv->bt_enable_flag)
return -EINVAL;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
if (ret) {
IWL_ERR(priv,
@@ -1713,6 +1722,9 @@
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+
+ spin_lock_bh(&priv->statistics.lock);
+
bt = &priv->statistics.bt_activity;
accum_bt = &priv->accum_stats.bt_activity;
@@ -1758,6 +1770,8 @@
le32_to_cpu(priv->statistics.num_bt_kills),
priv->statistics.accum_num_bt_kills);
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1774,7 +1788,7 @@
(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
ssize_t ret;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2086,9 +2100,9 @@
return -EFAULT;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -2132,7 +2146,7 @@
if (trace) {
priv->event_log.ucode_trace = true;
- if (iwl_is_alive(priv->shrd)) {
+ if (iwl_is_alive(priv)) {
/* start collecting data now */
mod_timer(&priv->ucode_trace, jiffies);
}
@@ -2221,7 +2235,7 @@
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
- cfg(priv)->base_params->plcp_delta_threshold);
+ priv->plcp_delta_threshold);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -2243,10 +2257,10 @@
return -EINVAL;
if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
- cfg(priv)->base_params->plcp_delta_threshold =
+ priv->plcp_delta_threshold =
IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
else
- cfg(priv)->base_params->plcp_delta_threshold = plcp;
+ priv->plcp_delta_threshold = plcp;
return count;
}
@@ -2322,7 +2336,7 @@
if (sscanf(buf, "%d", &flush) != 1)
return -EINVAL;
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
return -EFAULT;
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
@@ -2348,7 +2362,7 @@
if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
timeout = IWL_DEF_WD_TIMEOUT;
- cfg(priv)->base_params->wd_timeout = timeout;
+ hw_params(priv).wd_timeout = timeout;
iwl_setup_watchdog(priv);
return count;
}
@@ -2411,7 +2425,7 @@
if (cfg(priv)->ht_params)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
- (cfg(priv)->ht_params->use_rts_for_aggregation) ?
+ (hw_params(priv).use_rts_for_aggregation) ?
"rts/cts" : "cts-to-self");
else
pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2438,9 +2452,9 @@
if (sscanf(buf, "%d", &rts) != 1)
return -EINVAL;
if (rts)
- cfg(priv)->ht_params->use_rts_for_aggregation = true;
+ hw_params(priv).use_rts_for_aggregation = true;
else
- cfg(priv)->ht_params->use_rts_for_aggregation = false;
+ hw_params(priv).use_rts_for_aggregation = false;
return count;
}
@@ -2486,52 +2500,6 @@
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
-#ifdef CONFIG_IWLWIFI_DEBUG
-static ssize_t iwl_dbgfs_debug_level_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_shared *shrd = priv->shrd;
- char buf[11];
- int len;
-
- len = scnprintf(buf, sizeof(buf), "0x%.8x",
- iwl_get_debug_level(shrd));
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_dbgfs_debug_level_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_shared *shrd = priv->shrd;
- char buf[11];
- unsigned long val;
- int ret;
-
- if (count > sizeof(buf))
- return -EINVAL;
-
- memset(buf, 0, sizeof(buf));
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- shrd->dbg_level_dev = val;
- if (iwl_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- return count;
-}
-DEBUGFS_READ_WRITE_FILE_OPS(debug_level);
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
/*
* Create the debugfs files and directories
*
@@ -2596,9 +2564,6 @@
DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_DEBUG
- DEBUGFS_ADD_FILE(debug_level, dir_debug, S_IRUSR | S_IWUSR);
-#endif
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
&priv->disable_sens_cal);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6e5bf0a..aa4b3b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -36,11 +36,10 @@
#include <linux/wait.h>
#include <linux/leds.h>
#include <linux/slab.h>
-#include <net/ieee80211_radiotap.h>
+#include <linux/mutex.h>
#include "iwl-eeprom.h"
#include "iwl-csr.h"
-#include "iwl-prph.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
@@ -50,6 +49,7 @@
#include "iwl-trans.h"
#include "iwl-shared.h"
#include "iwl-op-mode.h"
+#include "iwl-notif-wait.h"
struct iwl_tx_queue;
@@ -294,7 +294,6 @@
struct iwl_sensitivity_ranges {
u16 min_nrg_cck;
- u16 max_nrg_cck;
u16 nrg_th_cck;
u16 nrg_th_ofdm;
@@ -670,11 +669,6 @@
bool enabled, is_40mhz;
u8 extension_chan_offset;
} ht;
-
- u8 bssid[ETH_ALEN];
- bool preauth_bssid;
-
- bool last_tx_rejected;
};
enum iwl_scan_type {
@@ -718,29 +712,44 @@
/*data shared among all the driver's layers */
struct iwl_shared *shrd;
+ const struct iwl_fw *fw;
+ unsigned long status;
+
+ spinlock_t sta_lock;
+ struct mutex mutex;
+
+ unsigned long transport_queue_stop;
+ bool passive_no_rx;
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
- struct kmem_cache *tx_cmd_pool;
+
+ struct list_head calib_results;
struct workqueue_struct *workqueue;
enum ieee80211_band band;
void (*pre_rx_handler)(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ struct iwl_rx_cmd_buffer *rxb);
int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+ struct iwl_notif_wait_data notif_wait;
+
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
+#define IWL_OWNERSHIP_DRIVER 0
+#define IWL_OWNERSHIP_TM 1
+ u8 ucode_owner;
+
/* ucode beacon time */
u32 ucode_beacon_time;
int missed_beacon_threshold;
@@ -766,6 +775,8 @@
struct iwl_channel_info *channel_info; /* channel info array */
u8 channel_count; /* # of channels */
+ u8 plcp_delta_threshold;
+
/* thermal calibration */
s32 temperature; /* Celsius */
s32 last_temperature;
@@ -788,6 +799,8 @@
bool new_scan_threshold_behaviour;
+ bool wowlan;
+
/* EEPROM MAC addresses */
struct mac_address addresses[2];
@@ -845,6 +858,7 @@
struct statistics_bt_activity bt_activity;
__le32 num_bt_kills, accum_num_bt_kills;
#endif
+ spinlock_t lock;
} statistics;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
@@ -968,6 +982,7 @@
bool have_rekey_data;
}; /*iwl_priv */
+extern struct kmem_cache *iwl_tx_cmd_pool;
extern struct iwl_mod_params iwlagn_mod_params;
static inline struct iwl_rxon_context *
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 96e6233..06203d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -41,130 +41,134 @@
static inline void trace_ ## name(proto) {}
#endif
-#define PRIV_ENTRY __field(void *, priv)
-#define PRIV_ASSIGN __entry->priv = priv
+#define DEV_ENTRY __string(dev, dev_name(dev))
+#define DEV_ASSIGN __assign_str(dev, dev_name(dev))
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_io
TRACE_EVENT(iwlwifi_dev_ioread32,
- TP_PROTO(void *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val)
+ TP_printk("[%s] read io[%#x] = %#x",
+ __get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite8,
- TP_PROTO(void *priv, u32 offs, u8 val),
- TP_ARGS(priv, offs, val),
+ TP_PROTO(const struct device *dev, u32 offs, u8 val),
+ TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, offs)
__field(u8, val)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
+ TP_printk("[%s] write io[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite32,
- TP_PROTO(void *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
+ TP_printk("[%s] write io[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_irq,
- TP_PROTO(void *priv),
- TP_ARGS(priv),
+ TP_PROTO(const struct device *dev),
+ TP_ARGS(dev),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
),
/* TP_printk("") doesn't compile */
TP_printk("%d", 0)
);
TRACE_EVENT(iwlwifi_dev_ict_read,
- TP_PROTO(void *priv, u32 index, u32 value),
- TP_ARGS(priv, index, value),
+ TP_PROTO(const struct device *dev, u32 index, u32 value),
+ TP_ARGS(dev, index, value),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, index)
__field(u32, value)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->index = index;
__entry->value = value;
),
- TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value)
+ TP_printk("[%s] read ict[%d] = %#.8x",
+ __get_str(dev), __entry->index, __entry->value)
);
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_ucode
TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
- TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
- TP_ARGS(priv, time, data, ev),
+ TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+ TP_ARGS(dev, time, data, ev),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, time)
__field(u32, data)
__field(u32, ev)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->time = time;
__entry->data = data;
__entry->ev = ev;
),
- TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
- __entry->priv, __entry->time, __entry->data, __entry->ev)
+ TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+ __get_str(dev), __entry->time, __entry->data, __entry->ev)
);
TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
- TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
- TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(dev, wraps, n_entry, p_entry),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, wraps)
__field(u32, n_entry)
__field(u32, p_entry)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->wraps = wraps;
__entry->n_entry = n_entry;
__entry->p_entry = p_entry;
),
- TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
- __entry->priv, __entry->wraps, __entry->n_entry,
+ TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X",
+ __get_str(dev), __entry->wraps, __entry->n_entry,
__entry->p_entry)
);
@@ -232,52 +236,52 @@
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(void *priv, u32 flags,
+ TP_PROTO(const struct device *dev, u32 flags,
const void *hcmd0, size_t len0,
const void *hcmd1, size_t len1,
const void *hcmd2, size_t len2),
- TP_ARGS(priv, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
+ TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__dynamic_array(u8, hcmd0, len0)
__dynamic_array(u8, hcmd1, len1)
__dynamic_array(u8, hcmd2, len2)
__field(u32, flags)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
__entry->flags = flags;
),
- TP_printk("[%p] hcmd %#.2x (%ssync)",
- __entry->priv, ((u8 *)__get_dynamic_array(hcmd0))[0],
+ TP_printk("[%s] hcmd %#.2x (%ssync)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(void *priv, void *rxbuf, size_t len),
- TP_ARGS(priv, rxbuf, len),
+ TP_PROTO(const struct device *dev, void *rxbuf, size_t len),
+ TP_ARGS(dev, rxbuf, len),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__dynamic_array(u8, rxbuf, len)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
),
- TP_printk("[%p] RX cmd %#.2x",
- __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+ TP_printk("[%s] RX cmd %#.2x",
+ __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(void *priv, void *tfd, size_t tfdlen,
+ TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
- TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(size_t, framelen)
__dynamic_array(u8, tfd, tfdlen)
@@ -291,29 +295,28 @@
__dynamic_array(u8, buf1, buf1_len)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->framelen = buf0_len + buf1_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
),
- TP_printk("[%p] TX %.2x (%zu bytes)",
- __entry->priv,
- ((u8 *)__get_dynamic_array(buf0))[0],
+ TP_printk("[%s] TX %.2x (%zu bytes)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
__entry->framelen)
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(void *priv, u32 desc, u32 tsf_low,
+ TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
u32 brd_ver),
- TP_ARGS(priv, desc, tsf_low, data1, data2, line,
+ TP_ARGS(dev, desc, tsf_low, data1, data2, line,
blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
gp3, ucode_ver, hw_ver, brd_ver),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, desc)
__field(u32, tsf_low)
__field(u32, data1)
@@ -332,7 +335,7 @@
__field(u32, brd_ver)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->desc = desc;
__entry->tsf_low = tsf_low;
__entry->data1 = data1;
@@ -350,11 +353,11 @@
__entry->hw_ver = hw_ver;
__entry->brd_ver = brd_ver;
),
- TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+ TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
"blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
"bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
"hw 0x%08X brd 0x%08X",
- __entry->priv, __entry->desc, __entry->tsf_low,
+ __get_str(dev), __entry->desc, __entry->tsf_low,
__entry->data1,
__entry->data2, __entry->line, __entry->blink1,
__entry->blink2, __entry->ilink1, __entry->ilink2,
@@ -364,23 +367,23 @@
);
TRACE_EVENT(iwlwifi_dev_ucode_event,
- TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
- TP_ARGS(priv, time, data, ev),
+ TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+ TP_ARGS(dev, time, data, ev),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, time)
__field(u32, data)
__field(u32, ev)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->time = time;
__entry->data = data;
__entry->ev = ev;
),
- TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
- __entry->priv, __entry->time, __entry->data, __entry->ev)
+ TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+ __get_str(dev), __entry->time, __entry->data, __entry->ev)
);
#endif /* __IWLWIFI_DEVICE_TRACE */
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 8ff5256..29a3ae4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -61,33 +61,686 @@
*
*****************************************************************************/
#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
-#include "iwl-wifi.h"
+#include "iwl-shared.h"
#include "iwl-op-mode.h"
-int iwl_drv_start(struct iwl_shared *shrd,
- struct iwl_trans *trans, struct iwl_cfg *cfg)
+/* private includes */
+#include "iwl-fw-file.h"
+
+/**
+ * struct iwl_drv - drv common data
+ * @fw: the iwl_fw structure
+ * @shrd: pointer to common shared structure
+ * @op_mode: the running op_mode
+ * @fw_index: firmware revision to try loading
+ * @firmware_name: composite filename of ucode file to load
+ * @request_firmware_complete: the firmware has been obtained from user space
+ */
+struct iwl_drv {
+ struct iwl_fw fw;
+
+ struct iwl_shared *shrd;
+ struct iwl_op_mode *op_mode;
+
+ int fw_index; /* firmware we're trying to load */
+ char firmware_name[25]; /* name of firmware file to load */
+
+ struct completion request_firmware_complete;
+};
+
+
+
+static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
{
+ if (desc->v_addr)
+ dma_free_coherent(trans(drv)->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
+{
+ iwl_free_fw_desc(drv, &img->code);
+ iwl_free_fw_desc(drv, &img->data);
+}
+
+static void iwl_dealloc_ucode(struct iwl_drv *drv)
+{
+ iwl_free_fw_img(drv, &drv->fw.ucode_rt);
+ iwl_free_fw_img(drv, &drv->fw.ucode_init);
+ iwl_free_fw_img(drv, &drv->fw.ucode_wowlan);
+}
+
+static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
+ const void *data, size_t len)
+{
+ if (!len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(trans(drv)->dev, len,
+ &desc->p_addr, GFP_KERNEL);
+ if (!desc->v_addr)
+ return -ENOMEM;
+
+ desc->len = len;
+ memcpy(desc->v_addr, data, len);
+ return 0;
+}
+
+static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+
+#define UCODE_EXPERIMENTAL_INDEX 100
+#define UCODE_EXPERIMENTAL_TAG "exp"
+
+static int iwl_request_firmware(struct iwl_drv *drv, bool first)
+{
+ const struct iwl_cfg *cfg = cfg(drv);
+ const char *name_pre = cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
+ drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
+ strcpy(tag, UCODE_EXPERIMENTAL_TAG);
+ } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
+#endif
+ drv->fw_index = cfg->ucode_api_max;
+ sprintf(tag, "%d", drv->fw_index);
+ } else {
+ drv->fw_index--;
+ sprintf(tag, "%d", drv->fw_index);
+ }
+
+ if (drv->fw_index < cfg->ucode_api_min) {
+ IWL_ERR(drv, "no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
+ (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
+ ? "EXPERIMENTAL " : "",
+ drv->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
+ trans(drv)->dev,
+ GFP_KERNEL, drv, iwl_ucode_callback);
+}
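A minimal illustration of the fallback naming implemented above, assuming the
experimental-ucode option is disabled and a hypothetical cfg with fw_name_pre
"iwlwifi-6000-", ucode_api_max 6 and ucode_api_min 4:

	/* 1st request (first == true):  fw_index = 6 -> "iwlwifi-6000-6.ucode" */
	/* 2nd request (first == false): fw_index = 5 -> "iwlwifi-6000-5.ucode" */
	/* 3rd request:                  fw_index = 4 -> "iwlwifi-6000-4.ucode" */
	/* 4th request:                  fw_index = 3 < ucode_api_min -> -ENOENT */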
+
+struct iwlagn_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
+ size_t inst_size, data_size, init_size, init_data_size,
+ wowlan_inst_size, wowlan_data_size;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+};
+
+static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size, build;
+ char buildstr[25];
+ const u8 *src;
+
+ drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+
+ switch (api_ver) {
+ default:
+ hdr_size = 28;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(drv, "File size too small!\n");
+ return -EINVAL;
+ }
+ build = le32_to_cpu(ucode->u.v2.build);
+ pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
+ pieces->init_data_size =
+ le32_to_cpu(ucode->u.v2.init_data_size);
+ src = ucode->u.v2.data;
+ break;
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(drv, "File size too small!\n");
+ return -EINVAL;
+ }
+ build = 0;
+ pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
+ pieces->init_data_size =
+ le32_to_cpu(ucode->u.v1.init_data_size);
+ src = ucode->u.v1.data;
+ break;
+ }
+
+ if (build)
+ sprintf(buildstr, " build %u%s", build,
+ (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
+ ? " (EXP)" : "");
+ else
+ buildstr[0] = '\0';
+
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%u.%u.%u%s",
+ IWL_UCODE_MAJOR(drv->fw.ucode_ver),
+ IWL_UCODE_MINOR(drv->fw.ucode_ver),
+ IWL_UCODE_API(drv->fw.ucode_ver),
+ IWL_UCODE_SERIAL(drv->fw.ucode_ver),
+ buildstr);
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size != hdr_size + pieces->inst_size +
+ pieces->data_size + pieces->init_size +
+ pieces->init_data_size) {
+
+ IWL_ERR(drv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+
+ return 0;
+}
+
+static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces,
+ struct iwl_ucode_capabilities *capa)
+{
+ struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ struct iwl_ucode_tlv *tlv;
+ size_t len = ucode_raw->size;
+ const u8 *data;
+ int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
+ int tmp;
+ u64 alternatives;
+ u32 tlv_len;
+ enum iwl_ucode_tlv_type tlv_type;
+ const u8 *tlv_data;
+ char buildstr[25];
+ u32 build;
+
+ if (len < sizeof(*ucode)) {
+ IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
+ return -EINVAL;
+ }
+
+ if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
+ IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
+ le32_to_cpu(ucode->magic));
+ return -EINVAL;
+ }
+
+ /*
+ * Check which alternatives are present, and "downgrade"
+ * when the chosen alternative is not present, warning
+ * the user when that happens. Some files may not have
+ * any alternatives, so don't warn in that case.
+ */
+ alternatives = le64_to_cpu(ucode->alternatives);
+ tmp = wanted_alternative;
+ if (wanted_alternative > 63)
+ wanted_alternative = 63;
+ while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
+ wanted_alternative--;
+ if (wanted_alternative && wanted_alternative != tmp)
+ IWL_WARN(drv,
+ "uCode alternative %d not available, choosing %d\n",
+ tmp, wanted_alternative);
+
+ drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+ build = le32_to_cpu(ucode->build);
+
+ if (build)
+ sprintf(buildstr, " build %u%s", build,
+ (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
+ ? " (EXP)" : "");
+ else
+ buildstr[0] = '\0';
+
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%u.%u.%u%s",
+ IWL_UCODE_MAJOR(drv->fw.ucode_ver),
+ IWL_UCODE_MINOR(drv->fw.ucode_ver),
+ IWL_UCODE_API(drv->fw.ucode_ver),
+ IWL_UCODE_SERIAL(drv->fw.ucode_ver),
+ buildstr);
+
+ data = ucode->data;
+
+ len -= sizeof(*ucode);
+
+ while (len >= sizeof(*tlv)) {
+ u16 tlv_alt;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_alt = le16_to_cpu(tlv->alternative);
+ tlv_data = tlv->data;
+
+ if (len < tlv_len) {
+ IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return -EINVAL;
+ }
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+ /*
+ * Alternative 0 is always valid.
+ *
+ * Skip alternative TLVs that are not selected.
+ */
+ if (tlv_alt != 0 && tlv_alt != wanted_alternative)
+ continue;
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_INST:
+ pieces->inst = tlv_data;
+ pieces->inst_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_DATA:
+ pieces->data = tlv_data;
+ pieces->data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT:
+ pieces->init = tlv_data;
+ pieces->init_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT_DATA:
+ pieces->init_data = tlv_data;
+ pieces->init_data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_BOOT:
+ IWL_ERR(drv, "Found unexpected BOOT ucode\n");
+ break;
+ case IWL_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ capa->max_probe_length =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_PAN:
+ if (tlv_len)
+ goto invalid_tlv_len;
+ capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
+ break;
+ case IWL_UCODE_TLV_FLAGS:
+ /* must be at least one u32 */
+ if (tlv_len < sizeof(u32))
+ goto invalid_tlv_len;
+ /* and a proper number of u32s */
+ if (tlv_len % sizeof(u32))
+ goto invalid_tlv_len;
+ /*
+ * This driver only reads the first u32 as
+ * right now no more features are defined,
+ * if that changes then either the driver
+ * will not work with the new firmware, or
+ * it'll not take advantage of new features.
+ */
+ capa->flags = le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->init_evtlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->init_evtlog_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->init_errlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->inst_evtlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->inst_evtlog_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->inst_errlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
+ if (tlv_len)
+ goto invalid_tlv_len;
+ drv->fw.enhance_sensitivity_table = true;
+ break;
+ case IWL_UCODE_TLV_WOWLAN_INST:
+ pieces->wowlan_inst = tlv_data;
+ pieces->wowlan_inst_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_WOWLAN_DATA:
+ pieces->wowlan_data = tlv_data;
+ pieces->wowlan_data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ capa->standard_phy_calibration_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ default:
+ IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
+ break;
+ }
+ }
+
+ if (len) {
+ IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
+ iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+ return -EINVAL;
+ }
+
+ return 0;
+
+ invalid_tlv_len:
+ IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
+ iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);
+
+ return -EINVAL;
+}
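A worked example of the cursor arithmetic in the TLV loop above (illustrative
values, not taken from any real ucode file); each record costs the 8-byte
struct iwl_ucode_tlv header plus its payload rounded up to a 4-byte boundary:

	/* type = IWL_UCODE_TLV_PROBE_MAX_LEN, length = 4:
	 *   sizeof(*tlv) + ALIGN(4, 4)  = 8 + 4  = 12 bytes consumed */
	/* type = IWL_UCODE_TLV_DATA, length = 13:
	 *   sizeof(*tlv) + ALIGN(13, 4) = 8 + 16 = 24 bytes consumed */
	/* after both records, data has advanced by 36 and len has shrunk by 36 */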
+
+/**
+ * iwl_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct iwl_drv *drv = context;
+ const struct iwl_cfg *cfg = cfg(drv);
+ struct iwl_fw *fw = &drv->fw;
+ struct iwl_ucode_header *ucode;
+ int err;
+ struct iwlagn_firmware_pieces pieces;
+ const unsigned int api_max = cfg->ucode_api_max;
+ unsigned int api_ok = cfg->ucode_api_ok;
+ const unsigned int api_min = cfg->ucode_api_min;
+ u32 api_ver;
+
+ fw->ucode_capa.max_probe_length = 200;
+ fw->ucode_capa.standard_phy_calibration_size =
+ IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ if (!api_ok)
+ api_ok = api_max;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (drv->fw_index <= api_ok)
+ IWL_ERR(drv,
+ "request for firmware file '%s' failed.\n",
+ drv->firmware_name);
+ goto try_again;
+ }
+
+ IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
+ drv->firmware_name, ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IWL_ERR(drv, "File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+ if (ucode->ver)
+ err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces);
+ else
+ err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces,
+ &fw->ucode_capa);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ /* no api version check required for experimental uCode */
+ if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
+ if (api_ver < api_min || api_ver > api_max) {
+ IWL_ERR(drv,
+ "Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n",
+ api_max, api_ver);
+ goto try_again;
+ }
+
+ if (api_ver < api_ok) {
+ if (api_ok != api_max)
+ IWL_ERR(drv, "Firmware has old API version, "
+ "expected v%u through v%u, got v%u.\n",
+ api_ok, api_max, api_ver);
+ else
+ IWL_ERR(drv, "Firmware has old API version, "
+ "expected v%u, got v%u.\n",
+ api_max, api_ver);
+ IWL_ERR(drv, "New firmware can be obtained from "
+ "http://www.intellinuxwireless.org/.\n");
+ }
+ }
+
+ IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ IWL_DEBUG_INFO(drv, "f/w package hdr ucode version raw = 0x%x\n",
+ drv->fw.ucode_ver);
+ IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
+ pieces.inst_size);
+ IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
+ pieces.data_size);
+ IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
+ pieces.init_size);
+ IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
+ pieces.init_data_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > cfg->max_inst_size) {
+ IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > cfg->max_data_size) {
+ IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > cfg->max_inst_size) {
+ IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > cfg->max_data_size) {
+ IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ if (iwl_alloc_fw_desc(drv, &drv->fw.ucode_rt.code,
+ pieces.inst, pieces.inst_size))
+ goto err_pci_alloc;
+ if (iwl_alloc_fw_desc(drv, &drv->fw.ucode_rt.data,
+ pieces.data, pieces.data_size))
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ if (iwl_alloc_fw_desc(drv,
+ &drv->fw.ucode_init.code,
+ pieces.init, pieces.init_size))
+ goto err_pci_alloc;
+ if (iwl_alloc_fw_desc(drv,
+ &drv->fw.ucode_init.data,
+ pieces.init_data, pieces.init_data_size))
+ goto err_pci_alloc;
+ }
+
+ /* WoWLAN instructions and data */
+ if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
+ if (iwl_alloc_fw_desc(drv,
+ &drv->fw.ucode_wowlan.code,
+ pieces.wowlan_inst,
+ pieces.wowlan_inst_size))
+ goto err_pci_alloc;
+ if (iwl_alloc_fw_desc(drv,
+ &drv->fw.ucode_wowlan.data,
+ pieces.wowlan_data,
+ pieces.wowlan_data_size))
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ /*
+ * The (size - 16) / 12 formula is based on the information recorded
+ * for each event, which is of mode 1 (including timestamp) for all
+ * new microcodes that include this information.
+ */
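	/* Editorial example (illustrative size, not from the patch): a 1216-byte
	 * mode-1 event log gives (1216 - 16) / 12 = 100 log entries. */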
+ fw->init_evtlog_ptr = pieces.init_evtlog_ptr;
+ if (pieces.init_evtlog_size)
+ fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+ else
+ fw->init_evtlog_size =
+ cfg->base_params->max_event_log_size;
+ fw->init_errlog_ptr = pieces.init_errlog_ptr;
+ fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
+ if (pieces.inst_evtlog_size)
+ fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+ else
+ fw->inst_evtlog_size =
+ cfg->base_params->max_event_log_size;
+ fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
+
+ /*
+ * figure out the offset of chain noise reset and gain commands
+ * based on the size of the standard phy calibration command table
+ */
+ if (fw->ucode_capa.standard_phy_calibration_size >
+ IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
+ fw->ucode_capa.standard_phy_calibration_size =
+ IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ /* We have our copies now, allow the OS to release its copies */
+ release_firmware(ucode_raw);
+ complete(&drv->request_firmware_complete);
+
+ drv->op_mode = iwl_dvm_ops.start(drv->shrd->trans, &drv->fw);
+
+ if (!drv->op_mode)
+ goto out_unbind;
+
+ return;
+
+ try_again:
+ /* try next, if any */
+ release_firmware(ucode_raw);
+ if (iwl_request_firmware(drv, false))
+ goto out_unbind;
+ return;
+
+ err_pci_alloc:
+ IWL_ERR(drv, "failed to allocate pci memory\n");
+ iwl_dealloc_ucode(drv);
+ release_firmware(ucode_raw);
+ out_unbind:
+ complete(&drv->request_firmware_complete);
+ device_release_driver(trans(drv)->dev);
+}
+
+int iwl_drv_start(struct iwl_shared *shrd,
+ struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+ struct iwl_drv *drv;
int ret;
shrd->cfg = cfg;
- shrd->nic = kzalloc(sizeof(*shrd->nic), GFP_KERNEL);
- if (!shrd->nic) {
- dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_nic");
+ drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv");
return -ENOMEM;
}
- shrd->nic->shrd = shrd;
+ drv->shrd = shrd;
+ shrd->drv = drv;
- init_completion(&shrd->nic->request_firmware_complete);
+ init_completion(&drv->request_firmware_complete);
- ret = iwl_request_firmware(shrd->nic, true);
+ ret = iwl_request_firmware(drv, true);
if (ret) {
dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw");
- kfree(shrd->nic);
+ kfree(drv);
+ shrd->drv = NULL;
}
return ret;
@@ -95,9 +748,16 @@
void iwl_drv_stop(struct iwl_shared *shrd)
{
- /* op_mode can be NULL if its start failed */
- if (shrd->nic->op_mode)
- iwl_op_mode_stop(shrd->nic->op_mode);
+ struct iwl_drv *drv = shrd->drv;
- kfree(shrd->nic);
+ wait_for_completion(&drv->request_firmware_complete);
+
+ /* op_mode can be NULL if its start failed */
+ if (drv->op_mode)
+ iwl_op_mode_stop(drv->op_mode);
+
+ iwl_dealloc_ucode(drv);
+
+ kfree(drv);
+ shrd->drv = NULL;
}
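A short sketch of the resulting load/unload flow (editorial summary of the
functions added above, not part of the patch):

	/*
	 * iwl_drv_start()
	 *   -> iwl_request_firmware(drv, true)          asynchronous request
	 *        -> iwl_ucode_callback(ucode_raw, drv)  runs when the file arrives:
	 *             parse the v1/v2 or TLV image, DMA-map the pieces,
	 *             complete(&drv->request_firmware_complete),
	 *             drv->op_mode = iwl_dvm_ops.start(trans, &drv->fw)
	 *
	 * iwl_drv_stop()
	 *   -> wait_for_completion(&drv->request_firmware_complete)
	 *   -> iwl_op_mode_stop() (if started), iwl_dealloc_ucode(), kfree(drv)
	 */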
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 90534a2..3b771c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -105,7 +105,7 @@
* function.
*/
int iwl_drv_start(struct iwl_shared *shrd,
- struct iwl_trans *trans, struct iwl_cfg *cfg);
+ struct iwl_trans *trans, const struct iwl_cfg *cfg);
/**
* iwl_drv_stop - stop the drv
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 07e93787..23cea42 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -75,6 +75,7 @@
#include "iwl-agn.h"
#include "iwl-eeprom.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
/************************** EEPROM BANDS ****************************
*
@@ -252,46 +253,46 @@
}
-int iwl_eeprom_check_sku(struct iwl_priv *priv)
+int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_shared *shrd = priv->shrd;
u16 radio_cfg;
- if (!cfg(priv)->sku) {
- /* not using sku overwrite */
- cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !cfg(priv)->ht_params) {
- IWL_ERR(priv, "Invalid 11n configuration\n");
- return -EINVAL;
- }
+ hw_params(priv).sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !cfg(priv)->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
}
- if (!cfg(priv)->sku) {
+
+ if (!hw_params(priv).sku) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku);
+ IWL_INFO(priv, "Device SKU: 0x%X\n", hw_params(priv).sku);
- if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) {
- /* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
- cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
- cfg(priv)->valid_tx_ant,
- cfg(priv)->valid_rx_ant);
- return -EINVAL;
- }
- IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
- cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant);
+ radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+
+ hw_params(priv).valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ hw_params(priv).valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+ /* check overrides (some devices have wrong EEPROM) */
+ if (cfg(priv)->valid_tx_ant)
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ if (cfg(priv)->valid_rx_ant)
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+
+ if (!hw_params(priv).valid_tx_ant || !hw_params(priv).valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
+ hw_params(priv).valid_tx_ant,
+ hw_params(priv).valid_rx_ant);
+ return -EINVAL;
}
- /*
- * for some special cases,
- * EEPROM did not reflect the correct antenna setting
- * so overwrite the valid tx/rx antenna from .cfg
- */
+
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ hw_params(priv).valid_tx_ant, hw_params(priv).valid_rx_ant);
+
return 0;
}
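An illustration of the override precedence implemented above (hypothetical
EEPROM and cfg values):

	/* EEPROM_RADIO_CONFIG decodes to tx 0x7, rx 0x7;
	 * cfg->valid_tx_ant = 0x3 (non-zero, so it overrides), cfg->valid_rx_ant = 0
	 *   => hw_params.valid_tx_ant = 0x3, hw_params.valid_rx_ant = 0x7 */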
@@ -512,7 +513,7 @@
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
-static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
@@ -588,7 +589,7 @@
#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
? # x " " : "")
-void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
+static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
struct iwl_shared *shrd = priv->shrd;
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
@@ -1024,8 +1025,8 @@
* driver need to process addition information
* to determine the max channel tx power limits
*/
- if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower)
- cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv);
+ if (cfg(priv)->lib->eeprom_ops.enhanced_txpower)
+ iwl_eeprom_enhanced_txpower(priv);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index cbb8611..e4a7583 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -302,14 +302,14 @@
struct iwl_eeprom_ops {
const u32 regulatory_bands[7];
- void (*update_enhanced_txpower) (struct iwl_priv *priv);
+ bool enhanced_txpower;
};
int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev);
void iwl_eeprom_free(struct iwl_shared *shrd);
int iwl_eeprom_check_version(struct iwl_priv *priv);
-int iwl_eeprom_check_sku(struct iwl_priv *priv);
+int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
similarity index 76%
rename from drivers/net/wireless/iwlwifi/iwl-ucode.h
rename to drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 8bebeb0..7ca6c95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_ucode_h__
-#define __iwl_ucode_h__
+#ifndef __iwl_fw_file_h__
+#define __iwl_fw_file_h__
#include <linux/netdevice.h>
@@ -126,22 +126,6 @@
IWL_UCODE_TLV_FLAGS = 18,
};
-/**
- * enum iwl_ucode_tlv_flag - ucode API flags
- * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
- * was a separate TLV but moved here to save space.
- * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
- * treats good CRC threshold as a boolean
- * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- */
-enum iwl_ucode_tlv_flag {
- IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
- IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
- IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
-};
-
struct iwl_ucode_tlv {
__le16 type; /* see above */
__le16 alternative; /* see comment */
@@ -173,48 +157,4 @@
u8 data[0];
};
-struct iwl_ucode_capabilities {
- u32 max_probe_length;
- u32 standard_phy_calibration_size;
- u32 flags;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- dma_addr_t p_addr; /* hardware address */
- void *v_addr; /* software address */
- u32 len; /* size in bytes */
-};
-
-struct fw_img {
- struct fw_desc code; /* firmware code image */
- struct fw_desc data; /* firmware data image */
-};
-
-/**
- * struct iwl_fw - variables associated with the firmware
- *
- * @ucode_ver: ucode version from the ucode file
- * @fw_version: firmware version string
- * @ucode_rt: run time ucode image
- * @ucode_init: init ucode image
- * @ucode_wowlan: wake on wireless ucode image (optional)
- * @ucode_capa: capabilities parsed from the ucode file.
- * @enhance_sensitivity_table: device can do enhanced sensitivity.
- */
-struct iwl_fw {
-
- u32 ucode_ver;
-
- char fw_version[ETHTOOL_BUSINFO_LEN];
-
- /* ucode images */
- struct fw_img ucode_rt;
- struct fw_img ucode_init;
- struct fw_img ucode_wowlan;
-
- struct iwl_ucode_capabilities ucode_capa;
- bool enhance_sensitivity_table;
-};
-
-#endif /* __iwl_ucode_h__ */
+#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
similarity index 61%
copy from drivers/net/wireless/iwlwifi/iwl-ucode.h
copy to drivers/net/wireless/iwlwifi/iwl-fw.h
index 8bebeb0..453812a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -60,71 +60,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_ucode_h__
-#define __iwl_ucode_h__
-
-#include <linux/netdevice.h>
-
-/* v1/v2 uCode file layout */
-struct iwl_ucode_header {
- __le32 ver; /* major/minor/API/serial */
- union {
- struct {
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v1;
- struct {
- __le32 build; /* build number */
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v2;
- } u;
-};
-
-/*
- * new TLV uCode file layout
- *
- * The new TLV file format contains TLVs, that each specify
- * some piece of data. To facilitate "groups", for example
- * different instruction image with different capabilities,
- * bundled with the same init image, an alternative mechanism
- * is provided:
- * When the alternative field is 0, that means that the item
- * is always valid. When it is non-zero, then it is only
- * valid in conjunction with items of the same alternative,
- * in which case the driver (user) selects one alternative
- * to use.
- */
-
-enum iwl_ucode_tlv_type {
- IWL_UCODE_TLV_INVALID = 0, /* unused */
- IWL_UCODE_TLV_INST = 1,
- IWL_UCODE_TLV_DATA = 2,
- IWL_UCODE_TLV_INIT = 3,
- IWL_UCODE_TLV_INIT_DATA = 4,
- IWL_UCODE_TLV_BOOT = 5,
- IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
- IWL_UCODE_TLV_PAN = 7,
- IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
- IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
- IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
- IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
- IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
- IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
- IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
- IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
- IWL_UCODE_TLV_WOWLAN_INST = 16,
- IWL_UCODE_TLV_WOWLAN_DATA = 17,
- IWL_UCODE_TLV_FLAGS = 18,
-};
+#ifndef __iwl_fw_h__
+#define __iwl_fw_h__
+#include <linux/types.h>
/**
* enum iwl_ucode_tlv_flag - ucode API flags
@@ -142,36 +80,10 @@
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
};
-struct iwl_ucode_tlv {
- __le16 type; /* see above */
- __le16 alternative; /* see comment */
- __le32 length; /* not including type/length fields */
- u8 data[0];
-};
-
-#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
-
-struct iwl_tlv_ucode_header {
- /*
- * The TLV style ucode header is distinguished from
- * the v1/v2 style header by first four bytes being
- * zero, as such is an invalid combination of
- * major/minor/API/serial versions.
- */
- __le32 zero;
- __le32 magic;
- u8 human_readable[64];
- __le32 ver; /* major/minor/API/serial */
- __le32 build;
- __le64 alternatives; /* bitmask of valid alternatives */
- /*
- * The data contained herein has a TLV layout,
- * see above for the TLV header and types.
- * Note that each TLV is padded to a length
- * that is a multiple of 4 for alignment.
- */
- u8 data[0];
-};
+/* The default calibrate table size if not specified by firmware file */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -191,6 +103,12 @@
struct fw_desc data; /* firmware data image */
};
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
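+/* Worked example (illustrative value): ucode_ver = 0x056E0601 decodes to
+ * major 5, minor 110 (0x6E), API 6, serial 1 -- printed as "5.110.6.1"
+ * by the snprintf() calls in iwl-drv.c. */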
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -201,9 +119,14 @@
* @ucode_wowlan: wake on wireless ucode image (optional)
* @ucode_capa: capabilities parsed from the ucode file.
* @enhance_sensitivity_table: device can do enhanced sensitivity.
+ * @init_evtlog_ptr: event log offset for init ucode.
+ * @init_evtlog_size: event log size for init ucode.
+ * @init_evtlog_ptr: event log offset for init ucode.
+ * @inst_evtlog_ptr: event log offset for runtime ucode.
+ * @inst_evtlog_size: event log size for runtime ucode.
+ * @inst_errlog_ptr: error log offset for runtime ucode.
*/
struct iwl_fw {
-
u32 ucode_ver;
char fw_version[ETHTOOL_BUSINFO_LEN];
@@ -215,6 +138,9 @@
struct iwl_ucode_capabilities ucode_capa;
bool enhance_sensitivity_table;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
};
-#endif /* __iwl_ucode_h__ */
+#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index e2e3b5c..081dd34 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -118,16 +118,17 @@
return 0;
}
-int iwl_grab_nic_access(struct iwl_trans *trans)
+bool iwl_grab_nic_access(struct iwl_trans *trans)
{
int ret = iwl_grab_nic_access_silent(trans);
- if (ret) {
+ if (unlikely(ret)) {
u32 val = iwl_read32(trans, CSR_GP_CNTRL);
- IWL_ERR(trans,
- "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ WARN_ONCE(1, "Timeout waiting for hardware access "
+ "(CSR_GP_CNTRL 0x%08x)\n", val);
+ return false;
}
- return ret;
+ return true;
}
void iwl_release_nic_access(struct iwl_trans *trans)
@@ -135,6 +136,13 @@
lockdep_assert_held(&trans->reg_lock);
__iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ mmiowb();
}
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
@@ -156,7 +164,7 @@
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
- if (!iwl_grab_nic_access(trans)) {
+ if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, reg, value);
iwl_release_nic_access(trans);
}
@@ -181,7 +189,6 @@
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
iwl_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
return iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
}
@@ -189,7 +196,6 @@
{
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
@@ -211,7 +217,7 @@
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
- if (!iwl_grab_nic_access(trans)) {
+ if (likely(iwl_grab_nic_access(trans))) {
__iwl_write_prph(trans, addr, val);
iwl_release_nic_access(trans);
}
@@ -223,9 +229,11 @@
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- __iwl_write_prph(trans, reg, __iwl_read_prph(trans, reg) | mask);
- iwl_release_nic_access(trans);
+ if (likely(iwl_grab_nic_access(trans))) {
+ __iwl_write_prph(trans, reg,
+ __iwl_read_prph(trans, reg) | mask);
+ iwl_release_nic_access(trans);
+ }
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
@@ -235,10 +243,11 @@
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- __iwl_write_prph(trans, reg,
- (__iwl_read_prph(trans, reg) & mask) | bits);
- iwl_release_nic_access(trans);
+ if (likely(iwl_grab_nic_access(trans))) {
+ __iwl_write_prph(trans, reg,
+ (__iwl_read_prph(trans, reg) & mask) | bits);
+ iwl_release_nic_access(trans);
+ }
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
@@ -248,10 +257,11 @@
u32 val;
spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- val = __iwl_read_prph(trans, reg);
- __iwl_write_prph(trans, reg, (val & ~mask));
- iwl_release_nic_access(trans);
+ if (likely(iwl_grab_nic_access(trans))) {
+ val = __iwl_read_prph(trans, reg);
+ __iwl_write_prph(trans, reg, (val & ~mask));
+ iwl_release_nic_access(trans);
+ }
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
@@ -263,15 +273,12 @@
u32 *vals = buf;
spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
-
- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- rmb();
-
- for (offs = 0; offs < words; offs++)
- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-
- iwl_release_nic_access(trans);
+ if (likely(iwl_grab_nic_access(trans))) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < words; offs++)
+ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(trans);
+ }
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
@@ -292,10 +299,8 @@
u32 *vals = buf;
spin_lock_irqsave(&trans->reg_lock, flags);
- if (!iwl_grab_nic_access(trans)) {
+ if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- wmb();
-
for (offs = 0; offs < words; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(trans);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 782486f..09b8567 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -35,20 +35,20 @@
static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
- trace_iwlwifi_dev_iowrite8(priv(trans), ofs, val);
+ trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
iwl_trans_write8(trans, ofs, val);
}
static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
- trace_iwlwifi_dev_iowrite32(priv(trans), ofs, val);
+ trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
iwl_trans_write32(trans, ofs, val);
}
static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read32(trans, ofs);
- trace_iwlwifi_dev_ioread32(priv(trans), ofs, val);
+ trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
return val;
}
@@ -61,7 +61,7 @@
int timeout);
int iwl_grab_nic_access_silent(struct iwl_trans *trans);
-int iwl_grab_nic_access(struct iwl_trans *trans);
+bool iwl_grab_nic_access(struct iwl_trans *trans);
void iwl_release_nic_access(struct iwl_trans *trans);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 5c7741f..1993a2b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -112,7 +112,7 @@
iwl_write32(trans(priv), CSR_LED_REG,
reg & CSR_LED_BSM_CTRL_MSK);
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
/* Set led pattern command */
@@ -126,7 +126,7 @@
};
int ret;
- if (!test_bit(STATUS_READY, &priv->shrd->status))
+ if (!test_bit(STATUS_READY, &priv->status))
return -EBUSY;
if (priv->blink_on == on && priv->blink_off == off)
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index 03f7705..9212ee3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -42,9 +42,7 @@
#include <asm/div64.h>
-#include "iwl-ucode.h"
#include "iwl-eeprom.h"
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -136,7 +134,7 @@
* other mac80211 functions grouped here.
*/
int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwl_ucode_capabilities *capa)
+ const struct iwl_ucode_capabilities *capa)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -161,11 +159,14 @@
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
+ /* enable 11w if the uCode advertises it */
if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -195,7 +196,8 @@
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
- if (nic(priv)->fw.ucode_wowlan.code.len &&
+ if (priv->fw->ucode_wowlan.code.len &&
+ trans(priv)->ops->wowlan_suspend &&
device_can_wakeup(trans(priv)->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -262,9 +264,9 @@
struct iwl_rxon_context *ctx;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
}
@@ -277,13 +279,13 @@
}
}
- ret = iwl_run_init_ucode(trans(priv));
+ ret = iwl_run_init_ucode(priv);
if (ret) {
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
goto error;
}
- ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR);
+ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
goto error;
@@ -295,9 +297,9 @@
return 0;
error:
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ set_bit(STATUS_EXIT_PENDING, &priv->status);
iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
IWL_ERR(priv, "Unable to initialize device.\n");
return ret;
@@ -311,16 +313,16 @@
IWL_DEBUG_MAC80211(priv, "enter\n");
/* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
ret = __iwl_up(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
if (ret)
return ret;
IWL_DEBUG_INFO(priv, "Start UP work done.\n");
/* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
ret = -EIO;
iwlagn_led_enable(priv);
@@ -341,9 +343,9 @@
priv->is_open = 0;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_down(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
iwl_cancel_deferred_work(priv);
@@ -368,7 +370,7 @@
return;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
goto out;
@@ -380,7 +382,7 @@
priv->have_rekey_data = true;
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -397,7 +399,7 @@
return -EINVAL;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
/* Don't attempt WoWLAN when not associated, tear down instead. */
if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
@@ -406,24 +408,22 @@
goto out;
}
- ret = iwlagn_suspend(priv, hw, wowlan);
+ ret = iwlagn_suspend(priv, wowlan);
if (ret)
goto error;
device_set_wakeup_enable(trans(priv)->dev, true);
- /* Now let the ucode operate on its own */
- iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+ iwl_trans_wowlan_suspend(trans(priv));
goto out;
error:
- priv->shrd->wowlan = false;
+ priv->wowlan = false;
iwlagn_prepare_restart(priv);
ieee80211_restart_hw(priv->hw);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -439,7 +439,7 @@
int ret = -EIO;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
@@ -448,7 +448,7 @@
if (iwlagn_hw_valid_rtc_data_addr(base)) {
spin_lock_irqsave(&trans(priv)->reg_lock, flags);
ret = iwl_grab_nic_access_silent(trans(priv));
- if (ret == 0) {
+ if (likely(ret == 0)) {
iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base);
status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(trans(priv));
@@ -457,17 +457,16 @@
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (ret == 0) {
- struct iwl_nic *nic = nic(priv);
if (!priv->wowlan_sram)
priv->wowlan_sram =
- kzalloc(nic->fw.ucode_wowlan.data.len,
+ kzalloc(priv->fw->ucode_wowlan.data.len,
GFP_KERNEL);
if (priv->wowlan_sram)
_iwl_read_targ_mem_words(
trans(priv), 0x800000,
priv->wowlan_sram,
- nic->fw.ucode_wowlan.data.len / 4);
+ priv->fw->ucode_wowlan.data.len / 4);
}
#endif
}
@@ -475,7 +474,7 @@
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- priv->shrd->wowlan = false;
+ priv->wowlan = false;
device_set_wakeup_enable(trans(priv)->dev, false);
@@ -485,7 +484,7 @@
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
ieee80211_resume_disconnect(vif);
@@ -563,7 +562,7 @@
if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
return 0;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
@@ -614,7 +613,7 @@
ret = -EINVAL;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -633,11 +632,11 @@
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
- if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE))
+ if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -649,8 +648,6 @@
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
break;
case IEEE80211_AMPDU_TX_START:
if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
@@ -666,10 +663,8 @@
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
priv->agg_tids_count);
}
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- if (!priv->agg_tids_count && cfg(priv)->ht_params &&
- cfg(priv)->ht_params->use_rts_for_aggregation) {
+ if (!priv->agg_tids_count &&
+ hw_params(priv).use_rts_for_aggregation) {
/*
* switch off RTS/CTS if it was previously enabled
*/
@@ -683,7 +678,7 @@
ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
@@ -696,12 +691,9 @@
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret = 0;
+ int ret;
u8 sta_id;
- IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
sta->addr);
sta_priv->sta_id = IWL_INVALID_STATION;
@@ -716,17 +708,119 @@
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
- goto out;
+ return ret;
}
sta_priv->sta_id = sta_id;
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl_rs_rate_init(priv, sta, sta_id);
- out:
- mutex_unlock(&priv->shrd->mutex);
+ return 0;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /*
+ * Station will be removed from device when the RXON
+ * is set to unassociated -- just deactivate it here
+ * to avoid re-programming it.
+ */
+ ret = 0;
+ iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr);
+ } else {
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_DEBUG_QUIET_RFKILL(priv,
+ "Error removing station %pM\n", sta->addr);
+ }
+ return ret;
+}
+
+static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ enum {
+ NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT,
+ } op = NONE;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ mutex_lock(&priv->mutex);
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ op = ADD;
+ else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ op = REMOVE;
+ else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ op = HT_RATE_INIT;
+ } else {
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ op = ADD_RATE_INIT;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ op = REMOVE;
+ }
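	/* Editorial summary of the mapping above (not part of the patch):
	 *   STATION vif: NOTEXIST->NONE = ADD, NONE->NOTEXIST = REMOVE,
	 *                AUTH->ASSOC    = HT_RATE_INIT
	 *   other vifs:  AUTH->ASSOC    = ADD_RATE_INIT, ASSOC->AUTH = REMOVE
	 *   any other transition leaves op == NONE, i.e. a no-op below. */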
+
+ switch (op) {
+ case ADD:
+ ret = iwlagn_mac_sta_add(hw, vif, sta);
+ break;
+ case REMOVE:
+ ret = iwlagn_mac_sta_remove(hw, vif, sta);
+ break;
+ case ADD_RATE_INIT:
+ ret = iwlagn_mac_sta_add(hw, vif, sta);
+ if (ret)
+ break;
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv,
+ "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
+ ret = 0;
+ break;
+ case HT_RATE_INIT:
+ /* Initialize rate scaling */
+ ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta);
+ if (ret)
+ break;
+ IWL_DEBUG_INFO(priv,
+ "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
+ ret = 0;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ /*
+ * mac80211 might WARN if we fail, but due to the way we
+ * (badly) handle hard rfkill, we might fail here
+ */
+ if (iwl_is_rfkill(priv))
+ ret = 0;
+
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -753,14 +847,14 @@
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
goto out;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
@@ -779,8 +873,6 @@
goto out;
}
- spin_lock_irq(&priv->shrd->lock);
-
priv->current_ht_config.smps = conf->smps_mode;
/* Configure HT40 channels */
@@ -797,23 +889,21 @@
iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
- spin_unlock_irq(&priv->shrd->lock);
-
iwl_set_rate(priv);
/*
* at this point, staging_rxon has the
* configuration for channel switch
*/
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
priv->switch_channel = cpu_to_le16(ch);
if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
priv->switch_channel = 0;
ieee80211_chswitch_done(ctx->vif, false);
}
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -843,7 +933,7 @@
#undef CHK
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
for_each_context(priv, ctx) {
ctx->staging.filter_flags &= ~filter_nand;
@@ -855,7 +945,7 @@
*/
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
/*
* Receiving all multicast frames is always enabled by the
@@ -871,14 +961,14 @@
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
goto done;
}
- if (iwl_is_rfkill(priv->shrd)) {
+ if (iwl_is_rfkill(priv)) {
IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
goto done;
}
@@ -897,7 +987,7 @@
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(trans(priv));
done:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -917,9 +1007,9 @@
return -EOPNOTSUPP;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
err = -EBUSY;
goto out;
}
@@ -988,7 +1078,7 @@
iwlagn_disable_roc(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1002,102 +1092,22 @@
return -EOPNOTSUPP;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
-
- if (ctx->ctxid != IWL_RXON_CTX_PAN)
- return 0;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx)) {
- ret = 0;
- goto out;
- }
-
- if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
- &priv->shrd->status)) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
- if (ret)
- goto out;
-
- if (WARN_ON(sta_id != ctx->ap_sta_id)) {
- ret = -EIO;
- goto out_remove_sta;
- }
-
- memcpy(ctx->bssid, bssid, ETH_ALEN);
- ctx->preauth_bssid = true;
-
- ret = iwlagn_commit_rxon(priv, ctx);
-
- if (ret == 0)
- goto out;
-
- out_remove_sta:
- iwl_remove_station(priv, sta_id, bssid);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
-
- if (ctx->ctxid != IWL_RXON_CTX_PAN)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx))
- goto out;
-
- iwl_remove_station(priv, ctx->ap_sta_id, bssid);
- ctx->preauth_bssid = false;
- /* no need to commit */
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (cfg(priv)->bt_params &&
cfg(priv)->bt_params->advanced_bt_coexist) {
@@ -1112,7 +1122,7 @@
"ignoring RSSI callback\n");
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -1133,7 +1143,6 @@
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
- unsigned long flags;
int q;
if (WARN_ON(!ctx))
@@ -1141,7 +1150,7 @@
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (!iwl_is_ready_rf(priv->shrd)) {
+ if (!iwl_is_ready_rf(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
return -EIO;
}
@@ -1153,7 +1162,7 @@
q = AC_NUM - 1 - queue;
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ mutex_lock(&priv->mutex);
ctx->qos_data.def_qos_parm.ac[q].cw_min =
cpu_to_le16(params->cw_min);
@@ -1165,7 +1174,7 @@
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
@@ -1193,7 +1202,7 @@
struct ieee80211_vif *vif = ctx->vif;
int err;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/*
* This variable will be correct only when there's just
@@ -1238,11 +1247,11 @@
cancel_delayed_work_sync(&priv->hw_roc_disable_work);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwlagn_disable_roc(priv);
- if (!iwl_is_ready_rf(priv->shrd)) {
+ if (!iwl_is_ready_rf(priv)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
goto out;
@@ -1285,7 +1294,7 @@
ctx->vif = NULL;
priv->iw_mode = NL80211_IFTYPE_STATION;
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1297,7 +1306,7 @@
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (priv->scan_vif == vif) {
iwl_scan_cancel_timeout(priv, 200);
@@ -1329,7 +1338,7 @@
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (WARN_ON(ctx->vif != vif)) {
struct iwl_rxon_context *tmp;
@@ -1342,7 +1351,7 @@
iwl_teardown_interface(priv, vif, false);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1364,9 +1373,9 @@
newtype = ieee80211_iftype_p2p(newtype, newp2p);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+ if (!ctx->vif || !iwl_is_ready_rf(priv)) {
/*
* Huh? But wait ... this can maybe happen when
* we're in the middle of a firmware restart!
@@ -1428,7 +1437,7 @@
err = 0;
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1446,7 +1455,7 @@
if (req->n_channels == 0)
return -EINVAL;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
/*
* If an internal scan is in progress, just set
@@ -1475,47 +1484,20 @@
IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->shrd->mutex);
-
- return ret;
-}
-
-static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
- "station %pM\n", sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
- if (ret)
- IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
+ mutex_unlock(&priv->mutex);
return ret;
}
static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
- unsigned long flags;
+ struct iwl_addsta_cmd cmd = {
+ .mode = STA_CONTROL_MODIFY_MSK,
+ .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+ .sta.sta_id = sta_id,
+ };
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask = 0;
- priv->stations[sta_id].sta.sleep_tx_count = 0;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
+ iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1572,8 +1554,7 @@
.ampdu_action = iwlagn_mac_ampdu_action,
.hw_scan = iwlagn_mac_hw_scan,
.sta_notify = iwlagn_mac_sta_notify,
- .sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwlagn_mac_sta_remove,
+ .sta_state = iwlagn_mac_sta_state,
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
@@ -1582,8 +1563,6 @@
.rssi_callback = iwlagn_mac_rssi_callback,
CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
- .tx_sync = iwlagn_mac_tx_sync,
- .finish_tx_sync = iwlagn_mac_finish_tx_sync,
.set_tim = iwlagn_mac_set_tim,
};
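The .sta_state callback that replaces .sta_add/.sta_remove follows mac80211's station state machine; the sketch below is illustrative only (not the driver's actual iwlagn_mac_sta_state body, and assuming <net/mac80211.h>) and shows the shape such a handler takes:

static int example_sta_state(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     enum ieee80211_sta_state old_state,
			     enum ieee80211_sta_state new_state)
{
	/* NOTEXIST -> NONE is where the old .sta_add work happens */
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return 0;	/* allocate per-station driver state here */

	/* NONE -> NOTEXIST is where the old .sta_remove work happens */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		return 0;	/* free per-station driver state here */

	return 0;		/* other transitions need no driver action */
}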
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
new file mode 100644
index 0000000..88dc4a0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -0,0 +1,157 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+
+#include "iwl-notif-wait.h"
+
+
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
+{
+ spin_lock_init(¬if_wait->notif_wait_lock);
+ INIT_LIST_HEAD(¬if_wait->notif_waits);
+ init_waitqueue_head(¬if_wait->notif_waitq);
+}
+
+void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt)
+{
+ if (!list_empty(¬if_wait->notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(¬if_wait->notif_wait_lock);
+ list_for_each_entry(w, ¬if_wait->notif_waits, list) {
+ if (w->cmd != pkt->hdr.cmd)
+ continue;
+ w->triggered = true;
+ if (w->fn)
+ w->fn(notif_wait, pkt, w->fn_data);
+ }
+ spin_unlock(¬if_wait->notif_wait_lock);
+
+ wake_up_all(¬if_wait->notif_waitq);
+ }
+}
+
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
+{
+ unsigned long flags;
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_irqsave(¬if_wait->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, ¬if_wait->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_irqrestore(¬if_wait->notif_wait_lock, flags);
+
+ wake_up_all(¬if_wait->notif_waitq);
+}
+
+
+void
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data),
+ void *fn_data)
+{
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(¬if_wait->notif_wait_lock);
+ list_add(&wait_entry->list, ¬if_wait->notif_waits);
+ spin_unlock_bh(¬if_wait->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(notif_wait->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ spin_lock_bh(¬if_wait->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(¬if_wait->notif_wait_lock);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(¬if_wait->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(¬if_wait->notif_wait_lock);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
new file mode 100644
index 0000000..5e8af95
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -0,0 +1,129 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_notif_wait_h__
+#define __iwl_notif_wait_h__
+
+#include <linux/wait.h>
+
+#include "iwl-trans.h"
+
+struct iwl_notif_wait_data {
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+};
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and then call
+ * iwl_wait_notification() to wait for that notification.
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data);
+ void *fn_data;
+
+ u8 cmd;
+ bool triggered, aborted;
+};
+
+
+/* caller functions */
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
+void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt);
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+
+/* user functions */
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data),
+ void *fn_data);
+
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry);
+
+#endif /* __iwl_notif_wait_h__ */
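A typical caller-side use of this API looks like the sketch below (illustrative only; it mirrors the pattern used in iwl-testmode.c, with priv->notif_wait assumed to hold the struct iwl_notif_wait_data and REPLY_ALIVE used purely as a placeholder command ID):

	struct iwl_notification_wait wait_entry;
	int ret;

	iwl_init_notification_wait(&priv->notif_wait, &wait_entry,
				   REPLY_ALIVE /* placeholder */, NULL, NULL);

	/* ... trigger the uCode activity that produces the notification ... */

	ret = iwl_wait_notification(&priv->notif_wait, &wait_entry, 2 * HZ);
	if (ret == -ETIMEDOUT)
		IWL_ERR(priv, "notification timed out\n");
	else if (ret == -EIO)
		IWL_ERR(priv, "wait was aborted\n");

	/* if the trigger step fails before waiting, drop the entry instead:
	 * iwl_remove_notification(&priv->notif_wait, &wait_entry);
	 */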
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index d4fc9be..6ea4163 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -67,7 +67,8 @@
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
-struct iwl_rx_mem_buffer;
+struct iwl_rx_cmd_buffer;
+struct iwl_fw;
/**
* DOC: Operational mode - what is it ?
@@ -121,17 +122,23 @@
* there are Tx packets pending in the transport layer.
* Must be atomic
* @nic_error: error notification. Must be atomic
+ * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
+ * @nic_config: configure NIC, called before firmware is started.
+ * May sleep
*/
struct iwl_op_mode_ops {
- struct iwl_op_mode *(*start)(struct iwl_trans *trans);
+ struct iwl_op_mode *(*start)(struct iwl_trans *trans,
+ const struct iwl_fw *fw);
void (*stop)(struct iwl_op_mode *op_mode);
- int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_mem_buffer *rxb,
+ int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac);
void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac);
void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
void (*nic_error)(struct iwl_op_mode *op_mode);
+ void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
+ void (*nic_config)(struct iwl_op_mode *op_mode);
};
/**
@@ -156,7 +163,7 @@
}
static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
return op_mode->ops->rx(op_mode, rxb, cmd);
@@ -190,6 +197,17 @@
op_mode->ops->nic_error(op_mode);
}
+static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ op_mode->ops->cmd_queue_full(op_mode);
+}
+
+static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+ op_mode->ops->nic_config(op_mode);
+}
+
/*****************************************************
* Op mode layers implementations
******************************************************/
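An op mode implementation fills this ops table, now including the two new handlers and the updated start() signature that takes the parsed firmware; a hedged sketch with hypothetical callback names (the DVM op mode's real table is defined elsewhere):

	static const struct iwl_op_mode_ops example_op_mode_ops = {
		.start		= example_start,	/* (trans, fw), may sleep */
		.stop		= example_stop,
		.rx		= example_rx,		/* now takes struct iwl_rx_cmd_buffer */
		.queue_full	= example_queue_full,
		.queue_not_full	= example_queue_not_full,
		.hw_rf_kill	= example_hw_rf_kill,
		.free_skb	= example_free_skb,
		.nic_error	= example_nic_error,	/* atomic */
		.cmd_queue_full	= example_cmd_queue_full, /* atomic */
		.nic_config	= example_nic_config,	/* may sleep */
	};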
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 06e0041..c5e339e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -263,7 +263,7 @@
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
struct iwl_shared *shrd;
struct iwl_trans *iwl_trans;
int err;
@@ -278,17 +278,9 @@
#ifdef CONFIG_IWLWIFI_IDI
iwl_trans = iwl_trans_idi_alloc(shrd, pdev, ent);
- if (iwl_trans == NULL) {
- err = -ENOMEM;
- goto out_free_bus;
- }
-
- shrd->trans = iwl_trans;
- pci_set_drvdata(pdev, iwl_trans);
-
- err = iwl_drv_start(shrd, iwl_trans, cfg);
#else
iwl_trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
+#endif
if (iwl_trans == NULL) {
err = -ENOMEM;
goto out_free_bus;
@@ -298,7 +290,6 @@
pci_set_drvdata(pdev, iwl_trans);
err = iwl_drv_start(shrd, iwl_trans, cfg);
-#endif
if (err)
goto out_free_trans;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index fd008c4..958d9d0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -215,7 +215,7 @@
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
- if (hw_params(priv).shadow_reg_enable)
+ if (cfg(priv)->base_params->shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -301,7 +301,7 @@
if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
- if (hw_params(priv).shadow_reg_enable)
+ if (cfg(priv)->base_params->shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -336,7 +336,7 @@
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return iwl_trans_send_cmd_pdu(trans(priv), POWER_TABLE_CMD, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -348,7 +348,7 @@
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
- if (priv->shrd->wowlan)
+ if (priv->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
else if (!cfg(priv)->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
@@ -383,7 +383,7 @@
int ret;
bool update_chains;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* Don't update the RX chain when chain noise calibration is running */
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
@@ -392,12 +392,12 @@
if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
return 0;
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return -EIO;
/* scan complete use sleep_power_next, need to be updated */
memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->shrd->status) && !force) {
+ if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a4d1101..75dc20b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -216,10 +216,6 @@
#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
-#define SCD_QUEUECHAIN_SEL_ALL(priv) \
- (((1<<hw_params(priv).max_txq_num) - 1) &\
- (~(1<<(priv)->shrd->cmd_queue)))
-
#define SCD_BASE (PRPH_BASE + 0xa02c00)
#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0)
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 42ee1c4..902efe4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -57,39 +57,39 @@
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
int ret;
- struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_ABORT_CMD,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
+ __le32 *status;
/* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform
* hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->shrd->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
- test_bit(STATUS_FW_ERROR, &priv->shrd->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (!test_bit(STATUS_READY, &priv->status) ||
+ !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
+ !test_bit(STATUS_SCAN_HW, &priv->status) ||
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status))
return -EIO;
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret)
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->u.status != CAN_ABORT_STATUS) {
+ status = (void *)cmd.resp_pkt->data;
+ if (*status != CAN_ABORT_STATUS) {
/* The scan abort will return 1 for success or
* 2 for "failure". A failure condition can be
* due to simply not being in an active scan which
* can occur if we send the scan abort before
* the microcode has notified us that a scan is
* completed. */
- IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
+ IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n",
+ le32_to_cpu(*status));
ret = -EIO;
}
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
return ret;
}
@@ -116,20 +116,20 @@
{
bool aborted;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status))
+ if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status))
return;
IWL_DEBUG_SCAN(priv, "Completed scan.\n");
cancel_delayed_work(&priv->scan_check);
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
+ aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
if (aborted)
IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
goto out_settings;
}
@@ -165,7 +165,7 @@
out_settings:
/* Can we still talk to firmware ? */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
iwlagn_post_scan(priv);
@@ -173,18 +173,18 @@
void iwl_force_scan_end(struct iwl_priv *priv)
{
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
return;
}
IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->shrd->status);
- clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
- clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ clear_bit(STATUS_SCAN_COMPLETE, &priv->status);
iwl_complete_scan(priv, true);
}
@@ -192,14 +192,14 @@
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
return;
}
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
+ if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
return;
}
@@ -231,14 +231,14 @@
{
unsigned long timeout = jiffies + msecs_to_jiffies(ms);
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
iwl_do_scan_abort(priv);
while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->shrd->status))
+ if (!test_bit(STATUS_SCAN_HW, &priv->status))
goto finished;
msleep(20);
}
@@ -261,13 +261,12 @@
/* Service response to REPLY_SCAN_CMD (0x80) */
static int iwl_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanreq_notification *notif =
- (struct iwl_scanreq_notification *)pkt->u.raw;
+ struct iwl_scanreq_notification *notif = (void *)pkt->data;
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
@@ -276,12 +275,12 @@
/* Service SCAN_START_NOTIFICATION (0x82) */
static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanstart_notification *notif =
- (struct iwl_scanstart_notification *)pkt->u.raw;
+ struct iwl_scanstart_notification *notif = (void *)pkt->data;
+
priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
IWL_DEBUG_SCAN(priv, "Scan start: "
"%d [802.11%s] "
@@ -303,13 +302,12 @@
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanresults_notification *notif =
- (struct iwl_scanresults_notification *)pkt->u.raw;
+ struct iwl_scanresults_notification *notif = (void *)pkt->data;
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
@@ -329,11 +327,11 @@
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+ struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
scan_notif->scanned_channels,
@@ -352,8 +350,8 @@
* to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
* to avoid a race there.
*/
- set_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
- clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ set_bit(STATUS_SCAN_COMPLETE, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
queue_work(priv->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
@@ -574,6 +572,53 @@
return added;
}
+/**
+ * iwl_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
+ const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16)len;
+}
+
static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct iwl_host_cmd cmd = {
@@ -596,7 +641,7 @@
u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (vif)
ctx = iwl_rxon_ctx_from_vif(vif);
@@ -793,7 +838,7 @@
scan->rx_chain = cpu_to_le16(rx_chain);
switch (priv->scan_type) {
case IWL_SCAN_NORMAL:
- cmd_len = iwl_fill_probe_req(priv,
+ cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
@@ -803,7 +848,7 @@
case IWL_SCAN_RADIO_RESET:
case IWL_SCAN_ROC:
/* use bcast addr, will not be transmitted but must be valid */
- cmd_len = iwl_fill_probe_req(priv,
+ cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
@@ -882,15 +927,15 @@
scan->len = cpu_to_le16(cmd.len[0]);
/* set scan bit here for PAN params */
- set_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ set_bit(STATUS_SCAN_HW, &priv->status);
ret = iwlagn_set_pan_params(priv);
if (ret)
return ret;
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret) {
- clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
iwlagn_set_pan_params(priv);
}
@@ -913,22 +958,22 @@
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
cancel_delayed_work(&priv->scan_check);
- if (!iwl_is_ready_rf(priv->shrd)) {
+ if (!iwl_is_ready_rf(priv)) {
IWL_WARN(priv, "Request scan called when driver not ready.\n");
return -EIO;
}
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_SCAN(priv,
"Multiple concurrent scan requests in parallel.\n");
return -EBUSY;
}
- if (test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
return -EBUSY;
}
@@ -938,14 +983,14 @@
scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
"internal short ");
- set_bit(STATUS_SCANNING, &priv->shrd->status);
+ set_bit(STATUS_SCANNING, &priv->status);
priv->scan_type = scan_type;
priv->scan_start = jiffies;
priv->scan_band = band;
ret = iwlagn_request_scan(priv, vif);
if (ret) {
- clear_bit(STATUS_SCANNING, &priv->shrd->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
priv->scan_type = IWL_SCAN_NORMAL;
return ret;
}
@@ -973,14 +1018,14 @@
IWL_DEBUG_SCAN(priv, "Start internal scan\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
goto unlock;
}
- if (test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
goto unlock;
}
@@ -988,7 +1033,7 @@
if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
unlock:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_scan_check(struct work_struct *data)
@@ -1001,56 +1046,9 @@
/* Since we are here, the firmware did not finish the scan and
* is most likely in bad shape, so we don't bother to
* send an abort command, just force scan complete to mac80211 */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->shrd->mutex);
-}
-
-/**
- * iwl_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ies, int ie_len, int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* ...next IE... */
- pos = &frame->u.probe_req.variable[0];
-
- /* fill in our indirect SSID IE */
- left -= 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
-
- if (WARN_ON(left < ie_len))
- return len;
-
- if (ies && ie_len) {
- memcpy(pos, ies, ie_len);
- len += ie_len;
- }
-
- return (u16)len;
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_abort_scan(struct work_struct *work)
@@ -1061,9 +1059,9 @@
/* We keep scan_check work queued in case when firmware will not
* report back scan completed notification */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_scan_completed(struct work_struct *work)
@@ -1071,9 +1069,9 @@
struct iwl_priv *priv =
container_of(work, struct iwl_priv, scan_completed);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_process_scan_complete(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
@@ -1091,8 +1089,8 @@
cancel_work_sync(&priv->scan_completed);
if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index a644162..cf34087 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -65,12 +65,11 @@
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/gfp.h>
-#include <linux/mm.h> /* for page_address */
#include <net/mac80211.h>
#include "iwl-commands.h"
+#include "iwl-fw.h"
/**
* DOC: shared area - role and goal
@@ -116,7 +115,6 @@
* Holds the module parameters
*
* @sw_crypto: using hardware encryption, default = 0
- * @num_of_queues: number of tx queue, HW dependent
* @disable_11n: disable 11n capabilities, default = 0,
* use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 1
@@ -138,7 +136,6 @@
*/
struct iwl_mod_params {
int sw_crypto;
- int num_of_queues;
unsigned int disable_11n;
int amsdu_size_8K;
int antenna;
@@ -163,7 +160,6 @@
*
* Holds the module parameters
*
- * @max_txq_num: Max # Tx queues supported
* @num_ampdu_queues: num of ampdu queues
* @tx_chains_num: Number of TX chains
* @rx_chains_num: Number of RX chains
@@ -177,16 +173,16 @@
* relevant for 1000, 6000 and up
* @wd_timeout: TX queues watchdog timeout
* @struct iwl_sensitivity_ranges: range of sensitivity values
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
*/
struct iwl_hw_params {
- u8 max_txq_num;
u8 num_ampdu_queues;
u8 tx_chains_num;
u8 rx_chains_num;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 ht40_channel;
- bool shadow_reg_enable;
+ bool use_rts_for_aggregation;
u16 sku;
u32 rx_page_order;
u32 ct_kill_threshold;
@@ -213,45 +209,6 @@
IWL_UCODE_WOWLAN,
};
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
- struct list_head list;
-
- void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
- void *data);
- void *fn_data;
-
- u8 cmd;
- bool triggered, aborted;
-};
-
-/**
- * enum iwl_pa_type - Power Amplifier type
- * @IWL_PA_SYSTEM: based on uCode configuration
- * @IWL_PA_INTERNAL: use Internal only
- */
-enum iwl_pa_type {
- IWL_PA_SYSTEM = 0,
- IWL_PA_INTERNAL = 1,
-};
-
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@@ -268,6 +225,73 @@
IWL_LED_DISABLE,
};
+/*
+ * @max_ll_items: max number of OTP blocks
+ * @shadow_ram_support: shadow support for OTP memory
+ * @led_compensation: compensate the led on/off time per HW according
+ * to the deviation to achieve the desired led frequency.
+ * The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @adv_thermal_throttle: support advance thermal throttle
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @support_wimax_coexist: support wimax/wifi co-exist
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ * radio tuning when there is a high receiving plcp error rate
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @wd_timeout: TX queues watchdog timeout
+ * @max_event_log_size: size of event log buffer for ucode event logging
+ * @shadow_reg_enable: HW shadow register bit
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ * @wd_disable: disable watchdog timer
+ */
+struct iwl_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues;/* def: HW dependent */
+ /* for iwl_apm_init() */
+ u32 pll_cfg_val;
+
+ const u16 max_ll_items;
+ const bool shadow_ram_support;
+ u16 led_compensation;
+ bool adv_thermal_throttle;
+ bool support_ct_kill_exit;
+ const bool support_wimax_coexist;
+ u8 plcp_delta_threshold;
+ s32 chain_noise_scale;
+ unsigned int wd_timeout;
+ u32 max_event_log_size;
+ const bool shadow_reg_enable;
+ const bool hd_v2;
+ const bool no_idle_support;
+ const bool wd_disable;
+};
+
+/*
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
+ */
+struct iwl_bt_params {
+ bool advanced_bt_coexist;
+ u8 bt_init_traffic_load;
+ u8 bt_prio_boost;
+ u16 agg_time_limit;
+ bool bt_sco_disable;
+ bool bt_session_2;
+};
+/*
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ */
+struct iwl_ht_params {
+ const bool ht_greenfield_support; /* if used set to true */
+ bool use_rts_for_aggregation;
+ enum ieee80211_smps_mode smps_mode;
+};
+
/**
* struct iwl_cfg
* @name: Official name of the device
@@ -282,7 +306,6 @@
* @max_data_size: The maximal length of the fw data section
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
- * @sku: sku information from EEPROM
* @eeprom_ver: EEPROM version
* @eeprom_calib_ver: EEPROM calibration version
* @lib: pointer to the lib ops
@@ -290,7 +313,6 @@
* @base_params: pointer to basic parameters
* @ht_params: pointer to ht parameters
* @bt_params: pointer to bt parameters
- * @pa_type: used by 6000 series only to identify the type of Power Amplifier
* @need_temp_offset_calib: need to perform temperature offset calibration
* @no_xtal_calib: some devices do not need crystal calibration data,
* don't send it to those
@@ -321,17 +343,15 @@
const u32 max_inst_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
- u16 sku;
u16 eeprom_ver;
u16 eeprom_calib_ver;
const struct iwl_lib_ops *lib;
void (*additional_nic_config)(struct iwl_priv *priv);
/* params not likely to change within a device family */
- struct iwl_base_params *base_params;
+ const struct iwl_base_params *base_params;
/* params likely to change within a device family */
- struct iwl_ht_params *ht_params;
- struct iwl_bt_params *bt_params;
- enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
+ const struct iwl_ht_params *ht_params;
+ const struct iwl_bt_params *bt_params;
const bool need_temp_offset_calib; /* if used set to true */
const bool no_xtal_calib;
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
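Since the base/ht/bt parameter pointers in struct iwl_cfg are now const-qualified, device family tables can be defined as const objects; a minimal sketch with placeholder values (not real device data):

	static const struct iwl_base_params example_base_params = {
		.num_of_queues		= 20,
		.shadow_reg_enable	= true,
		.wd_timeout		= 2000,
	};

	static const struct iwl_ht_params example_ht_params = {
		.ht_greenfield_support	= true,
		.use_rts_for_aggregation = true,
	};

	static const struct iwl_cfg example_cfg = {
		.name		= "Example Wireless N",
		.base_params	= &example_base_params,
		.ht_params	= &example_ht_params,
	};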
@@ -346,10 +366,6 @@
/**
* struct iwl_shared - shared fields for all the layers of the driver
*
- * @dbg_level_dev: dbg level set per device. Prevails on
- * iwlagn_mod_params.debug_level if set (!= 0)
- * @ucode_owner: IWL_OWNERSHIP_*
- * @cmd_queue: command queue number
* @status: STATUS_*
* @wowlan: are we running wowlan uCode
* @valid_contexts: microcode/device supports multiple contexts
@@ -360,39 +376,20 @@
* @nic: pointer to the nic data
* @hw_params: see struct iwl_hw_params
* @lock: protect general shared data
- * @sta_lock: protects the station table.
- * If lock and sta_lock are needed, lock must be acquired first.
- * @mutex:
- * @wait_command_queue: the wait_queue for SYNC host command nad uCode load
+ * @wait_command_queue: the wait_queue for SYNC host commands
* @eeprom: pointer to the eeprom/OTP image
* @ucode_type: indicator of loaded ucode image
- * @notif_waits: things waiting for notification
- * @notif_wait_lock: lock protecting notification
- * @notif_waitq: head of notification wait queue
* @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 dbg_level_dev;
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-#define IWL_OWNERSHIP_DRIVER 0
-#define IWL_OWNERSHIP_TM 1
- u8 ucode_owner;
- u8 cmd_queue;
unsigned long status;
- bool wowlan;
u8 valid_contexts;
- struct iwl_cfg *cfg;
- struct iwl_priv *priv;
+ const struct iwl_cfg *cfg;
struct iwl_trans *trans;
- struct iwl_nic *nic;
+ void *drv;
struct iwl_hw_params hw_params;
-
- spinlock_t lock;
- spinlock_t sta_lock;
- struct mutex mutex;
+ const struct iwl_fw *fw;
wait_queue_head_t wait_command_queue;
@@ -402,11 +399,6 @@
/* ucode related variables */
enum iwl_ucode_type ucode_type;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
-
struct {
u32 error_event_table;
u32 log_event_table;
@@ -415,111 +407,13 @@
};
/* Whatever _m is (iwl_trans, iwl_priv), these macros will work */
-#define priv(_m) ((_m)->shrd->priv)
#define cfg(_m) ((_m)->shrd->cfg)
-#define nic(_m) ((_m)->shrd->nic)
#define trans(_m) ((_m)->shrd->trans)
#define hw_params(_m) ((_m)->shrd->hw_params)
-#ifdef CONFIG_IWLWIFI_DEBUG
-/*
- * iwl_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set per device debug level. This debug
- * level will be used if set, otherwise the global debug level which can be
- * set via module parameter is used.
- */
-static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
+static inline bool iwl_have_debug_level(u32 level)
{
- if (shrd->dbg_level_dev)
- return shrd->dbg_level_dev;
- else
- return iwlagn_mod_params.debug_level;
-}
-#else
-static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
-{
- return iwlagn_mod_params.debug_level;
-}
-#endif
-
-static inline void iwl_free_pages(struct iwl_shared *shrd, unsigned long page)
-{
- free_pages(page, shrd->hw_params.rx_page_order);
-}
-
-/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
-
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
+ return iwlagn_mod_params.debug_level & level;
}
enum iwl_rxon_context_id {
@@ -530,34 +424,9 @@
};
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-void iwl_nic_config(struct iwl_priv *priv);
const char *get_cmd_string(u8 cmd);
-bool iwl_check_for_ct_kill(struct iwl_priv *priv);
-
-
-/* notification wait support */
-void iwl_abort_notification_waits(struct iwl_shared *shrd);
-void __acquires(wait_entry)
-iwl_init_notification_wait(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_trans *trans,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data);
-int __must_check __releases(wait_entry)
-iwl_wait_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout);
-void __releases(wait_entry)
-iwl_remove_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry);
#define IWL_CMD(x) case x: return #x
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
/*****************************************************
* DRIVER STATUS FUNCTIONS
@@ -583,46 +452,4 @@
#define STATUS_CHANNEL_SWITCH_PENDING 19
#define STATUS_SCAN_COMPLETE 20
-static inline int iwl_is_ready(struct iwl_shared *shrd)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &shrd->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &shrd->status) &&
- !test_bit(STATUS_EXIT_PENDING, &shrd->status);
-}
-
-static inline int iwl_is_alive(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_ALIVE, &shrd->status);
-}
-
-static inline int iwl_is_init(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_INIT, &shrd->status);
-}
-
-static inline int iwl_is_rfkill_hw(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_RF_KILL_HW, &shrd->status);
-}
-
-static inline int iwl_is_rfkill(struct iwl_shared *shrd)
-{
- return iwl_is_rfkill_hw(shrd);
-}
-
-static inline int iwl_is_ctkill(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_CT_KILL, &shrd->status);
-}
-
-static inline int iwl_is_ready_rf(struct iwl_shared *shrd)
-{
- if (iwl_is_rfkill(shrd))
- return 0;
-
- return iwl_is_ready(shrd);
-}
-
#endif /* #__iwl_shared_h__ */
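With the per-device debug level machinery removed, callers simply test the module-parameter mask; a hedged usage sketch (IWL_DL_INFO is assumed to be one of the debug-level flags from iwl-debug.h):

	if (iwl_have_debug_level(IWL_DL_INFO))
		pr_info("verbose iwlwifi info logging is enabled\n");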
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 23eea06..b06c676 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -70,7 +70,6 @@
#include <net/mac80211.h>
#include <net/netlink.h>
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
@@ -79,6 +78,7 @@
#include "iwl-testmode.h"
#include "iwl-trans.h"
#include "iwl-fh.h"
+#include "iwl-prph.h"
/* Periphery registers absolute lower bound. This is used in order to
@@ -125,13 +125,15 @@
[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
/*
* See the struct iwl_rx_packet in iwl-commands.h for the format of the
* received events from the device
*/
-static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
+static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (pkt)
@@ -162,7 +164,7 @@
*/
static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb;
@@ -183,7 +185,8 @@
return;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
- NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
+ /* the length doesn't include len_n_flags field, so add it manually */
+ NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data);
cfg80211_testmode_event(skb, GFP_ATOMIC);
return;
@@ -194,7 +197,7 @@
void iwl_testmode_init(struct iwl_priv *priv)
{
- priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+ priv->pre_rx_handler = NULL;
priv->testmode_trace.trace_enabled = false;
priv->testmode_mem.read_in_progress = false;
}
@@ -283,7 +286,7 @@
IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret) {
IWL_ERR(priv, "Failed to send hcmd\n");
return ret;
@@ -292,7 +295,7 @@
return ret;
/* Handling return of SKB to the user */
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ pkt = cmd.resp_pkt;
if (!pkt) {
IWL_ERR(priv, "HCMD received a null response packet\n");
return ret;
@@ -309,7 +312,7 @@
/* The reply is in a page, that we cannot send to user space. */
memcpy(reply_buf, &(pkt->hdr), reply_len);
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf);
@@ -419,23 +422,23 @@
struct iwl_notification_wait calib_wait;
int ret;
- iwl_init_notification_wait(priv->shrd, &calib_wait,
- CALIBRATION_COMPLETE_NOTIFICATION,
- NULL, NULL);
- ret = iwl_init_alive_start(trans(priv));
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ CALIBRATION_COMPLETE_NOTIFICATION,
+ NULL, NULL);
+ ret = iwl_init_alive_start(priv);
if (ret) {
IWL_ERR(priv, "Fail init calibration: %d\n", ret);
goto cfg_init_calib_error;
}
- ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
if (ret)
IWL_ERR(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
- iwl_remove_notification(priv->shrd, &calib_wait);
+ iwl_remove_notification(&priv->notif_wait, &calib_wait);
return ret;
}
@@ -484,7 +487,7 @@
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (status)
IWL_ERR(priv, "Error loading init ucode: %d\n", status);
break;
@@ -495,7 +498,7 @@
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (status) {
IWL_ERR(priv,
"Error loading runtime ucode: %d\n", status);
@@ -510,7 +513,7 @@
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
iwl_scan_cancel_timeout(priv, 200);
iwl_trans_stop_device(trans);
- status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
if (status) {
IWL_ERR(priv,
"Error loading WOWLAN ucode: %d\n", status);
@@ -553,7 +556,7 @@
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
IWL_INFO(priv, "uCode version raw: 0x%x\n",
- nic(priv)->fw.ucode_ver);
+ priv->fw->ucode_ver);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
@@ -561,7 +564,7 @@
return -ENOMEM;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION,
- nic(priv)->fw.ucode_ver);
+ priv->fw->ucode_ver);
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -590,16 +593,16 @@
}
switch (priv->shrd->ucode_type) {
case IWL_UCODE_REGULAR:
- inst_size = nic(priv)->fw.ucode_rt.code.len;
- data_size = nic(priv)->fw.ucode_rt.data.len;
+ inst_size = priv->fw->ucode_rt.code.len;
+ data_size = priv->fw->ucode_rt.data.len;
break;
case IWL_UCODE_INIT:
- inst_size = nic(priv)->fw.ucode_init.code.len;
- data_size = nic(priv)->fw.ucode_init.data.len;
+ inst_size = priv->fw->ucode_init.code.len;
+ data_size = priv->fw->ucode_init.data.len;
break;
case IWL_UCODE_WOWLAN:
- inst_size = nic(priv)->fw.ucode_wowlan.code.len;
- data_size = nic(priv)->fw.ucode_wowlan.data.len;
+ inst_size = priv->fw->ucode_wowlan.code.len;
+ data_size = priv->fw->ucode_wowlan.data.len;
break;
case IWL_UCODE_NONE:
IWL_ERR(priv, "No uCode has not been loaded\n");
@@ -713,7 +716,7 @@
return -EMSGSIZE;
}
-static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -770,9 +773,13 @@
}
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
- if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
- priv->shrd->ucode_owner = owner;
- else {
+ if (owner == IWL_OWNERSHIP_DRIVER) {
+ priv->ucode_owner = owner;
+ priv->pre_rx_handler = NULL;
+ } else if (owner == IWL_OWNERSHIP_TM) {
+ priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+ priv->ucode_owner = owner;
+ } else {
IWL_ERR(priv, "Invalid owner\n");
return -EINVAL;
}
@@ -905,9 +912,9 @@
}
}
-static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw, struct nlattr **tb,
- struct sk_buff *skb,
- struct netlink_callback *cb)
+static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int idx, length;
@@ -937,6 +944,20 @@
return -ENOBUFS;
}
+static int iwl_testmode_notifications(struct ieee80211_hw *hw,
+ struct nlattr **tb)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ bool enable;
+
+ enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
+ if (enable)
+ priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+ else
+ priv->pre_rx_handler = NULL;
+ return 0;
+}
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -976,7 +997,7 @@
return -ENOMSG;
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
@@ -1022,13 +1043,19 @@
result = iwl_testmode_indirect_mem(hw, tb);
break;
+ case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ IWL_DEBUG_INFO(priv, "testmode notifications cmd "
+ "to driver\n");
+ result = iwl_testmode_notifications(hw, tb);
+ break;
+
default:
IWL_ERR(priv, "Unknown testmode command\n");
result = -ENOSYS;
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return result;
}
@@ -1063,21 +1090,21 @@
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
- result = iwl_testmode_trace_dump(hw, tb, skb, cb);
+ result = iwl_testmode_trace_dump(hw, skb, cb);
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
- result = iwl_testmode_buffer_dump(hw, tb, skb, cb);
+ result = iwl_testmode_buffer_dump(hw, skb, cb);
break;
default:
result = -EINVAL;
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return result;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 69b2e80..6ba211b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -122,6 +122,9 @@
 * For reading, a READ command is sent from userspace and the data
* is returned when the user calls a DUMP command.
* For writing, only a WRITE command is used.
+ * @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ * Command to enable/disable notifications (currently RX packets) from the
+ * driver to userspace.
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
@@ -152,7 +155,8 @@
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
- IWL_TM_CMD_MAX = 29,
+ IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
+ IWL_TM_CMD_MAX = 30,
};
/*
@@ -256,6 +260,10 @@
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
* indicates that the user wants to receive the response of the command
* in a reply SKB. If it's not present, the response is not returned.
+ * @IWL_TM_ATTR_ENABLE_NOTIFICATION:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
+ * flag enables (if present) or disables (if not) the forwarding
+ * to userspace.
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -282,7 +290,8 @@
IWL_TM_ATTR_FW_INST_SIZE = 21,
IWL_TM_ATTR_FW_DATA_SIZE = 22,
IWL_TM_ATTR_UCODE_CMD_SKB = 23,
- IWL_TM_ATTR_MAX = 24,
+ IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
+ IWL_TM_ATTR_MAX = 25,
};
/* uCode trace buffer */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 5b26b71..6796559 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
+#include <linux/wait.h>
#include <linux/pci.h>
#include "iwl-fh.h"
@@ -49,6 +50,12 @@
/* This file includes the declarations that are internal to the
* trans_pcie layer */
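+/* One RX receive buffer: a DMA-mapped page linked into the rx_free/rx_used lists of the RX queue. */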
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
/**
* struct isr_statistics - interrupt statistics
*
@@ -109,6 +116,26 @@
size_t size;
};
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index: current index
+ * @n_bd: total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_inc_wrap(int index, int n_bd)
+{
+ return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index: current index
+ * @n_bd: total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_dec_wrap(int index, int n_bd)
+{
+ return --index & (n_bd - 1);
+}
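+/* Examples: with n_bd = 256, iwl_queue_inc_wrap(255, 256) returns 0 and iwl_queue_dec_wrap(0, 256) returns 255. */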
+
/*
* This queue number is required for proper operation
* because the ucode will stop/start the scheduler as
@@ -169,6 +196,7 @@
* @meta: array of meta data for each command/tx buffer
* @dma_addr_cmd: physical address of cmd/tx buffer array
* @txb: array of per-TFD driver data
+ * @lock: queue lock
* @time_stamp: time (in jiffies) of last read_ptr change
* @need_update: indicates need to update read/write index
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
@@ -187,6 +215,7 @@
struct iwl_device_cmd **cmd;
struct iwl_cmd_meta *meta;
struct sk_buff **skbs;
+ spinlock_t lock;
unsigned long time_stamp;
u8 need_update;
u8 sched_retry;
@@ -202,6 +231,7 @@
* @rxq: all the RX queue data
* @rx_replenish: work that will be called when buffers need to be allocated
* @trans: pointer to the generic transport area
+ * @irq: the irq number for the device
* @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -215,6 +245,10 @@
* queue_stop_count: tracks what SW queue is stopped
* @pci_dev: basic pci-network driver stuff
* @hw_base: pci hardware address support
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_write_waitq: wait queue for uCode load
+ * @status: transport specific status flags
+ * @cmd_queue: command queue number
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
@@ -231,6 +265,7 @@
struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
+ unsigned int irq;
spinlock_t irq_lock;
u32 inta_mask;
u32 scd_base_addr;
@@ -251,6 +286,11 @@
/* PCI bus related data */
struct pci_dev *pci_dev;
void __iomem *hw_base;
+
+ bool ucode_write_complete;
+ wait_queue_head_t ucode_write_waitq;
+ unsigned long status;
+ u8 cmd_queue;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -285,7 +325,7 @@
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb, int handler_status);
+ struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
@@ -318,7 +358,8 @@
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -332,14 +373,19 @@
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+ set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
+static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+{
+ IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
+ iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
/*
* we have 8 bits used like this:
*
@@ -365,7 +411,7 @@
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, const char *msg)
+ struct iwl_tx_queue *txq)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -376,19 +422,19 @@
if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
iwl_op_mode_queue_not_full(trans->op_mode, ac);
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
- hwq, ac, msg);
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
+ hwq, ac);
} else {
- IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
- " stop count %d. %s",
- hwq, ac, atomic_read(&trans_pcie->
- queue_stop_count[ac]), msg);
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Don't wake hwq %d ac %d stop count %d",
+ hwq, ac,
+ atomic_read(&trans_pcie->queue_stop_count[ac]));
}
}
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, const char *msg)
+ struct iwl_tx_queue *txq)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -399,34 +445,22 @@
if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
iwl_op_mode_queue_full(trans->op_mode, ac);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
- " stop count %d. %s",
- hwq, ac, atomic_read(&trans_pcie->
- queue_stop_count[ac]), msg);
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Stop hwq %d ac %d stop count %d",
+ hwq, ac,
+ atomic_read(&trans_pcie->queue_stop_count[ac]));
} else {
- IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
- " stop count %d. %s",
- hwq, ac, atomic_read(&trans_pcie->
- queue_stop_count[ac]), msg);
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Don't stop hwq %d ac %d stop count %d",
+ hwq, ac,
+ atomic_read(&trans_pcie->queue_stop_count[ac]));
}
} else {
- IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s",
- hwq, msg);
+ IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
+ hwq);
}
}
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
int txq_id)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 2c910fd..9bc3c73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -30,11 +30,9 @@
#include <linux/wait.h>
#include <linux/gfp.h>
-/*TODO: Remove include to iwl-core.h*/
-#include "iwl-core.h"
+#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-trans-pcie-int.h"
-#include "iwl-wifi.h"
#include "iwl-op-mode.h"
#ifdef CONFIG_IWLWIFI_IDI
@@ -142,7 +140,7 @@
if (q->need_update == 0)
goto exit_unlock;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* shadow register enabled */
/* Device expects a multiple of 8 */
q->write_actual = (q->write & ~0x7);
@@ -358,6 +356,113 @@
iwlagn_rx_replenish(trans_pcie->trans);
}
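+/* Handle a single RX buffer: unmap its page, pass the packet to the op mode, then either remap and recycle the page on rx_free or, if the page was stolen, queue the empty buffer on rx_used. */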
+static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_device_cmd *cmd;
+ unsigned long flags;
+ int len, err;
+ u16 sequence;
+ struct iwl_rx_cmd_buffer rxcb;
+ struct iwl_rx_packet *pkt;
+ bool reclaim;
+ int index, cmd_index;
+
+ if (WARN_ON(!rxb))
+ return;
+
+ dma_unmap_page(trans->dev, rxb->page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+
+ rxcb._page = rxb->page;
+ pkt = rxb_addr(&rxcb);
+
+ IWL_DEBUG_RX(trans, "%s, 0x%02x\n",
+ get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+ (pkt->hdr.cmd != REPLY_RX) &&
+ (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+ (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+ (pkt->hdr.cmd != REPLY_TX);
+
+ sequence = le16_to_cpu(pkt->hdr.sequence);
+ index = SEQ_TO_INDEX(sequence);
+ cmd_index = get_cmd_index(&txq->q, index);
+
+ if (reclaim)
+ cmd = txq->cmd[cmd_index];
+ else
+ cmd = NULL;
+
+ /* warn if this is cmd response / notification and the uCode
+ * didn't set the SEQ_RX_FRAME for a frame that is
+ * uCode-originated
+ * If you saw this code after the second half of 2012, then
+ * please remove it
+ */
+ WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
+ (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
+ "reclaim is false, SEQ_RX_FRAME unset: %s\n",
+ get_cmd_string(pkt->hdr.cmd));
+
+ err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+
+ /*
+ * XXX: After here, we should always check rxcb._page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * iwl_trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxcb._page)
+ iwl_tx_cmd_complete(trans, &rxcb, err);
+ else
+ IWL_WARN(trans, "Claim null rxb?\n");
+ }
+
+ /* page was stolen from us */
+ if (rxcb._page == NULL)
+ rxb->page = NULL;
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ dma_map_page(trans->dev, rxb->page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
/**
* iwl_rx_handle - Main entry function for receiving responses from uCode
*
@@ -367,20 +472,12 @@
*/
static void iwl_rx_handle(struct iwl_trans *trans)
{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
- struct iwl_device_cmd *cmd;
u32 r, i;
- int reclaim;
- unsigned long flags;
u8 fill_rx = 0;
u32 count = 8;
int total_empty;
- int index, cmd_index;
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
@@ -400,102 +497,14 @@
fill_rx = 1;
while (i != r) {
- int len, err;
- u16 sequence;
+ struct iwl_rx_mem_buffer *rxb;
rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- if (WARN_ON(rxb == NULL)) {
- i = (i + 1) & RX_QUEUE_MASK;
- continue;
- }
-
rxq->queue[i] = NULL;
- dma_unmap_page(trans->dev, rxb->page_dma,
- PAGE_SIZE << hw_params(trans).rx_page_order,
- DMA_FROM_DEVICE);
- pkt = rxb_addr(rxb);
+ IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", rxb);
- IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv(trans), pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- sequence = le16_to_cpu(pkt->hdr.sequence);
- index = SEQ_TO_INDEX(sequence);
- cmd_index = get_cmd_index(&txq->q, index);
-
- if (reclaim)
- cmd = txq->cmd[cmd_index];
- else
- cmd = NULL;
-
- /* warn if this is cmd response / notification and the uCode
- * didn't set the SEQ_RX_FRAME for a frame that is
- * uCode-originated
- * If you saw this code after the second half of 2012, then
- * please remove it
- */
- WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
- (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
- "reclaim is false, SEQ_RX_FRAME unset: %s\n",
- get_cmd_string(pkt->hdr.cmd));
-
- err = iwl_op_mode_rx(trans->op_mode, rxb, cmd);
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking
- * iwl_trans_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_tx_cmd_complete(trans, rxb, err);
- else
- IWL_WARN(trans, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = dma_map_page(trans->dev, rxb->page,
- 0, PAGE_SIZE <<
- hw_params(trans).rx_page_order,
- DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_handle_rxbuf(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
/* If there are a lot of unused frames,
@@ -591,17 +600,16 @@
{
u32 base;
struct iwl_error_event_table table;
- struct iwl_nic *nic = nic(trans);
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
base = trans->shrd->device_pointers.error_event_table;
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
- base = nic->init_errlog_ptr;
+ base = trans->shrd->fw->init_errlog_ptr;
} else {
if (!base)
- base = nic->inst_errlog_ptr;
+ base = trans->shrd->fw->inst_errlog_ptr;
}
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
@@ -623,7 +631,7 @@
trans_pcie->isr_stats.err_code = table.error_id;
- trace_iwlwifi_dev_ucode_error(priv(nic), table.error_id, table.tsf_low,
+ trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.line,
table.blink1, table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1,
@@ -689,7 +697,7 @@
}
IWL_ERR(trans, "Loaded firmware version: %s\n",
- nic(trans)->fw.fw_version);
+ trans->shrd->fw->fw_version);
iwl_dump_nic_error_log(trans);
iwl_dump_csr(trans);
@@ -715,7 +723,6 @@
u32 ptr; /* SRAM byte address of log data */
u32 ev, time, data; /* event log data */
unsigned long reg_flags;
- struct iwl_nic *nic = nic(trans);
if (num_events == 0)
return pos;
@@ -723,10 +730,10 @@
base = trans->shrd->device_pointers.log_event_table;
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
- base = nic->init_evtlog_ptr;
+ base = trans->shrd->fw->init_evtlog_ptr;
} else {
if (!base)
- base = nic->inst_evtlog_ptr;
+ base = trans->shrd->fw->inst_evtlog_ptr;
}
if (mode == 0)
@@ -738,11 +745,11 @@
/* Make sure device is powered up for SRAM reads */
spin_lock_irqsave(&trans->reg_lock, reg_flags);
- iwl_grab_nic_access(trans);
+ if (unlikely(!iwl_grab_nic_access(trans)))
+ goto out_unlock;
/* Set starting address; reads will auto-increment */
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
- rmb();
/* "time" is actually "data" for mode 0 (no timestamp).
* place event id # at far right for easier visual parsing. */
@@ -756,7 +763,7 @@
"EVT_LOG:0x%08x:%04u\n",
time, ev);
} else {
- trace_iwlwifi_dev_ucode_event(priv(trans), 0,
+ trace_iwlwifi_dev_ucode_event(trans->dev, 0,
time, ev);
IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
time, ev);
@@ -770,7 +777,7 @@
} else {
IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv(trans), time,
+ trace_iwlwifi_dev_ucode_event(trans->dev, time,
data, ev);
}
}
@@ -778,6 +785,7 @@
/* Allow device to power down */
iwl_release_nic_access(trans);
+out_unlock:
spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
return pos;
}
@@ -832,17 +840,16 @@
u32 logsize;
int pos = 0;
size_t bufsz = 0;
- struct iwl_nic *nic = nic(trans);
base = trans->shrd->device_pointers.log_event_table;
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
- logsize = nic->init_evtlog_size;
+ logsize = trans->shrd->fw->init_evtlog_size;
if (!base)
- base = nic->init_evtlog_ptr;
+ base = trans->shrd->fw->init_evtlog_ptr;
} else {
- logsize = nic->inst_evtlog_size;
+ logsize = trans->shrd->fw->inst_evtlog_size;
if (!base)
- base = nic->inst_evtlog_ptr;
+ base = trans->shrd->fw->inst_evtlog_ptr;
}
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
@@ -881,7 +888,7 @@
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
+ if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -901,7 +908,7 @@
if (!*buf)
return -ENOMEM;
}
- if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
+ if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
/*
* if uCode has wrapped back to top of log,
* start at the oldest entry,
@@ -960,7 +967,7 @@
inta = trans_pcie->inta;
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
/* just for debug */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
@@ -989,7 +996,7 @@
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
if (inta & CSR_INT_BIT_SCD) {
IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
@@ -1009,30 +1016,16 @@
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(trans, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
+ bool hw_rfkill;
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
+ hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
- /* driver only loads ucode once setting the interface up.
- * the driver allows loading the ucode even if the radio
- * is killed. Hence update the killswitch state here. The
- * rfkill handler will care about restarting if needed.
- */
- if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW,
- &trans->shrd->status);
- else
- clear_bit(STATUS_RF_KILL_HW,
- &trans->shrd->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rf_kill);
- }
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -1057,7 +1050,7 @@
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < hw_params(trans).max_txq_num; i++)
+ for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++)
iwl_txq_update_write_ptr(trans,
&trans_pcie->txq[i]);
@@ -1122,8 +1115,8 @@
isr_stats->tx++;
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
- trans->ucode_write_complete = 1;
- wake_up(&trans->shrd->wait_command_queue);
+ trans_pcie->ucode_write_complete = true;
+ wake_up(&trans_pcie->ucode_write_waitq);
}
if (inta & ~handled) {
@@ -1138,13 +1131,11 @@
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
+ if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL) {
- IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
- }
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
}
/******************************************************************************
@@ -1269,7 +1260,7 @@
if (!trans)
return IRQ_NONE;
- trace_iwlwifi_dev_irq(priv(trans));
+ trace_iwlwifi_dev_irq(trans->dev);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1301,7 +1292,7 @@
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
"fh 0x%08x\n", inta, inta_mask, inta_fh);
@@ -1312,7 +1303,7 @@
/* iwl_irq_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
@@ -1323,7 +1314,7 @@
none:
/* re-enable interrupts here since we don't have anything to service. */
/* only Re-enable if disabled by irq and no tasklet was scheduled. */
- if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
@@ -1359,7 +1350,7 @@
if (!trans_pcie->use_ict)
return iwl_isr(irq, data);
- trace_iwlwifi_dev_irq(priv(trans));
+ trace_iwlwifi_dev_irq(trans->dev);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1376,7 +1367,7 @@
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
if (!read) {
IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
goto none;
@@ -1395,7 +1386,7 @@
iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index,
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
read);
} while (read);
@@ -1423,7 +1414,7 @@
/* iwl_irq_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
/* Allow interrupt if it was disabled by this handler and
* no tasklet was scheduled; we should not enable interrupt,
@@ -1439,7 +1430,7 @@
/* re-enable interrupts here since we don't have anything to service.
* only Re-enable if disabled by irq.
*/
- if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 4e1e74e..eb430b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -41,6 +41,43 @@
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get as from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific code), the AC->hw
+ * queue mapping is the identity mapping.
+ */
+
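+/* Standard IEEE 802.11 UP (TID 0-7) to AC mapping; TIDs 8-15 are not covered here. */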
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+
/**
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
@@ -99,7 +136,7 @@
if (txq->need_update == 0)
return;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* shadow register enabled */
iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
@@ -217,6 +254,8 @@
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ lockdep_assert_held(&txq->lock);
+
iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
/* free SKB */
@@ -358,7 +397,7 @@
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans->shrd->cmd_queue)
+ if (txq_id != trans_pcie->cmd_queue)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -440,6 +479,15 @@
scd_retry ? "BA" : "AC/CMD", txq_id);
}
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
u8 ctx, u16 tid)
{
@@ -547,7 +595,8 @@
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++)
if (!test_and_set_bit(txq_id,
&trans_pcie->txq_ctx_active_msk))
return txq_id;
@@ -616,15 +665,13 @@
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
- unsigned long flags;
u32 idx;
u16 copy_size, cmd_size;
- bool is_ct_kill = false;
bool had_nocopy = false;
int i;
u8 *cmd_dest;
@@ -639,12 +686,6 @@
return -EIO;
}
- if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
- !(cmd->flags & CMD_ON_DEMAND)) {
- IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
- return -EIO;
- }
-
copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);
@@ -674,23 +715,13 @@
if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
return -EINVAL;
- if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
- IWL_WARN(trans, "Not sending command - %s KILL\n",
- iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&trans->hcmd_lock, flags);
+ spin_lock_bh(&txq->lock);
if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
- is_ct_kill = iwl_check_for_ct_kill(priv(trans));
- if (!is_ct_kill) {
- IWL_ERR(trans, "Restarting adapter queue is full\n");
- iwl_op_mode_nic_error(trans->op_mode);
- }
+ iwl_op_mode_cmd_queue_full(trans->op_mode);
return -ENOSPC;
}
@@ -707,7 +738,7 @@
out_cmd->hdr.cmd = cmd->id;
out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
INDEX_TO_SEQ(q->write_ptr));
/* and copy the data that needs to be copied */
@@ -727,7 +758,7 @@
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, trans->shrd->cmd_queue);
+ q->write_ptr, idx, trans_pcie->cmd_queue);
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
DMA_BIDIRECTIONAL);
@@ -779,7 +810,7 @@
/* check that tracing gets all possible blocks */
BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
trace_bufs[0], trace_lens[0],
trace_bufs[1], trace_lens[1],
trace_bufs[2], trace_lens[2]);
@@ -790,7 +821,7 @@
iwl_txq_update_write_ptr(trans, txq);
out:
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock_bh(&txq->lock);
return idx;
}
@@ -809,6 +840,8 @@
struct iwl_queue *q = &txq->q;
int nfreed = 0;
+ lockdep_assert_held(&txq->lock);
+
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
"index %d is out of range [0-%d] %d %d.\n", __func__,
@@ -838,7 +871,7 @@
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
+void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
int handler_status)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -849,21 +882,22 @@
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
- unsigned long flags;
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans->shrd->cmd_queue,
+ if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->shrd->cmd_queue, sequence,
- trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
+ spin_lock(&txq->lock);
+
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
@@ -875,12 +909,13 @@
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
- meta->source->handler_status = handler_status;
- rxb->page = NULL;
- }
+ struct page *p = rxb_steal_page(rxb);
- spin_lock_irqsave(&trans->hcmd_lock, flags);
+ meta->source->resp_pkt = pkt;
+ meta->source->_rx_page_addr = (unsigned long)page_address(p);
+ meta->source->_rx_page_order = hw_params(trans).rx_page_order;
+ meta->source->handler_status = handler_status;
+ }
iwl_hcmd_queue_reclaim(trans, txq_id, index);
@@ -898,7 +933,7 @@
meta->flags = 0;
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
@@ -912,12 +947,9 @@
return -EINVAL;
- if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
- return -EBUSY;
-
ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
@@ -931,26 +963,22 @@
int cmd_idx;
int ret;
- lockdep_assert_held(&trans->shrd->mutex);
-
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
- if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
- return -EBUSY;
-
-
- if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- return -ECANCELED;
- }
if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
IWL_ERR(trans, "Command %s failed: FW Error\n",
get_cmd_string(cmd->id));
return -EIO;
}
- set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+
+ if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
+ &trans->shrd->status))) {
+ IWL_ERR(trans, "Command %s: a command is already active!\n",
+ get_cmd_string(cmd->id));
+ return -EIO;
+ }
+
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
@@ -958,7 +986,7 @@
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
@@ -970,15 +998,15 @@
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
struct iwl_tx_queue *txq =
- &trans_pcie->txq[trans->shrd->cmd_queue];
+ &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Current CMD queue read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
@@ -990,7 +1018,7 @@
}
}
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
ret = -EIO;
@@ -1007,13 +1035,13 @@
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
+ trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
- if (cmd->reply_page) {
- iwl_free_pages(trans->shrd, cmd->reply_page);
- cmd->reply_page = 0;
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
}
return ret;
@@ -1038,9 +1066,11 @@
int freed = 0;
/* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans->shrd->cmd_queue))
+ if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return 0;
+ lockdep_assert_held(&txq->lock);
+
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 9f8b239..91628565 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -75,8 +75,12 @@
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
-#include "iwl-core.h"
-#include "iwl-ucode.h"
+
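+/* Build a bitmask with bits lo..hi (inclusive) set, e.g. IWL_MASK(0, 7) == 0xff. */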
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
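+/* Select every TX queue for the scheduler's queue-chain mode except the command queue. */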
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
+ (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+ (~(1<<(trans_pcie)->cmd_queue)))
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
@@ -301,6 +305,7 @@
{
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
return -EINVAL;
@@ -313,7 +318,7 @@
if (!txq->meta || !txq->cmd)
goto error;
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++) {
txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
GFP_KERNEL);
@@ -324,7 +329,7 @@
/* Alloc driver data array and TFD circular buffer */
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
- if (txq_id != trans->shrd->cmd_queue) {
+ if (txq_id != trans_pcie->cmd_queue) {
txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
GFP_KERNEL);
if (!txq->skbs) {
@@ -352,7 +357,7 @@
txq->skbs = NULL;
/* since txq->cmd has been zeroed,
* all non allocated cmd[i] will be NULL */
- if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+ if (txq->cmd && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
kfree(txq->cmd[i]);
kfree(txq->meta);
@@ -390,6 +395,8 @@
if (ret)
return ret;
+ spin_lock_init(&txq->lock);
+
/*
* Tell nic where to find circular buffer of Tx Frame Descriptors for
* given Tx queue, and enable the DMA channel used for that queue.
@@ -409,8 +416,6 @@
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
enum dma_data_direction dma_dir;
- unsigned long flags;
- spinlock_t *lock;
if (!q->n_bd)
return;
@@ -418,22 +423,19 @@
/* In the command queue, all the TBs are mapped as BIDI
* so unmap them as such.
*/
- if (txq_id == trans->shrd->cmd_queue) {
+ if (txq_id == trans_pcie->cmd_queue)
dma_dir = DMA_BIDIRECTIONAL;
- lock = &trans->hcmd_lock;
- } else {
+ else
dma_dir = DMA_TO_DEVICE;
- lock = &trans->shrd->sta_lock;
- }
- spin_lock_irqsave(lock, flags);
+ spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
/* The read_ptr needs to bound by q->n_window */
iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock_bh(&txq->lock);
}
/**
@@ -457,7 +459,7 @@
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < txq->q.n_window; i++)
kfree(txq->cmd[i]);
@@ -495,7 +497,7 @@
/* Tx queues */
if (trans_pcie->txq) {
for (txq_id = 0;
- txq_id < hw_params(trans).max_txq_num; txq_id++)
+ txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
iwl_tx_queue_free(trans, txq_id);
}
@@ -520,7 +522,7 @@
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+ u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
@@ -544,7 +546,7 @@
goto error;
}
- trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+ trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
sizeof(struct iwl_tx_queue), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
@@ -553,8 +555,9 @@
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
@@ -598,8 +601,9 @@
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
@@ -687,6 +691,7 @@
*/
static int iwl_apm_init(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
@@ -756,7 +761,7 @@
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- set_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);
+ set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
out:
return ret;
@@ -782,9 +787,10 @@
static void iwl_apm_stop(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
/* Stop device's DMA activity */
iwl_apm_stop_master(trans);
@@ -819,7 +825,7 @@
iwl_set_pwr_vmain(trans);
- iwl_nic_config(priv(trans));
+ iwl_op_mode_nic_config(trans->op_mode);
#ifndef CONFIG_IWLWIFI_IDI
/* Allocate the RX queue, or reset if it is already allocated */
@@ -830,14 +836,12 @@
if (iwl_tx_init(trans))
return -ENOMEM;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
0x800FFFFF);
}
- set_bit(STATUS_INIT, &trans->shrd->status);
-
return 0;
}
@@ -948,13 +952,14 @@
* ucode
*/
static int iwl_load_section(struct iwl_trans *trans, const char *name,
- struct fw_desc *image, u32 dst_addr)
+ const struct fw_desc *image, u32 dst_addr)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
int ret;
- trans->ucode_write_complete = 0;
+ trans_pcie->ucode_write_complete = false;
iwl_write_direct32(trans,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -985,8 +990,8 @@
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(trans, "%s uCode section being loaded...\n", name);
- ret = wait_event_timeout(trans->shrd->wait_command_queue,
- trans->ucode_write_complete, 5 * HZ);
+ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+ trans_pcie->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(trans, "Could not load the %s uCode section\n",
name);
@@ -996,7 +1001,8 @@
return 0;
}
-static int iwl_load_given_ucode(struct iwl_trans *trans, struct fw_img *image)
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
{
int ret = 0;
@@ -1016,13 +1022,14 @@
return 0;
}
-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, struct fw_img *fw)
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw)
{
int ret;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
- trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
@@ -1032,22 +1039,19 @@
trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
- if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
- iwl_prepare_card_hw(trans)) {
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(trans, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
- else
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (iwl_is_rfkill(trans->shrd)) {
- iwl_op_mode_hw_rf_kill(trans->op_mode, true);
- iwl_enable_interrupts(trans);
+ if (hw_rfkill) {
+ iwl_enable_rfkill_int(trans);
return -ERFKILL;
}
@@ -1073,9 +1077,7 @@
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- iwl_load_given_ucode(trans, fw);
-
- return 0;
+ return iwl_load_given_ucode(trans, fw);
}
/*
@@ -1116,7 +1118,8 @@
a += 4)
iwl_write_targ_mem(trans, a, 0);
for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+ SCD_TRANS_TBL_OFFSET_QUEUE(
+ cfg(trans)->base_params->num_of_queues);
a += 4)
iwl_write_targ_mem(trans, a, 0);
@@ -1135,11 +1138,11 @@
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans));
+ SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
iwl_write_prph(trans, SCD_AGGR_SEL, 0);
/* initiate the queues */
- for (i = 0; i < hw_params(trans).max_txq_num; i++) {
+ for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -1156,7 +1159,7 @@
}
iwl_write_prph(trans, SCD_INTERRUPT_MASK,
- IWL_MASK(0, hw_params(trans).max_txq_num));
+ IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -1167,7 +1170,7 @@
else
queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+ iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
/* make sure all queue are not stopped */
memset(&trans_pcie->queue_stopped[0], 0,
@@ -1216,7 +1219,7 @@
*/
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
- int ch, txq_id;
+ int ch, txq_id, ret;
unsigned long flags;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1229,9 +1232,10 @@
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
iwl_write_direct32(trans,
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+ ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
+ 1000);
+ if (ret < 0)
IWL_ERR(trans, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
iwl_read_direct32(trans,
@@ -1245,7 +1249,8 @@
}
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++)
iwl_tx_queue_unmap(trans, txq_id);
return 0;
@@ -1271,7 +1276,7 @@
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_rx_stop(trans);
@@ -1297,7 +1302,7 @@
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* wait to make sure we flush pending tasklet*/
- synchronize_irq(trans->irq);
+ synchronize_irq(trans_pcie->irq);
tasklet_kill(&trans_pcie->irq_tasklet);
cancel_work_sync(&trans_pcie->rx_replenish);
@@ -1306,6 +1311,17 @@
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}
+static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+{
+ /* let the ucode operate on its own */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
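+ /* stop servicing interrupts and release the MAC access request so the device can enter low power while suspended */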
+ iwl_disable_interrupts(trans);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid)
@@ -1358,6 +1374,8 @@
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ spin_lock(&txq->lock);
+
/* In AGG mode, the index in the ring must correspond to the WiFi
* sequence number. This is a HW requirements to help the SCD to parse
* the BA.
@@ -1404,7 +1422,7 @@
&dev_cmd->hdr, firstlen,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
- return -1;
+ goto out_err;
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
@@ -1426,7 +1444,7 @@
dma_unmap_addr(out_meta, mapping),
dma_unmap_len(out_meta, len),
DMA_BIDIRECTIONAL);
- return -1;
+ goto out_err;
}
}
@@ -1457,7 +1475,7 @@
dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
- trace_iwlwifi_dev_tx(priv(trans),
+ trace_iwlwifi_dev_tx(trans->dev,
&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
@@ -1478,10 +1496,14 @@
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(trans, txq, "Queue is full");
+ iwl_stop_queue(trans, txq);
}
}
+ spin_unlock(&txq->lock);
return 0;
+ out_err:
+ spin_unlock(&txq->lock);
+ return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
@@ -1489,6 +1511,7 @@
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
+ bool hw_rfkill;
trans_pcie->inta_mask = CSR_INI_SET_MASK;
@@ -1498,11 +1521,11 @@
iwl_alloc_isr_ict(trans);
- err = request_irq(trans->irq, iwl_isr_ict, IRQF_SHARED,
+ err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
DRV_NAME, trans);
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n",
- trans->irq);
+ trans_pcie->irq);
goto error;
}
@@ -1518,21 +1541,14 @@
iwl_apm_init(trans);
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(trans,
- CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
- else
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
-
- iwl_op_mode_hw_rf_kill(trans->op_mode,
- test_bit(STATUS_RF_KILL_HW,
- &trans->shrd->status));
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return err;
err_free_irq:
- free_irq(trans->irq, trans);
+ free_irq(trans_pcie->irq, trans);
error:
iwl_free_isr_ict(trans);
tasklet_kill(&trans_pcie->irq_tasklet);
@@ -1546,13 +1562,11 @@
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
/* Even if we stop the HW, we still want the RF kill interrupt */
- IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+ iwl_enable_rfkill_int(trans);
}
static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
- int txq_id, int ssn, u32 status,
- struct sk_buff_head *skbs)
+ int txq_id, int ssn, struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1560,6 +1574,8 @@
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
+ spin_lock(&txq->lock);
+
txq->time_stamp = jiffies;
if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
@@ -1574,6 +1590,7 @@
IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
"agg_txq[sta_id[tid] %d", txq_id,
trans_pcie->agg_txq[sta_id][tid]);
+ spin_unlock(&txq->lock);
return 1;
}
@@ -1582,28 +1599,35 @@
txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
- (!txq->sched_retry ||
- status != TX_STATUS_FAIL_PASSIVE_NO_RX))
- iwl_wake_queue(trans, txq, "Packets reclaimed");
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
}
+
+ spin_unlock(&txq->lock);
return 0;
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
- iowrite8(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
- iowrite32(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
- u32 val = ioread32(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
- return val;
+ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
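+/* Apply the op-mode supplied transport configuration; currently only the command queue number. */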
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1611,18 +1635,17 @@
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_calib_free_results(trans);
iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_pcie_rx_free(trans);
#endif
if (trans_pcie->irq_requested == true) {
- free_irq(trans->irq, trans);
+ free_irq(trans_pcie->irq, trans);
iwl_free_isr_ict(trans);
}
pci_disable_msi(trans_pcie->pci_dev);
- pci_iounmap(trans_pcie->pci_dev, trans_pcie->hw_base);
+ iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
@@ -1633,42 +1656,20 @@
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
- /*
- * This function is called when system goes into suspend state
- * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend
- * function first but since iwlagn_mac_stop() has no knowledge of
- * who the caller is,
- * it will not call apm_ops.stop() to stop the DMA operation.
- * Calling apm_ops.stop here to make sure we stop the DMA.
- *
- * But of course ... if we have configured WoWLAN then we did other
- * things already :-)
- */
- if (!trans->shrd->wowlan) {
- iwl_apm_stop(trans);
- } else {
- iwl_disable_interrupts(trans);
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- }
-
return 0;
}
static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
- bool hw_rfkill = false;
+ bool hw_rfkill;
- iwl_enable_interrupts(trans);
-
- if (!(iwl_read32(trans, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ iwl_enable_rfkill_int(trans);
else
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ iwl_enable_interrupts(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
@@ -1676,32 +1677,6 @@
}
#endif /* CONFIG_PM_SLEEP */
-static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg)
-{
- u8 ac, txq_id;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- for (ac = 0; ac < AC_NUM; ac++) {
- txq_id = trans_pcie->ac_to_queue[ctx][ac];
- IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
- ac,
- (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
- ? "stopped" : "awake");
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
- }
-}
-
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
- const char *msg)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
-}
-
#define IWL_FLUSH_WAIT_MS 2000
static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -1714,8 +1689,8 @@
int ret = 0;
/* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
- if (cnt == trans->shrd->cmd_queue)
+ for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+ if (cnt == trans_pcie->cmd_queue)
continue;
txq = &trans_pcie->txq[cnt];
q = &txq->q;
@@ -1960,7 +1935,9 @@
int pos = 0;
int cnt;
int ret;
- const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+ size_t bufsz;
+
+ bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
if (!trans_pcie->txq) {
IWL_ERR(trans, "txq not ready\n");
@@ -1970,7 +1947,7 @@
if (!buf)
return -ENOMEM;
- for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
@@ -2219,7 +2196,7 @@
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .wake_any_queue = iwl_trans_pcie_wake_any_queue,
+ .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
.send_cmd = iwl_trans_pcie_send_cmd,
@@ -2231,7 +2208,6 @@
.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
.free = iwl_trans_pcie_free,
- .stop_queue = iwl_trans_pcie_stop_queue,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
@@ -2245,6 +2221,7 @@
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
+ .configure = iwl_trans_pcie_configure,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
@@ -2267,8 +2244,8 @@
trans->ops = &trans_ops_pcie;
trans->shrd = shrd;
trans_pcie->trans = trans;
- spin_lock_init(&trans->hcmd_lock);
spin_lock_init(&trans_pcie->irq_lock);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
@@ -2304,9 +2281,9 @@
goto out_pci_disable_device;
}
- trans_pcie->hw_base = pci_iomap(pdev, 0, 0);
+ trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed");
+ dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
err = -ENODEV;
goto out_pci_release_regions;
}
@@ -2330,7 +2307,7 @@
"pci_enable_msi failed(0X%x)", err);
trans->dev = &pdev->dev;
- trans->irq = pdev->irq;
+ trans_pcie->irq = pdev->irq;
trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
deleted file mode 100644
index 506c062..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include "iwl-trans.h"
-
-int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
- u32 flags, u16 len, const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = { len, },
- .data = { data, },
- .flags = flags,
- };
-
- return iwl_trans_send_cmd(trans, &cmd);
-}
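The deleted wrapper above is replaced by iwl_dvm_send_cmd_pdu() on the op_mode side; its definition is not part of this patch, but a plausible sketch, mirroring the removed code, would be:

int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
			 u32 flags, u16 len, const void *data)
{
	/* same convenience wrapper as before, now owned by the DVM op_mode */
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_dvm_send_cmd(priv, &cmd);
}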
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 4e7e6c0..7d1990c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -64,6 +64,7 @@
#define __iwl_trans_h__
#include <linux/ieee80211.h>
+#include <linux/mm.h> /* for page_address */
#include "iwl-shared.h"
#include "iwl-debug.h"
@@ -121,6 +122,62 @@
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s) ((s) & 0xff)
+#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears at the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+ u8 cmd; /* Command ID: REPLY_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+ /*
+ * The driver sets up the sequence number to values of its choosing.
+ * uCode does not use this value, but passes it back to the driver
+ * when sending the response to each driver-originated command, so
+ * the driver can match the response to the command. Since the values
+ * don't get used by uCode, the driver may set up an arbitrary format.
+ *
+ * There is one exception: uCode sets bit 15 when it originates
+ * the response/notification, i.e. when the response/notification
+ * is not a direct response to a command sent by the driver. For
+ * example, uCode issues REPLY_RX when it sends a received frame
+ * to the driver; it is not a direct response to any driver command.
+ *
+ * The Linux driver uses the following format:
+ *
+ * 0:7 tfd index - position within TX queue
+ * 8:12 TX queue id
+ * 13:14 reserved
+ * 15 unsolicited RX or uCode-originated notification
+ */
+ __le16 sequence;
+} __packed;
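To make the sequence layout described above concrete, here is a hedged sketch using the SEQ_TO_*/ *_TO_SEQ macros introduced in this hunk; the helper names are illustrative only:

static inline __le16 example_build_sequence(u8 txq_id, u8 tfd_index)
{
	/* bits 8:12 carry the TX queue id, bits 0:7 the TFD index */
	return cpu_to_le16(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(tfd_index));
}

static inline void example_parse_sequence(__le16 sequence,
					  u8 *txq_id, u8 *tfd_index)
{
	u16 seq = le16_to_cpu(sequence);

	*txq_id = SEQ_TO_QUEUE(seq);	/* bits 8:12 */
	*tfd_index = SEQ_TO_INDEX(seq);	/* bits 0:7 */
}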
+
+
+#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
+
+struct iwl_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-14: Reserved
+ * 13-00: RX frame size
+ */
+ __le32 len_n_flags;
+ struct iwl_cmd_header hdr;
+ u8 data[];
+} __packed;
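A small sketch of how the length field is normally consumed, mirroring the usage seen later in iwlagn_rx_calib_result; the helper name is illustrative:

static inline u32 example_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	/* bits 13:0 hold the frame size; the upper bits are flags */
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}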
/**
* enum CMD_MODE - how to send the host commands ?
@@ -173,7 +230,9 @@
* struct iwl_host_cmd - Host command to the uCode
*
* @data: array of chunks that composes the data of the host command
- * @reply_page: pointer to the page that holds the response to the host command
+ * @resp_pkt: response packet, if %CMD_WANT_SKB was set
+ * @_rx_page_order: (internally used to free response packet)
+ * @_rx_page_addr: (internally used to free response packet)
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
@@ -183,7 +242,9 @@
*/
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TFDS];
- unsigned long reply_page;
+ struct iwl_rx_packet *resp_pkt;
+ unsigned long _rx_page_addr;
+ u32 _rx_page_order;
int handler_status;
u32 flags;
@@ -192,6 +253,40 @@
u8 id;
};
+static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
+{
+ free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
+}
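A hedged sketch of the CMD_WANT_SKB flow under the new response layout, assuming the CMD_SYNC/CMD_WANT_SKB flags from enum CMD_MODE; the command id is a placeholder, not taken from this patch:

static int example_send_and_read_resp(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = 0x01,			/* hypothetical command id */
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* cmd.resp_pkt now points at the response; inspect cmd.resp_pkt->data */

	iwl_free_resp(&cmd);	/* releases _rx_page_addr/_rx_page_order */
	return 0;
}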
+
+struct iwl_rx_cmd_buffer {
+ struct page *_page;
+};
+
+static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
+{
+ return page_address(r->_page);
+}
+
+static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
+{
+ struct page *p = r->_page;
+ r->_page = NULL;
+ return p;
+}
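Illustrative sketch only: a handler reads the packet via rxb_addr() and steals the page when the data must outlive the handler; the opcode check below is invented:

static struct page *example_keep_rx_page(struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	/* only steal the page when the (hypothetical) opcode needs deferred work */
	if (pkt->hdr.cmd != 0xff)
		return NULL;

	/* ownership moves to the caller, which must free the page later */
	return rxb_steal_page(rxb);
}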
+
+/**
+ * struct iwl_trans_config - transport configuration
+ *
+ * @op_mode: pointer to the upper layer.
+ * Must be set before any other call.
+ * @cmd_queue: the index of the command queue.
+ * Must be set before start_fw.
+ */
+struct iwl_trans_config {
+ struct iwl_op_mode *op_mode;
+ u8 cmd_queue;
+};
+
/**
* struct iwl_trans_ops - transport specific operations
*
@@ -207,9 +302,11 @@
* May sleep
* @fw_alive: called when the fw sends alive notification
* May sleep
- * @wake_any_queue: wake all the queues of a specfic context IWL_RXON_CTX_*
* @stop_device:stops the whole device (embedded CPU put to reset)
* May sleep
+ * @wowlan_suspend: put the device into the correct mode for WoWLAN during
+ *	suspend. This is optional; if not implemented, WoWLAN will not be
+ *	supported. This callback may sleep.
* @send_cmd:send a host command
* May sleep only if CMD_SYNC is set
* @tx: send an skb
@@ -217,17 +314,16 @@
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
* @tx_agg_alloc: allocate resources for a TX BA session
- * May sleep
+ * Must be atomic
* @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* May sleep
* @tx_agg_disable: de-configure a Tx queue to send AMPDUs
- * May sleep
+ * Must be atomic
 * @free: release all the resources for the transport layer itself, such as
* irq, tasklet etc... From this point on, the device may not issue
* any interrupt (incl. RFKILL).
* May sleep
- * @stop_queue: stop a specific queue
* @check_stuck_queue: check if a specific queue is stuck
* @wait_tx_queue_empty: wait until all tx queues are empty
* May sleep
@@ -238,18 +334,19 @@
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
+ * @configure: configure parameters required by the transport layer from
+ *	the op_mode. May be called several times before start_fw; it can't
+ *	be called after that.
*/
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans);
- int (*start_fw)(struct iwl_trans *trans, struct fw_img *fw);
+ int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
void (*fw_alive)(struct iwl_trans *trans);
void (*stop_device)(struct iwl_trans *trans);
- void (*wake_any_queue)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg);
+ void (*wowlan_suspend)(struct iwl_trans *trans);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -257,8 +354,7 @@
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid);
int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
- int txq_id, int ssn, u32 status,
- struct sk_buff_head *skbs);
+ int txq_id, int ssn, struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
int sta_id, int tid);
@@ -270,8 +366,6 @@
void (*free)(struct iwl_trans *trans);
- void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
-
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
@@ -282,14 +376,8 @@
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- struct list_head list;
- size_t cmd_len;
- struct iwl_calib_hdr hdr;
- /* data follows */
+ void (*configure)(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
};
/**
@@ -309,52 +397,44 @@
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
- * @hcmd_lock: protects HCMD
* @reg_lock - protect hw register access
* @dev - pointer to struct device * that represents the device
- * @irq - the irq number for the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
- * @ucode_write_complete: indicates that the ucode has been copied.
* @nvm_device_type: indicates OTP or eeprom
* @pm_support: set to true in start_hw if link pm is supported
- * @calib_results: list head for init calibration results
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
struct iwl_shared *shrd;
enum iwl_trans_state state;
- spinlock_t hcmd_lock;
spinlock_t reg_lock;
struct device *dev;
- unsigned int irq;
u32 hw_rev;
u32 hw_id;
char hw_id_str[52];
- u8 ucode_write_complete;
-
int nvm_device_type;
bool pm_support;
- struct list_head calib_results;
-
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
};
static inline void iwl_trans_configure(struct iwl_trans *trans,
- struct iwl_op_mode *op_mode)
+ const struct iwl_trans_config *trans_cfg)
{
/*
* only set the op_mode for the moment. Later on, this function will do
* more
*/
- trans->op_mode = op_mode;
+ trans->op_mode = trans_cfg->op_mode;
+
+ trans->ops->configure(trans, trans_cfg);
}
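A hedged usage sketch tying struct iwl_trans_config to iwl_trans_configure(); the queue index below is an arbitrary example value, not taken from this patch:

static void example_configure_transport(struct iwl_trans *trans,
					struct iwl_op_mode *op_mode)
{
	struct iwl_trans_config trans_cfg = {
		.op_mode = op_mode,
		.cmd_queue = 4,		/* hypothetical command queue index */
	};

	iwl_trans_configure(trans, &trans_cfg);
}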
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
@@ -382,7 +462,8 @@
trans->state = IWL_TRANS_FW_ALIVE;
}
-static inline int iwl_trans_start_fw(struct iwl_trans *trans, struct fw_img *fw)
+static inline int iwl_trans_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw)
{
might_sleep();
@@ -398,17 +479,12 @@
trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg)
+static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans)
{
- if (trans->state != IWL_TRANS_FW_ALIVE)
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
- trans->ops->wake_any_queue(trans, ctx, msg);
+ might_sleep();
+ trans->ops->wowlan_suspend(trans);
}
-
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
@@ -418,9 +494,6 @@
return trans->ops->send_cmd(trans, cmd);
}
-int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
- u32 flags, u16 len, const void *data);
-
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid)
@@ -432,21 +505,18 @@
}
static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
- int tid, int txq_id, int ssn, u32 status,
+ int tid, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
- return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
- status, skbs);
+ return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
int sta_id, int tid)
{
- might_sleep();
-
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
@@ -456,8 +526,6 @@
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
int sta_id, int tid)
{
- might_sleep();
-
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
@@ -483,15 +551,6 @@
trans->ops->free(trans);
}
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
- const char *msg)
-{
- if (trans->state != IWL_TRANS_FW_ALIVE)
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
- trans->ops->stop_queue(trans, q, msg);
-}
-
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
@@ -541,14 +600,6 @@
}
/*****************************************************
-* Utils functions
-******************************************************/
-int iwl_send_calib_results(struct iwl_trans *trans);
-int iwl_calib_set(struct iwl_trans *trans,
- const struct iwl_calib_hdr *cmd, int len);
-void iwl_calib_free_results(struct iwl_trans *trans);
-
-/*****************************************************
* Transport layers implementations + their allocation function
******************************************************/
struct pci_dev;
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index b16efc0..d97cf44 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -28,14 +28,8 @@
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/firmware.h>
-#include "iwl-ucode.h"
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -83,56 +77,16 @@
*
******************************************************************************/
-static void iwl_free_fw_desc(struct iwl_nic *nic, struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(trans(nic)->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_nic *nic, struct fw_img *img)
-{
- iwl_free_fw_desc(nic, &img->code);
- iwl_free_fw_desc(nic, &img->data);
-}
-
-void iwl_dealloc_ucode(struct iwl_nic *nic)
-{
- iwl_free_fw_img(nic, &nic->fw.ucode_rt);
- iwl_free_fw_img(nic, &nic->fw.ucode_init);
- iwl_free_fw_img(nic, &nic->fw.ucode_wowlan);
-}
-
-static int iwl_alloc_fw_desc(struct iwl_nic *nic, struct fw_desc *desc,
- const void *data, size_t len)
-{
- if (!len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(trans(nic)->dev, len,
- &desc->p_addr, GFP_KERNEL);
- if (!desc->v_addr)
- return -ENOMEM;
-
- desc->len = len;
- memcpy(desc->v_addr, data, len);
- return 0;
-}
-
-static inline struct fw_img *iwl_get_ucode_image(struct iwl_nic *nic,
- enum iwl_ucode_type ucode_type)
+static inline const struct fw_img *
+iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
{
switch (ucode_type) {
case IWL_UCODE_INIT:
- return &nic->fw.ucode_init;
+ return &priv->fw->ucode_init;
case IWL_UCODE_WOWLAN:
- return &nic->fw.ucode_wowlan;
+ return &priv->fw->ucode_wowlan;
case IWL_UCODE_REGULAR:
- return &nic->fw.ucode_rt;
+ return &priv->fw->ucode_rt;
case IWL_UCODE_NONE:
break;
}
@@ -142,23 +96,23 @@
/*
* Calibration
*/
-static int iwl_set_Xtal_calib(struct iwl_trans *trans)
+static int iwl_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL);
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+ return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
}
-static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
+static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
@@ -167,48 +121,48 @@
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
- IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
le16_to_cpu(cmd.radio_sensor_offset));
- return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+ return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
}
-static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_KELVIN_TEMPERATURE);
__le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_RAW_TEMPERATURE);
struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_CALIB_ALL);
memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
sizeof(*offset_calib_high));
memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
sizeof(*offset_calib_low));
if (!(cmd.radio_sensor_offset_low)) {
- IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
+ IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
memcpy(&cmd.burntVoltageRef, &hdr->voltage,
sizeof(hdr->voltage));
- IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
- IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_low));
- IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
+ IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
le16_to_cpu(cmd.burntVoltageRef));
- return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+ return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
}
-static int iwl_send_calib_cfg(struct iwl_trans *trans)
+static int iwl_send_calib_cfg(struct iwl_priv *priv)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
@@ -224,47 +178,47 @@
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_trans_send_cmd(trans, &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
int iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+ struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
/* reduce the size of the length field itself */
len -= 4;
- if (iwl_calib_set(trans(priv), hdr, len))
+ if (iwl_calib_set(priv, hdr, len))
IWL_ERR(priv, "Failed to record calibration data %d\n",
hdr->op_code);
return 0;
}
-int iwl_init_alive_start(struct iwl_trans *trans)
+int iwl_init_alive_start(struct iwl_priv *priv)
{
int ret;
- if (cfg(trans)->bt_params &&
- cfg(trans)->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/*
* Tell uCode we are ready to perform calibration
* need to perform this before any calibration
* no need to close the envlope since we are going
* to load the runtime uCode later.
*/
- ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
}
- ret = iwl_send_calib_cfg(trans);
+ ret = iwl_send_calib_cfg(priv);
if (ret)
return ret;
@@ -272,21 +226,21 @@
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (cfg(trans)->need_temp_offset_calib) {
- if (cfg(trans)->temp_offset_v2)
- return iwl_set_temperature_offset_calib_v2(trans);
+ if (cfg(priv)->need_temp_offset_calib) {
+ if (cfg(priv)->temp_offset_v2)
+ return iwl_set_temperature_offset_calib_v2(priv);
else
- return iwl_set_temperature_offset_calib(trans);
+ return iwl_set_temperature_offset_calib(priv);
}
return 0;
}
-static int iwl_send_wimax_coex(struct iwl_trans *trans)
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
{
struct iwl_wimax_coex_cmd coex_cmd;
- if (cfg(trans)->base_params->support_wimax_coexist) {
+ if (cfg(priv)->base_params->support_wimax_coexist) {
/* UnMask wake up src at associated sleep */
coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
@@ -305,7 +259,7 @@
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return iwl_trans_send_cmd_pdu(trans,
+ return iwl_dvm_send_cmd_pdu(priv,
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
@@ -332,64 +286,54 @@
0, 0, 0, 0, 0, 0, 0
};
-void iwl_send_prio_tbl(struct iwl_trans *trans)
+void iwl_send_prio_tbl(struct iwl_priv *priv)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
sizeof(iwl_bt_prio_tbl));
- if (iwl_trans_send_cmd_pdu(trans,
+ if (iwl_dvm_send_cmd_pdu(priv,
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
- IWL_ERR(trans, "failed to send BT prio tbl command\n");
+ IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
-int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
int ret;
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_trans_send_cmd_pdu(trans,
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
- IWL_ERR(trans, "failed to send BT env command\n");
+ IWL_ERR(priv, "failed to send BT env command\n");
return ret;
}
-static int iwl_alive_notify(struct iwl_trans *trans)
+static int iwl_alive_notify(struct iwl_priv *priv)
{
- struct iwl_priv *priv = priv(trans);
- struct iwl_rxon_context *ctx;
int ret;
- if (!priv->tx_cmd_pool)
- priv->tx_cmd_pool =
- kmem_cache_create("iwl_dev_cmd",
- sizeof(struct iwl_device_cmd),
- sizeof(void *), 0, NULL);
+ iwl_trans_fw_alive(trans(priv));
- if (!priv->tx_cmd_pool)
- return -ENOMEM;
+ priv->passive_no_rx = false;
+ priv->transport_queue_stop = 0;
- iwl_trans_fw_alive(trans);
- for_each_context(priv, ctx)
- ctx->last_tx_rejected = false;
-
- ret = iwl_send_wimax_coex(trans);
+ ret = iwl_send_wimax_coex(priv);
if (ret)
return ret;
if (!cfg(priv)->no_xtal_calib) {
- ret = iwl_set_Xtal_calib(trans);
+ ret = iwl_set_Xtal_calib(priv);
if (ret)
return ret;
}
- return iwl_send_calib_results(trans);
+ return iwl_send_calib_results(priv);
}
@@ -398,24 +342,23 @@
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwl_verify_inst_sparse(struct iwl_nic *nic,
- struct fw_desc *fw_desc)
+static int iwl_verify_inst_sparse(struct iwl_priv *priv,
+ const struct fw_desc *fw_desc)
{
- struct iwl_trans *trans = trans(nic);
__le32 *image = (__le32 *)fw_desc->v_addr;
u32 len = fw_desc->len;
u32 val;
u32 i;
- IWL_DEBUG_FW(nic, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(trans, HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@@ -423,28 +366,27 @@
return 0;
}
-static void iwl_print_mismatch_inst(struct iwl_nic *nic,
- struct fw_desc *fw_desc)
+static void iwl_print_mismatch_inst(struct iwl_priv *priv,
+ const struct fw_desc *fw_desc)
{
- struct iwl_trans *trans = trans(nic);
__le32 *image = (__le32 *)fw_desc->v_addr;
u32 len = fw_desc->len;
u32 val;
u32 offs;
int errors = 0;
- IWL_DEBUG_FW(nic, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
- iwl_write_direct32(trans, HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
- IWL_ERR(nic, "uCode INST section at "
+ IWL_ERR(priv, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
offs, val, le32_to_cpu(*image));
errors++;
@@ -456,24 +398,24 @@
* iwl_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
-static int iwl_verify_ucode(struct iwl_nic *nic,
+static int iwl_verify_ucode(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type)
{
- struct fw_img *img = iwl_get_ucode_image(nic, ucode_type);
+ const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);
if (!img) {
- IWL_ERR(nic, "Invalid ucode requested (%d)\n", ucode_type);
+ IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
return -EINVAL;
}
- if (!iwl_verify_inst_sparse(nic, &img->code)) {
- IWL_DEBUG_FW(nic, "uCode is good in inst SRAM\n");
+ if (!iwl_verify_inst_sparse(priv, &img->code)) {
+ IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
return 0;
}
- IWL_ERR(nic, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+ IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
- iwl_print_mismatch_inst(nic, &img->code);
+ iwl_print_mismatch_inst(priv, &img->code);
return -EIO;
}
@@ -482,119 +424,57 @@
u8 subtype;
};
-static void iwl_alive_fn(struct iwl_trans *trans,
+static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt,
void *data)
{
+ struct iwl_priv *priv =
+ container_of(notif_wait, struct iwl_priv, notif_wait);
struct iwl_alive_data *alive_data = data;
struct iwl_alive_resp *palive;
- palive = &pkt->u.alive_frame;
+ palive = (void *)pkt->data;
- IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
- trans->shrd->device_pointers.error_event_table =
+ priv->shrd->device_pointers.error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
- trans->shrd->device_pointers.log_event_table =
+ priv->shrd->device_pointers.log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->subtype = palive->ver_subtype;
alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
-/* notification wait support */
-void iwl_init_notification_wait(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_trans *trans,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data)
-{
- wait_entry->fn = fn;
- wait_entry->fn_data = fn_data;
- wait_entry->cmd = cmd;
- wait_entry->triggered = false;
- wait_entry->aborted = false;
-
- spin_lock_bh(&shrd->notif_wait_lock);
- list_add(&wait_entry->list, &shrd->notif_waits);
- spin_unlock_bh(&shrd->notif_wait_lock);
-}
-
-int iwl_wait_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout)
-{
- int ret;
-
- ret = wait_event_timeout(shrd->notif_waitq,
- wait_entry->triggered || wait_entry->aborted,
- timeout);
-
- spin_lock_bh(&shrd->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&shrd->notif_wait_lock);
-
- if (wait_entry->aborted)
- return -EIO;
-
- /* return value is always >= 0 */
- if (ret <= 0)
- return -ETIMEDOUT;
- return 0;
-}
-
-void iwl_remove_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry)
-{
- spin_lock_bh(&shrd->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&shrd->notif_wait_lock);
-}
-
-void iwl_abort_notification_waits(struct iwl_shared *shrd)
-{
- unsigned long flags;
- struct iwl_notification_wait *wait_entry;
-
- spin_lock_irqsave(&shrd->notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &shrd->notif_waits, list)
- wait_entry->aborted = true;
- spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
-
- wake_up_all(&shrd->notif_waitq);
-}
-
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
-int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
struct iwl_alive_data alive_data;
- struct fw_img *fw;
+ const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
- iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
- iwl_alive_fn, &alive_data);
-
- old_type = trans->shrd->ucode_type;
- trans->shrd->ucode_type = ucode_type;
- fw = iwl_get_ucode_image(nic(trans), ucode_type);
+ old_type = priv->shrd->ucode_type;
+ priv->shrd->ucode_type = ucode_type;
+ fw = iwl_get_ucode_image(priv, ucode_type);
if (!fw)
return -EINVAL;
- ret = iwl_trans_start_fw(trans, fw);
+ iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE,
+ iwl_alive_fn, &alive_data);
+
+ ret = iwl_trans_start_fw(trans(priv), fw);
if (ret) {
- trans->shrd->ucode_type = old_type;
- iwl_remove_notification(trans->shrd, &alive_wait);
+ priv->shrd->ucode_type = old_type;
+ iwl_remove_notification(&priv->notif_wait, &alive_wait);
return ret;
}
@@ -602,16 +482,16 @@
* Some things may run in the background now, but we
* just wait for the ALIVE notification here.
*/
- ret = iwl_wait_notification(trans->shrd, &alive_wait,
+ ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
UCODE_ALIVE_TIMEOUT);
if (ret) {
- trans->shrd->ucode_type = old_type;
+ priv->shrd->ucode_type = old_type;
return ret;
}
if (!alive_data.valid) {
- IWL_ERR(trans, "Loaded ucode is not valid!\n");
- trans->shrd->ucode_type = old_type;
+ IWL_ERR(priv, "Loaded ucode is not valid!\n");
+ priv->shrd->ucode_type = old_type;
return -EIO;
}
@@ -621,9 +501,9 @@
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(nic(trans), ucode_type);
+ ret = iwl_verify_ucode(priv, ucode_type);
if (ret) {
- trans->shrd->ucode_type = old_type;
+ priv->shrd->ucode_type = old_type;
return ret;
}
@@ -631,41 +511,41 @@
msleep(5);
}
- ret = iwl_alive_notify(trans);
+ ret = iwl_alive_notify(priv);
if (ret) {
- IWL_WARN(trans,
+ IWL_WARN(priv,
"Could not complete ALIVE transition: %d\n", ret);
- trans->shrd->ucode_type = old_type;
+ priv->shrd->ucode_type = old_type;
return ret;
}
return 0;
}
-int iwl_run_init_ucode(struct iwl_trans *trans)
+int iwl_run_init_ucode(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&trans->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* No init ucode required? Curious, but maybe ok */
- if (!nic(trans)->fw.ucode_init.code.len)
+ if (!priv->fw->ucode_init.code.len)
return 0;
- if (trans->shrd->ucode_type != IWL_UCODE_NONE)
+ if (priv->shrd->ucode_type != IWL_UCODE_NONE)
return 0;
- iwl_init_notification_wait(trans->shrd, &calib_wait,
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
/* Will also start the device */
- ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
+ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (ret)
goto error;
- ret = iwl_init_alive_start(trans);
+ ret = iwl_init_alive_start(priv);
if (ret)
goto error;
@@ -673,594 +553,15 @@
* Some things may run in the background now, but we
* just wait for the calibration complete notification.
*/
- ret = iwl_wait_notification(trans->shrd, &calib_wait,
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
UCODE_CALIB_TIMEOUT);
goto out;
error:
- iwl_remove_notification(trans->shrd, &calib_wait);
+ iwl_remove_notification(&priv->notif_wait, &calib_wait);
out:
/* Whatever happened, stop the device */
- iwl_trans_stop_device(trans);
+ iwl_trans_stop_device(trans(priv));
return ret;
}
-
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-
-#define UCODE_EXPERIMENTAL_TAG "exp"
-
-int __must_check iwl_request_firmware(struct iwl_nic *nic, bool first)
-{
- struct iwl_cfg *cfg = cfg(nic);
- const char *name_pre = cfg->fw_name_pre;
- char tag[8];
-
- if (first) {
-#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
- nic->fw_index = UCODE_EXPERIMENTAL_INDEX;
- strcpy(tag, UCODE_EXPERIMENTAL_TAG);
- } else if (nic->fw_index == UCODE_EXPERIMENTAL_INDEX) {
-#endif
- nic->fw_index = cfg->ucode_api_max;
- sprintf(tag, "%d", nic->fw_index);
- } else {
- nic->fw_index--;
- sprintf(tag, "%d", nic->fw_index);
- }
-
- if (nic->fw_index < cfg->ucode_api_min) {
- IWL_ERR(nic, "no suitable firmware found!\n");
- return -ENOENT;
- }
-
- sprintf(nic->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
- IWL_DEBUG_INFO(nic, "attempting to load firmware %s'%s'\n",
- (nic->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? "EXPERIMENTAL " : "",
- nic->firmware_name);
-
- return request_firmware_nowait(THIS_MODULE, 1, nic->firmware_name,
- trans(nic)->dev,
- GFP_KERNEL, nic, iwl_ucode_callback);
-}
-
-struct iwlagn_firmware_pieces {
- const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
- size_t inst_size, data_size, init_size, init_data_size,
- wowlan_inst_size, wowlan_data_size;
-
- u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
- u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
-};
-
-static int iwl_parse_v1_v2_firmware(struct iwl_nic *nic,
- const struct firmware *ucode_raw,
- struct iwlagn_firmware_pieces *pieces)
-{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
- u32 api_ver, hdr_size, build;
- char buildstr[25];
- const u8 *src;
-
- nic->fw.ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(nic->fw.ucode_ver);
-
- switch (api_ver) {
- default:
- hdr_size = 28;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(nic, "File size too small!\n");
- return -EINVAL;
- }
- build = le32_to_cpu(ucode->u.v2.build);
- pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
- pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
- pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
- pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
- src = ucode->u.v2.data;
- break;
- case 0:
- case 1:
- case 2:
- hdr_size = 24;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(nic, "File size too small!\n");
- return -EINVAL;
- }
- build = 0;
- pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
- pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
- pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
- pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
- src = ucode->u.v1.data;
- break;
- }
-
- if (build)
- sprintf(buildstr, " build %u%s", build,
- (nic->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
- else
- buildstr[0] = '\0';
-
- snprintf(nic->fw.fw_version,
- sizeof(nic->fw.fw_version),
- "%u.%u.%u.%u%s",
- IWL_UCODE_MAJOR(nic->fw.ucode_ver),
- IWL_UCODE_MINOR(nic->fw.ucode_ver),
- IWL_UCODE_API(nic->fw.ucode_ver),
- IWL_UCODE_SERIAL(nic->fw.ucode_ver),
- buildstr);
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != hdr_size + pieces->inst_size +
- pieces->data_size + pieces->init_size +
- pieces->init_data_size) {
-
- IWL_ERR(nic,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- return -EINVAL;
- }
-
- pieces->inst = src;
- src += pieces->inst_size;
- pieces->data = src;
- src += pieces->data_size;
- pieces->init = src;
- src += pieces->init_size;
- pieces->init_data = src;
- src += pieces->init_data_size;
-
- return 0;
-}
-
-static int iwl_parse_tlv_firmware(struct iwl_nic *nic,
- const struct firmware *ucode_raw,
- struct iwlagn_firmware_pieces *pieces,
- struct iwl_ucode_capabilities *capa)
-{
- struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
- struct iwl_ucode_tlv *tlv;
- size_t len = ucode_raw->size;
- const u8 *data;
- int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
- int tmp;
- u64 alternatives;
- u32 tlv_len;
- enum iwl_ucode_tlv_type tlv_type;
- const u8 *tlv_data;
- char buildstr[25];
- u32 build;
-
- if (len < sizeof(*ucode)) {
- IWL_ERR(nic, "uCode has invalid length: %zd\n", len);
- return -EINVAL;
- }
-
- if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
- IWL_ERR(nic, "invalid uCode magic: 0X%x\n",
- le32_to_cpu(ucode->magic));
- return -EINVAL;
- }
-
- /*
- * Check which alternatives are present, and "downgrade"
- * when the chosen alternative is not present, warning
- * the user when that happens. Some files may not have
- * any alternatives, so don't warn in that case.
- */
- alternatives = le64_to_cpu(ucode->alternatives);
- tmp = wanted_alternative;
- if (wanted_alternative > 63)
- wanted_alternative = 63;
- while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
- wanted_alternative--;
- if (wanted_alternative && wanted_alternative != tmp)
- IWL_WARN(nic,
- "uCode alternative %d not available, choosing %d\n",
- tmp, wanted_alternative);
-
- nic->fw.ucode_ver = le32_to_cpu(ucode->ver);
- build = le32_to_cpu(ucode->build);
-
- if (build)
- sprintf(buildstr, " build %u%s", build,
- (nic->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
- else
- buildstr[0] = '\0';
-
- snprintf(nic->fw.fw_version,
- sizeof(nic->fw.fw_version),
- "%u.%u.%u.%u%s",
- IWL_UCODE_MAJOR(nic->fw.ucode_ver),
- IWL_UCODE_MINOR(nic->fw.ucode_ver),
- IWL_UCODE_API(nic->fw.ucode_ver),
- IWL_UCODE_SERIAL(nic->fw.ucode_ver),
- buildstr);
-
- data = ucode->data;
-
- len -= sizeof(*ucode);
-
- while (len >= sizeof(*tlv)) {
- u16 tlv_alt;
-
- len -= sizeof(*tlv);
- tlv = (void *)data;
-
- tlv_len = le32_to_cpu(tlv->length);
- tlv_type = le16_to_cpu(tlv->type);
- tlv_alt = le16_to_cpu(tlv->alternative);
- tlv_data = tlv->data;
-
- if (len < tlv_len) {
- IWL_ERR(nic, "invalid TLV len: %zd/%u\n",
- len, tlv_len);
- return -EINVAL;
- }
- len -= ALIGN(tlv_len, 4);
- data += sizeof(*tlv) + ALIGN(tlv_len, 4);
-
- /*
- * Alternative 0 is always valid.
- *
- * Skip alternative TLVs that are not selected.
- */
- if (tlv_alt != 0 && tlv_alt != wanted_alternative)
- continue;
-
- switch (tlv_type) {
- case IWL_UCODE_TLV_INST:
- pieces->inst = tlv_data;
- pieces->inst_size = tlv_len;
- break;
- case IWL_UCODE_TLV_DATA:
- pieces->data = tlv_data;
- pieces->data_size = tlv_len;
- break;
- case IWL_UCODE_TLV_INIT:
- pieces->init = tlv_data;
- pieces->init_size = tlv_len;
- break;
- case IWL_UCODE_TLV_INIT_DATA:
- pieces->init_data = tlv_data;
- pieces->init_data_size = tlv_len;
- break;
- case IWL_UCODE_TLV_BOOT:
- IWL_ERR(nic, "Found unexpected BOOT ucode\n");
- break;
- case IWL_UCODE_TLV_PROBE_MAX_LEN:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- capa->max_probe_length =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_PAN:
- if (tlv_len)
- goto invalid_tlv_len;
- capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
- break;
- case IWL_UCODE_TLV_FLAGS:
- /* must be at least one u32 */
- if (tlv_len < sizeof(u32))
- goto invalid_tlv_len;
- /* and a proper number of u32s */
- if (tlv_len % sizeof(u32))
- goto invalid_tlv_len;
- /*
- * This driver only reads the first u32 as
- * right now no more features are defined,
- * if that changes then either the driver
- * will not work with the new firmware, or
- * it'll not take advantage of new features.
- */
- capa->flags = le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->init_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->init_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->init_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->inst_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->inst_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->inst_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
- if (tlv_len)
- goto invalid_tlv_len;
- nic->fw.enhance_sensitivity_table = true;
- break;
- case IWL_UCODE_TLV_WOWLAN_INST:
- pieces->wowlan_inst = tlv_data;
- pieces->wowlan_inst_size = tlv_len;
- break;
- case IWL_UCODE_TLV_WOWLAN_DATA:
- pieces->wowlan_data = tlv_data;
- pieces->wowlan_data_size = tlv_len;
- break;
- case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- capa->standard_phy_calibration_size =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- default:
- IWL_DEBUG_INFO(nic, "unknown TLV: %d\n", tlv_type);
- break;
- }
- }
-
- if (len) {
- IWL_ERR(nic, "invalid TLV after parsing: %zd\n", len);
- iwl_print_hex_dump(nic, IWL_DL_FW, (u8 *)data, len);
- return -EINVAL;
- }
-
- return 0;
-
- invalid_tlv_len:
- IWL_ERR(nic, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
- iwl_print_hex_dump(nic, IWL_DL_FW, tlv_data, tlv_len);
-
- return -EINVAL;
-}
-
-/**
- * iwl_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
- struct iwl_nic *nic = context;
- struct iwl_cfg *cfg = cfg(nic);
- struct iwl_fw *fw = &nic->fw;
- struct iwl_ucode_header *ucode;
- int err;
- struct iwlagn_firmware_pieces pieces;
- const unsigned int api_max = cfg->ucode_api_max;
- unsigned int api_ok = cfg->ucode_api_ok;
- const unsigned int api_min = cfg->ucode_api_min;
- u32 api_ver;
-
- fw->ucode_capa.max_probe_length = 200;
- fw->ucode_capa.standard_phy_calibration_size =
- IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- if (!api_ok)
- api_ok = api_max;
-
- memset(&pieces, 0, sizeof(pieces));
-
- if (!ucode_raw) {
- if (nic->fw_index <= api_ok)
- IWL_ERR(nic,
- "request for firmware file '%s' failed.\n",
- nic->firmware_name);
- goto try_again;
- }
-
- IWL_DEBUG_INFO(nic, "Loaded firmware file '%s' (%zd bytes).\n",
- nic->firmware_name, ucode_raw->size);
-
- /* Make sure that we got at least the API version number */
- if (ucode_raw->size < 4) {
- IWL_ERR(nic, "File size way too small!\n");
- goto try_again;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- if (ucode->ver)
- err = iwl_parse_v1_v2_firmware(nic, ucode_raw, &pieces);
- else
- err = iwl_parse_tlv_firmware(nic, ucode_raw, &pieces,
- &fw->ucode_capa);
-
- if (err)
- goto try_again;
-
- api_ver = IWL_UCODE_API(nic->fw.ucode_ver);
-
- /*
- * api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward
- */
- /* no api version check required for experimental uCode */
- if (nic->fw_index != UCODE_EXPERIMENTAL_INDEX) {
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(nic,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver < api_ok) {
- if (api_ok != api_max)
- IWL_ERR(nic, "Firmware has old API version, "
- "expected v%u through v%u, got v%u.\n",
- api_ok, api_max, api_ver);
- else
- IWL_ERR(nic, "Firmware has old API version, "
- "expected v%u, got v%u.\n",
- api_max, api_ver);
- IWL_ERR(nic, "New firmware can be obtained from "
- "http://www.intellinuxwireless.org/.\n");
- }
- }
-
- IWL_INFO(nic, "loaded firmware version %s", nic->fw.fw_version);
-
- /*
- * For any of the failures below (before allocating pci memory)
- * we will try to load a version with a smaller API -- maybe the
- * user just got a corrupted version of the latest API.
- */
-
- IWL_DEBUG_INFO(nic, "f/w package hdr ucode version raw = 0x%x\n",
- nic->fw.ucode_ver);
- IWL_DEBUG_INFO(nic, "f/w package hdr runtime inst size = %Zd\n",
- pieces.inst_size);
- IWL_DEBUG_INFO(nic, "f/w package hdr runtime data size = %Zd\n",
- pieces.data_size);
- IWL_DEBUG_INFO(nic, "f/w package hdr init inst size = %Zd\n",
- pieces.init_size);
- IWL_DEBUG_INFO(nic, "f/w package hdr init data size = %Zd\n",
- pieces.init_data_size);
-
- /* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > cfg->max_inst_size) {
- IWL_ERR(nic, "uCode instr len %Zd too large to fit in\n",
- pieces.inst_size);
- goto try_again;
- }
-
- if (pieces.data_size > cfg->max_data_size) {
- IWL_ERR(nic, "uCode data len %Zd too large to fit in\n",
- pieces.data_size);
- goto try_again;
- }
-
- if (pieces.init_size > cfg->max_inst_size) {
- IWL_ERR(nic, "uCode init instr len %Zd too large to fit in\n",
- pieces.init_size);
- goto try_again;
- }
-
- if (pieces.init_data_size > cfg->max_data_size) {
- IWL_ERR(nic, "uCode init data len %Zd too large to fit in\n",
- pieces.init_data_size);
- goto try_again;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- if (iwl_alloc_fw_desc(nic, &nic->fw.ucode_rt.code,
- pieces.inst, pieces.inst_size))
- goto err_pci_alloc;
- if (iwl_alloc_fw_desc(nic, &nic->fw.ucode_rt.data,
- pieces.data, pieces.data_size))
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (pieces.init_size && pieces.init_data_size) {
- if (iwl_alloc_fw_desc(nic,
- &nic->fw.ucode_init.code,
- pieces.init, pieces.init_size))
- goto err_pci_alloc;
- if (iwl_alloc_fw_desc(nic,
- &nic->fw.ucode_init.data,
- pieces.init_data, pieces.init_data_size))
- goto err_pci_alloc;
- }
-
- /* WoWLAN instructions and data */
- if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
- if (iwl_alloc_fw_desc(nic,
- &nic->fw.ucode_wowlan.code,
- pieces.wowlan_inst,
- pieces.wowlan_inst_size))
- goto err_pci_alloc;
- if (iwl_alloc_fw_desc(nic,
- &nic->fw.ucode_wowlan.data,
- pieces.wowlan_data,
- pieces.wowlan_data_size))
- goto err_pci_alloc;
- }
-
- /* Now that we can no longer fail, copy information */
-
- /*
- * The (size - 16) / 12 formula is based on the information recorded
- * for each event, which is of mode 1 (including timestamp) for all
- * new microcodes that include this information.
- */
- nic->init_evtlog_ptr = pieces.init_evtlog_ptr;
- if (pieces.init_evtlog_size)
- nic->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
- else
- nic->init_evtlog_size =
- cfg->base_params->max_event_log_size;
- nic->init_errlog_ptr = pieces.init_errlog_ptr;
- nic->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
- if (pieces.inst_evtlog_size)
- nic->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
- else
- nic->inst_evtlog_size =
- cfg->base_params->max_event_log_size;
- nic->inst_errlog_ptr = pieces.inst_errlog_ptr;
-
- /*
- * figure out the offset of chain noise reset and gain commands
- * base on the size of standard phy calibration commands table size
- */
- if (fw->ucode_capa.standard_phy_calibration_size >
- IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
- fw->ucode_capa.standard_phy_calibration_size =
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- /* We have our copies now, allow OS release its copies */
- release_firmware(ucode_raw);
- complete(&nic->request_firmware_complete);
-
- nic->op_mode = iwl_dvm_ops.start(nic->shrd->trans);
-
- if (!nic->op_mode)
- goto out_unbind;
-
- return;
-
- try_again:
- /* try next, if any */
- release_firmware(ucode_raw);
- if (iwl_request_firmware(nic, false))
- goto out_unbind;
- return;
-
- err_pci_alloc:
- IWL_ERR(nic, "failed to allocate pci memory\n");
- iwl_dealloc_ucode(nic);
- release_firmware(ucode_raw);
- out_unbind:
- complete(&nic->request_firmware_complete);
- device_release_driver(trans(nic)->dev);
-}
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-wifi.h b/drivers/net/wireless/iwlwifi/iwl-wifi.h
deleted file mode 100644
index d5cba07..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-wifi.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_wifi_h__
-#define __iwl_wifi_h__
-
-#include "iwl-shared.h"
-#include "iwl-ucode.h"
-
-#define UCODE_EXPERIMENTAL_INDEX 100
-
-/**
- * struct iwl_nic - nic common data
- * @fw: the iwl_fw structure
- * @shrd: pointer to common shared structure
- * @op_mode: the running op_mode
- * @fw_index: firmware revision to try loading
- * @firmware_name: composite filename of ucode file to load
- * @init_evtlog_ptr: event log offset for init ucode.
- * @init_evtlog_size: event log size for init ucode.
- * @init_errlog_ptr: error log offset for init ucode.
- * @inst_evtlog_ptr: event log offset for runtime ucode.
- * @inst_evtlog_size: event log size for runtime ucode.
- * @inst_errlog_ptr: error log offset for runtime ucode.
- * @request_firmware_complete: the firmware has been obtained from user space
- */
-struct iwl_nic {
- struct iwl_fw fw;
-
- struct iwl_shared *shrd;
- struct iwl_op_mode *op_mode;
-
- int fw_index; /* firmware we're trying to load */
- char firmware_name[25]; /* name of firmware file to load */
-
- u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
- u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
-
- struct completion request_firmware_complete;
-};
-
-
-int __must_check iwl_request_firmware(struct iwl_nic *nic, bool first);
-void iwl_dealloc_ucode(struct iwl_nic *nic);
-
-int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
-void iwl_send_prio_tbl(struct iwl_trans *trans);
-int iwl_init_alive_start(struct iwl_trans *trans);
-int iwl_run_init_ucode(struct iwl_trans *trans);
-int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
- enum iwl_ucode_type ucode_type);
-#endif /* __iwl_wifi_h__ */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ba16f05..b4f6cb3 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/ktime.h>
#include <net/genetlink.h>
#include "mac80211_hwsim.h"
@@ -321,11 +322,15 @@
struct dentry *debugfs_group;
int power_level;
+
+ /* difference between this hw's clock and the real clock, in usecs */
+ u64 tsf_offset;
};
struct hwsim_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
+ __le64 rt_tsft;
u8 rt_flags;
u8 rt_rate;
__le16 rt_channel;
@@ -367,6 +372,28 @@
return NETDEV_TX_OK;
}
+static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data)
+{
+ struct timeval tv = ktime_to_timeval(ktime_get_real());
+ u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+ return cpu_to_le64(now + data->tsf_offset);
+}
+
+static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ return le64_to_cpu(__mac80211_hwsim_get_tsf(data));
+}
+
+static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ struct timeval tv = ktime_to_timeval(ktime_get_real());
+ u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+ data->tsf_offset = tsf - now;
+}
static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
struct sk_buff *tx_skb)
@@ -391,7 +418,9 @@
hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_RATE) |
+ (1 << IEEE80211_RADIOTAP_TSFT) |
(1 << IEEE80211_RADIOTAP_CHANNEL));
+ hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
hdr->rt_flags = 0;
hdr->rt_rate = txrate->bitrate / 5;
hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
@@ -610,7 +639,8 @@
}
memset(&rx_status, 0, sizeof(rx_status));
- /* TODO: set mactime */
+ rx_status.mactime = le64_to_cpu(__mac80211_hwsim_get_tsf(data));
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
rx_status.freq = data->channel->center_freq;
rx_status.band = data->channel->band;
rx_status.rate_idx = info->control.rates[0].idx;
@@ -667,6 +697,12 @@
bool ack;
struct ieee80211_tx_info *txi;
u32 _pid;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
+ struct mac80211_hwsim_data *data = hw->priv;
+
+ if (ieee80211_is_beacon(mgmt->frame_control) ||
+ ieee80211_is_probe_resp(mgmt->frame_control))
+ mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
mac80211_hwsim_monitor_rx(hw, skb);
@@ -763,9 +799,11 @@
struct ieee80211_vif *vif)
{
struct ieee80211_hw *hw = arg;
+ struct mac80211_hwsim_data *data = hw->priv;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
u32 _pid;
+ struct ieee80211_mgmt *mgmt;
hwsim_check_magic(vif);
@@ -779,6 +817,9 @@
return;
info = IEEE80211_SKB_CB(skb);
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
+
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
@@ -1199,6 +1240,8 @@
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
.flush = mac80211_hwsim_flush,
+ .get_tsf = mac80211_hwsim_get_tsf,
+ .set_tsf = mac80211_hwsim_set_tsf,
};
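The hwsim changes emulate a per-radio TSF by storing only an offset against the host clock: get_tsf returns "now + offset" and set_tsf stores "tsf - now", so the simulated counter keeps advancing with real time. A user-space model of the same idea (the microsecond clock source here is an assumption of the sketch, not hwsim's code):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sim_radio {
    /* difference between this radio's TSF and the host clock, in usecs */
    uint64_t tsf_offset;
};

static uint64_t now_usec(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

/* get_tsf: the simulated counter is just "real time plus offset". */
static uint64_t sim_get_tsf(const struct sim_radio *r)
{
    return now_usec() + r->tsf_offset;
}

/* set_tsf: remember how far the requested TSF is from real time. */
static void sim_set_tsf(struct sim_radio *r, uint64_t tsf)
{
    r->tsf_offset = tsf - now_usec();   /* unsigned wrap-around is intended */
}

int main(void)
{
    struct sim_radio r = { 0 };

    sim_set_tsf(&r, 1000000);           /* pretend the TSF reads 1 second */
    printf("tsf ~= %llu usec\n", (unsigned long long)sim_get_tsf(&r));
    return 0;
}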
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a460fb0..84508b0 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -613,7 +613,6 @@
{.bitrate = 20, .hw_value = 4, },
{.bitrate = 55, .hw_value = 11, },
{.bitrate = 110, .hw_value = 22, },
- {.bitrate = 220, .hw_value = 44, },
{.bitrate = 60, .hw_value = 12, },
{.bitrate = 90, .hw_value = 18, },
{.bitrate = 120, .hw_value = 24, },
@@ -622,7 +621,6 @@
{.bitrate = 360, .hw_value = 72, },
{.bitrate = 480, .hw_value = 96, },
{.bitrate = 540, .hw_value = 108, },
- {.bitrate = 720, .hw_value = 144, },
};
/* Channel definitions to be advertised to cfg80211 */
@@ -648,7 +646,7 @@
.channels = mwifiex_channels_2ghz,
.n_channels = ARRAY_SIZE(mwifiex_channels_2ghz),
.bitrates = mwifiex_rates,
- .n_bitrates = 14,
+ .n_bitrates = ARRAY_SIZE(mwifiex_rates),
};
static struct ieee80211_channel mwifiex_channels_5ghz[] = {
@@ -688,8 +686,8 @@
static struct ieee80211_supported_band mwifiex_band_5ghz = {
.channels = mwifiex_channels_5ghz,
.n_channels = ARRAY_SIZE(mwifiex_channels_5ghz),
- .bitrates = mwifiex_rates - 4,
- .n_bitrates = ARRAY_SIZE(mwifiex_rates) + 4,
+ .bitrates = mwifiex_rates + 4,
+ .n_bitrates = ARRAY_SIZE(mwifiex_rates) - 4,
};
@@ -841,12 +839,12 @@
u8 *bssid, int mode, struct ieee80211_channel *channel,
struct cfg80211_connect_params *sme, bool privacy)
{
- struct mwifiex_802_11_ssid req_ssid;
+ struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
struct cfg80211_bss *bss = NULL;
u8 is_scanning_required = 0;
- memset(&req_ssid, 0, sizeof(struct mwifiex_802_11_ssid));
+ memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
req_ssid.ssid_len = ssid_len;
if (ssid_len > IEEE80211_MAX_SSID_LEN) {
@@ -873,6 +871,7 @@
priv->sec_info.wpa2_enabled = false;
priv->wep_key_curr_index = 0;
priv->sec_info.encryption_mode = 0;
+ priv->sec_info.is_authtype_auto = 0;
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
@@ -894,11 +893,12 @@
}
/* Now handle infra mode. "sme" is valid for infra mode only */
- if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC
- || sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
+ if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
- else if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY)
- auth_type = NL80211_AUTHTYPE_SHARED_KEY;
+ priv->sec_info.is_authtype_auto = 1;
+ } else {
+ auth_type = sme->auth_type;
+ }
if (sme->crypto.n_ciphers_pairwise) {
priv->sec_info.encryption_mode =
@@ -1106,12 +1106,10 @@
dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
return -ENOMEM;
}
- for (i = 0; i < request->n_ssids; i++) {
- memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
- request->ssids[i].ssid, request->ssids[i].ssid_len);
- priv->user_scan_cfg->ssid_list[i].max_len =
- request->ssids[i].ssid_len;
- }
+
+ priv->user_scan_cfg->num_ssids = request->n_ssids;
+ priv->user_scan_cfg->ssid_list = request->ssids;
+
for (i = 0; i < request->n_channels; i++) {
chan = request->channels[i];
priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
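The 5 GHz band fix above turns "mwifiex_rates - 4" into "mwifiex_rates + 4": the shared rate table begins with the four 802.11b rates, which are not valid on 5 GHz, so that band must start four entries into the table and advertise four fewer rates. A standalone sketch of the slicing (rate values illustrative, not the driver's full table):

#include <stdio.h>

struct rate { int bitrate; int hw_value; }; /* bitrate in 100 kb/s units */

static struct rate rates[] = {
    { 10, 2 }, { 20, 4 }, { 55, 11 }, { 110, 22 }, /* 11b: 2.4 GHz only */
    { 60, 12 }, { 90, 18 }, { 120, 24 }, { 180, 36 },
    { 240, 48 }, { 360, 72 }, { 480, 96 }, { 540, 108 },
};
#define N_RATES (sizeof(rates) / sizeof(rates[0]))

int main(void)
{
    /* 2.4 GHz advertises the whole table... */
    const struct rate *band_2ghz = rates;
    size_t n_2ghz = N_RATES;

    /* ...5 GHz starts past the four 11b entries and has four fewer. */
    const struct rate *band_5ghz = rates + 4;
    size_t n_5ghz = N_RATES - 4;

    printf("2.4 GHz: %zu rates, first %d\n", n_2ghz, band_2ghz[0].bitrate);
    printf("5 GHz:   %zu rates, first %d\n", n_5ghz, band_5ghz[0].bitrate);
    return 0;
}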
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 6623db6..c82eb7f 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -771,7 +771,7 @@
/* Check init command response */
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
- if (ret == -1) {
+ if (ret) {
dev_err(adapter->dev, "%s: cmd %#x failed during "
"initialization\n", __func__, cmdresp_no);
mwifiex_init_fw_complete(adapter);
@@ -781,10 +781,8 @@
}
if (adapter->curr_cmd) {
- if (adapter->curr_cmd->wait_q_enabled && (!ret))
- adapter->cmd_wait_q.status = 0;
- else if (adapter->curr_cmd->wait_q_enabled && (ret == -1))
- adapter->cmd_wait_q.status = -1;
+ if (adapter->curr_cmd->wait_q_enabled)
+ adapter->cmd_wait_q.status = ret;
/* Clean up and put current command back to cmd_free_q */
mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 3735c77..be5fd16 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -91,11 +91,6 @@
u32 fw_len;
};
-struct mwifiex_802_11_ssid {
- u32 ssid_len;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
-};
-
struct mwifiex_wait_queue {
wait_queue_head_t wait;
int status;
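With the private mwifiex_802_11_ssid structure removed, the driver uses cfg80211's generic SSID container, which carries the same two fields (a fixed-size buffer plus a length). A standalone compare helper over that shape, mirroring what mwifiex_ssid_cmp does after the conversion (the struct is re-declared locally for illustration only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32

/* Same shape as cfg80211's SSID container: buffer plus length. */
struct cfg80211_ssid {
    uint8_t ssid[IEEE80211_MAX_SSID_LEN];
    uint8_t ssid_len;
};

/* 0 when the SSIDs match, -1 otherwise - mirrors mwifiex_ssid_cmp(). */
static int ssid_cmp(const struct cfg80211_ssid *a, const struct cfg80211_ssid *b)
{
    if (!a || !b || a->ssid_len != b->ssid_len)
        return -1;
    return memcmp(a->ssid, b->ssid, a->ssid_len) ? -1 : 0;
}

int main(void)
{
    struct cfg80211_ssid x = { "guest", 5 };
    struct cfg80211_ssid y = { "guest", 5 };

    printf("%d\n", ssid_cmp(&x, &y));   /* 0: match */
    return 0;
}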
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index c826200..fc4ffee 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -852,11 +852,6 @@
u32 scan_time;
} __packed;
-struct mwifiex_user_scan_ssid {
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 max_len;
-} __packed;
-
struct mwifiex_user_scan_cfg {
/*
* BSS mode to be sent in the firmware command
@@ -867,8 +862,9 @@
u8 reserved;
/* BSSID filter sent in the firmware command to limit the results */
u8 specific_bssid[ETH_ALEN];
- /* SSID filter list used in the to limit the scan results */
- struct mwifiex_user_scan_ssid ssid_list[MWIFIEX_MAX_SSID_LIST_LENGTH];
+ /* SSID filter list used in the firmware to limit the scan results */
+ struct cfg80211_ssid *ssid_list;
+ u8 num_ssids;
/* Variable number (fixed maximum) of channels to scan up */
struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d5d81f1..7ca4e82 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -50,7 +50,7 @@
};
struct mwifiex_ssid_bssid {
- struct mwifiex_802_11_ssid ssid;
+ struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
};
@@ -122,7 +122,7 @@
struct mwifiex_bss_info {
u32 bss_mode;
- struct mwifiex_802_11_ssid ssid;
+ struct cfg80211_ssid ssid;
u32 bss_chan;
u32 region_code;
u32 media_connected;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index ee439fc..bce9991 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -585,7 +585,7 @@
le16_to_cpu(assoc_rsp->cap_info_bitmap),
le16_to_cpu(assoc_rsp->a_id));
- ret = -1;
+ ret = le16_to_cpu(assoc_rsp->status_code);
goto done;
}
@@ -714,7 +714,7 @@
int
mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
int rsn_ie_len = 0;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1245,7 +1245,7 @@
*/
int
mwifiex_adhoc_start(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *adhoc_ssid)
+ struct cfg80211_ssid *adhoc_ssid)
{
dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
priv->adhoc_channel);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 4c86217..6dc1166 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -219,6 +219,7 @@
u8 wapi_key_on;
u8 wep_enabled;
u32 authentication_mode;
+ u8 is_authtype_auto;
u32 encryption_mode;
};
@@ -243,7 +244,7 @@
struct mwifiex_bssdescriptor {
u8 mac_address[ETH_ALEN];
- struct mwifiex_802_11_ssid ssid;
+ struct cfg80211_ssid ssid;
u32 privacy;
s32 rssi;
u32 channel;
@@ -387,7 +388,7 @@
s16 bcn_rssi_avg;
s16 bcn_nf_avg;
struct mwifiex_bssdescriptor *attempted_bss_desc;
- struct mwifiex_802_11_ssid prev_ssid;
+ struct cfg80211_ssid prev_ssid;
u8 prev_bssid[ETH_ALEN];
struct mwifiex_current_bss_params curr_bss_params;
u16 beacon_period;
@@ -746,8 +747,7 @@
struct cmd_ctrl_node *cmd_node);
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-s32 mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
- struct mwifiex_802_11_ssid *ssid2);
+s32 mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2);
int mwifiex_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
@@ -759,12 +759,12 @@
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
int mwifiex_adhoc_start(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *adhoc_ssid);
+ struct cfg80211_ssid *adhoc_ssid);
int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- struct mwifiex_802_11_ssid *req_ssid);
+ struct cfg80211_ssid *req_ssid);
int mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
struct mwifiex_bssdescriptor *bss_desc);
@@ -897,7 +897,7 @@
struct net_device *dev);
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
- struct mwifiex_802_11_ssid *req_ssid);
+ struct cfg80211_ssid *req_ssid);
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
@@ -906,13 +906,12 @@
int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate);
int mwifiex_request_scan(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *req_ssid);
+ struct cfg80211_ssid *req_ssid);
int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
struct mwifiex_user_scan_cfg *scan_req);
-int mwifiex_change_adhoc_chan(struct mwifiex_private *priv, int channel);
int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
-int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel);
+int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
int key_len, u8 key_index, int disable);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8f10038..fd0302f 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -163,8 +163,7 @@
* This function compares two SSIDs and checks if they match.
*/
s32
-mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
- struct mwifiex_802_11_ssid *ssid2)
+mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2)
{
if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len))
return -1;
@@ -738,7 +737,7 @@
u16 scan_dur;
u8 channel;
u8 radio_type;
- u32 ssid_idx;
+ int i;
u8 ssid_filter;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 rates_size;
@@ -793,14 +792,8 @@
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
- for (ssid_idx = 0;
- ((ssid_idx < ARRAY_SIZE(user_scan_in->ssid_list))
- && (*user_scan_in->ssid_list[ssid_idx].ssid
- || user_scan_in->ssid_list[ssid_idx].max_len));
- ssid_idx++) {
-
- ssid_len = strlen(user_scan_in->ssid_list[ssid_idx].
- ssid) + 1;
+ for (i = 0; i < user_scan_in->num_ssids; i++) {
+ ssid_len = user_scan_in->ssid_list[i].ssid_len;
wildcard_ssid_tlv =
(struct mwifiex_ie_types_wildcard_ssid_params *)
@@ -811,19 +804,26 @@
(u16) (ssid_len + sizeof(wildcard_ssid_tlv->
max_ssid_length)));
- /* max_ssid_length = 0 tells firmware to perform
- specific scan for the SSID filled */
- wildcard_ssid_tlv->max_ssid_length = 0;
+ /*
+ * max_ssid_length = 0 tells firmware to perform
+ * specific scan for the SSID filled, whereas
+ * max_ssid_length = IEEE80211_MAX_SSID_LEN is for
+ * wildcard scan.
+ */
+ if (ssid_len)
+ wildcard_ssid_tlv->max_ssid_length = 0;
+ else
+ wildcard_ssid_tlv->max_ssid_length =
+ IEEE80211_MAX_SSID_LEN;
memcpy(wildcard_ssid_tlv->ssid,
- user_scan_in->ssid_list[ssid_idx].ssid,
- ssid_len);
+ user_scan_in->ssid_list[i].ssid, ssid_len);
tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+ le16_to_cpu(wildcard_ssid_tlv->header.len));
- dev_dbg(adapter->dev, "info: scan: ssid_list[%d]: %s, %d\n",
- ssid_idx, wildcard_ssid_tlv->ssid,
+ dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
+ i, wildcard_ssid_tlv->ssid,
wildcard_ssid_tlv->max_ssid_length);
/* Empty wildcard ssid with a maxlen will match many or
@@ -832,7 +832,6 @@
filtered. */
if (!ssid_len && wildcard_ssid_tlv->max_ssid_length)
ssid_filter = false;
-
}
/*
@@ -841,7 +840,7 @@
* truncate scan results. That is not an issue with an SSID
* or BSSID filter applied to the scan results in the firmware.
*/
- if ((ssid_idx && ssid_filter)
+ if ((i && ssid_filter)
|| memcmp(scan_cfg_out->specific_bssid, &zero_mac,
sizeof(zero_mac)))
*filtered_scan = true;
@@ -1851,7 +1850,7 @@
* firmware, filtered on a specific SSID.
*/
static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
@@ -1877,8 +1876,8 @@
return -ENOMEM;
}
- memcpy(scan_cfg->ssid_list[0].ssid, req_ssid->ssid,
- req_ssid->ssid_len);
+ scan_cfg->ssid_list = req_ssid;
+ scan_cfg->num_ssids = 1;
ret = mwifiex_scan_networks(priv, scan_cfg);
@@ -1896,7 +1895,7 @@
* scan, depending upon whether an SSID is provided or not.
*/
int mwifiex_request_scan(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
int ret;
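The rewritten scan loop encodes the firmware convention spelled out in the new comment: max_ssid_length = 0 asks for a scan of exactly the SSID carried in the TLV, while max_ssid_length = IEEE80211_MAX_SSID_LEN together with an empty SSID asks for a wildcard scan. A standalone sketch of just that decision (TLV layout simplified, not the real wire format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32

struct wildcard_ssid_tlv {              /* simplified: TLV header omitted */
    uint8_t max_ssid_length;
    uint8_t ssid[IEEE80211_MAX_SSID_LEN];
};

static void fill_ssid_tlv(struct wildcard_ssid_tlv *tlv,
                          const uint8_t *ssid, uint8_t ssid_len)
{
    /*
     * 0 tells the firmware to scan for exactly this SSID; a non-zero
     * maximum together with an empty SSID turns the entry into a
     * wildcard scan.
     */
    tlv->max_ssid_length = ssid_len ? 0 : IEEE80211_MAX_SSID_LEN;
    if (ssid_len)
        memcpy(tlv->ssid, ssid, ssid_len);
}

int main(void)
{
    struct wildcard_ssid_tlv t = { 0 };

    fill_ssid_tlv(&t, (const uint8_t *)"office", 6);
    printf("specific scan: max_ssid_length=%u\n", t.max_ssid_length);

    fill_ssid_tlv(&t, NULL, 0);
    printf("wildcard scan: max_ssid_length=%u\n", t.max_ssid_length);
    return 0;
}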
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index d7aa21d..b9b59db 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -101,7 +101,7 @@
memcpy(&priv->prev_ssid,
&priv->curr_bss_params.bss_descriptor.ssid,
- sizeof(struct mwifiex_802_11_ssid));
+ sizeof(struct cfg80211_ssid));
memcpy(priv->prev_bssid,
priv->curr_bss_params.bss_descriptor.mac_address, ETH_ALEN);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 866026e..0ae1209 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -192,7 +192,7 @@
* first.
*/
int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -249,6 +249,17 @@
* application retrieval */
priv->assoc_rsp_size = 0;
ret = mwifiex_associate(priv, bss_desc);
+
+ /* If auth type is auto and association fails using open mode,
+ * try to connect using shared mode */
+ if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG &&
+ priv->sec_info.is_authtype_auto &&
+ priv->sec_info.wep_enabled) {
+ priv->sec_info.authentication_mode =
+ NL80211_AUTHTYPE_SHARED_KEY;
+ ret = mwifiex_associate(priv, bss_desc);
+ }
+
if (bss)
cfg80211_put_bss(bss);
} else {
@@ -453,8 +464,7 @@
info->bss_mode = priv->bss_mode;
- memcpy(&info->ssid, &bss_desc->ssid,
- sizeof(struct mwifiex_802_11_ssid));
+ memcpy(&info->ssid, &bss_desc->ssid, sizeof(struct cfg80211_ssid));
memcpy(&info->bssid, &bss_desc->mac_address, ETH_ALEN);
@@ -599,7 +609,7 @@
* - Start/Join the IBSS
*/
int
-mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
+mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel)
{
int ret;
struct mwifiex_bss_info bss_info;
@@ -636,7 +646,7 @@
ret = mwifiex_deauthenticate(priv, ssid_bssid.bssid);
ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_SET,
- (u16 *) &channel);
+ &channel);
/* Do specific SSID scanning */
if (mwifiex_request_scan(priv, &bss_info.ssid)) {
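The association change above implements the usual WEP auto-auth fallback: when userspace selects the automatic auth type, the driver first tries open system and, if the AP answers with status 13 (authentication algorithm not supported) and WEP is enabled, retries once with shared key. A standalone model of that control flow, with the association call stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13

enum auth_mode { AUTH_OPEN, AUTH_SHARED_KEY };

/* Stubbed association: this imaginary AP only accepts shared key. */
static int do_associate(enum auth_mode mode)
{
    return mode == AUTH_SHARED_KEY ? 0 : WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
}

static int connect_auto(bool authtype_auto, bool wep_enabled)
{
    int ret = do_associate(AUTH_OPEN);

    /* Open system was rejected: retry once with shared key. */
    if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG &&
        authtype_auto && wep_enabled)
        ret = do_associate(AUTH_SHARED_KEY);

    return ret;
}

int main(void)
{
    printf("auto + wep: %d\n", connect_auto(true, true));   /* 0 after retry */
    printf("open only:  %d\n", connect_auto(false, true));  /* 13, no retry */
    return 0;
}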
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 40f4eb7..ee8af1f 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -227,6 +227,7 @@
struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
+ int err;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
@@ -251,9 +252,9 @@
}
memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
- p54_setup_mac(priv);
+ err = p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
- return 0;
+ return err;
}
static void p54_remove_interface(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index b1f51a2..45df728 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -624,36 +624,39 @@
}
#ifdef CONFIG_PM
-static int p54p_suspend(struct pci_dev *pdev, pm_message_t state)
+static int p54p_suspend(struct device *device)
{
- struct ieee80211_hw *dev = pci_get_drvdata(pdev);
- struct p54p_priv *priv = dev->priv;
-
- if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
- ieee80211_stop_queues(dev);
- p54p_stop(dev);
- }
+ struct pci_dev *pdev = to_pci_dev(device);
pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
return 0;
}
-static int p54p_resume(struct pci_dev *pdev)
+static int p54p_resume(struct device *device)
{
- struct ieee80211_hw *dev = pci_get_drvdata(pdev);
- struct p54p_priv *priv = dev->priv;
+ struct pci_dev *pdev = to_pci_dev(device);
+ int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
- p54p_open(dev);
- ieee80211_wake_queues(dev);
- }
-
- return 0;
+ err = pci_reenable_device(pdev);
+ if (err)
+ return err;
+ return pci_set_power_state(pdev, PCI_D0);
}
+
+static const struct dev_pm_ops p54pci_pm_ops = {
+ .suspend = p54p_suspend,
+ .resume = p54p_resume,
+ .freeze = p54p_suspend,
+ .thaw = p54p_resume,
+ .poweroff = p54p_suspend,
+ .restore = p54p_resume,
+};
+
+#define P54P_PM_OPS (&p54pci_pm_ops)
+#else
+#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM */
static struct pci_driver p54p_driver = {
@@ -661,10 +664,7 @@
.id_table = p54p_table,
.probe = p54p_probe,
.remove = __devexit_p(p54p_remove),
-#ifdef CONFIG_PM
- .suspend = p54p_suspend,
- .resume = p54p_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = P54P_PM_OPS,
};
static int __init p54p_init(void)
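p54pci moves from the legacy pci_driver .suspend/.resume hooks to a dev_pm_ops table wired up through .driver.pm, pointing all six system-sleep callbacks at the same suspend/resume pair. The SIMPLE_DEV_PM_OPS() helper expands to exactly such a table; a kernel-style sketch of the equivalent wiring (the "foo" names are placeholders, and this is not the p54pci code itself):

/* Kernel-style sketch, not a complete driver; "foo" names are placeholders. */
#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *device)
{
    struct pci_dev *pdev = to_pci_dev(device);

    pci_save_state(pdev);
    pci_set_power_state(pdev, PCI_D3hot);
    pci_disable_device(pdev);
    return 0;
}

static int foo_resume(struct device *device)
{
    struct pci_dev *pdev = to_pci_dev(device);
    int err;

    err = pci_reenable_device(pdev);
    if (err)
        return err;
    return pci_set_power_state(pdev, PCI_D0);
}

/* Fills suspend/resume/freeze/thaw/poweroff/restore with the pair above. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
    .name      = "foo",
    .driver.pm = &foo_pm_ops,
    /* .id_table, .probe and .remove omitted from this sketch */
};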
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7faed62..f792990 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -618,19 +618,19 @@
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "spi_setup failed");
- goto err_free_common;
+ goto err_free;
}
ret = gpio_request(p54spi_gpio_power, "p54spi power");
if (ret < 0) {
dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret);
- goto err_free_common;
+ goto err_free;
}
ret = gpio_request(p54spi_gpio_irq, "p54spi irq");
if (ret < 0) {
dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret);
- goto err_free_common;
+ goto err_free_gpio_power;
}
gpio_direction_output(p54spi_gpio_power, 0);
@@ -641,7 +641,7 @@
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
- goto err_free_common;
+ goto err_free_gpio_irq;
}
irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
@@ -673,6 +673,12 @@
return 0;
err_free_common:
+ free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
+err_free_gpio_irq:
+ gpio_free(p54spi_gpio_irq);
+err_free_gpio_power:
+ gpio_free(p54spi_gpio_power);
+err_free:
p54_free_common(priv->hw);
return ret;
}
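The p54spi fix replaces the single catch-all error label with one label per acquired resource, so a failure releases only what has already been acquired, in reverse order. A standalone sketch of the same unwinding pattern with the resources reduced to two heap allocations:

#include <stdio.h>
#include <stdlib.h>

/*
 * Acquire two resources; on failure release only what was already
 * acquired, in reverse order, through dedicated error labels.
 */
static int setup(char **out_a, char **out_b)
{
    char *a, *b;
    int ret;

    a = malloc(64);
    if (!a) {
        ret = -1;
        goto err;               /* nothing to undo yet */
    }

    b = malloc(64);
    if (!b) {
        ret = -1;
        goto err_free_a;        /* only 'a' has been acquired */
    }

    *out_a = a;
    *out_b = b;
    return 0;

err_free_a:
    free(a);
err:
    return ret;
}

int main(void)
{
    char *a, *b;

    if (!setup(&a, &b)) {
        puts("resources acquired");
        free(b);
        free(a);
    }
    return 0;
}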
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a330c69..d66e298 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -518,7 +518,7 @@
__le32 current_command_oid;
/* encryption stuff */
- int encr_tx_key_index;
+ u8 encr_tx_key_index;
struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
int wpa_version;
@@ -634,7 +634,7 @@
}
}
-static bool is_wpa_key(struct rndis_wlan_private *priv, int idx)
+static bool is_wpa_key(struct rndis_wlan_private *priv, u8 idx)
{
int cipher = priv->encr_keys[idx].cipher;
@@ -1350,7 +1350,7 @@
}
static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
- u16 *beacon_interval)
+ u32 *beacon_period)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
@@ -1370,14 +1370,14 @@
if (!channel)
return NULL;
- if (beacon_interval)
- *beacon_interval = le16_to_cpu(config.beacon_period);
+ if (beacon_period)
+ *beacon_period = le32_to_cpu(config.beacon_period);
return channel;
}
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
- int index)
+ u8 index)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_wep_key ndis_key;
@@ -1387,13 +1387,15 @@
netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n",
__func__, index, key_len);
- if ((key_len != 5 && key_len != 13) || index < 0 || index > 3)
+ if (index >= RNDIS_WLAN_NUM_KEYS)
return -EINVAL;
if (key_len == 5)
cipher = WLAN_CIPHER_SUITE_WEP40;
- else
+ else if (key_len == 13)
cipher = WLAN_CIPHER_SUITE_WEP104;
+ else
+ return -EINVAL;
memset(&ndis_key, 0, sizeof(ndis_key));
@@ -1428,7 +1430,7 @@
}
static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
- int index, const u8 *addr, const u8 *rx_seq,
+ u8 index, const u8 *addr, const u8 *rx_seq,
int seq_len, u32 cipher, __le32 flags)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
@@ -1436,7 +1438,7 @@
bool is_addr_ok;
int ret;
- if (index < 0 || index >= 4) {
+ if (index >= RNDIS_WLAN_NUM_KEYS) {
netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
__func__, index);
return -EINVAL;
@@ -1524,7 +1526,7 @@
return 0;
}
-static int restore_key(struct usbnet *usbdev, int key_idx)
+static int restore_key(struct usbnet *usbdev, u8 key_idx)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct rndis_wlan_encr_key key;
@@ -1550,13 +1552,13 @@
restore_key(usbdev, i);
}
-static void clear_key(struct rndis_wlan_private *priv, int idx)
+static void clear_key(struct rndis_wlan_private *priv, u8 idx)
{
memset(&priv->encr_keys[idx], 0, sizeof(priv->encr_keys[idx]));
}
/* remove_key is for both wep and wpa */
-static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
+static int remove_key(struct usbnet *usbdev, u8 index, const u8 *bssid)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_remove_key remove_key;
@@ -1790,9 +1792,9 @@
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
- int i, len, count, newlen, err;
+ int i, newlen, err;
+ unsigned int count;
- len = le32_to_cpu(pmkids->length);
count = le32_to_cpu(pmkids->bssid_info_count);
if (count > max_pmkids)
@@ -1831,9 +1833,9 @@
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
- int i, err, len, count, newlen;
+ int i, err, newlen;
+ unsigned int count;
- len = le32_to_cpu(pmkids->length);
count = le32_to_cpu(pmkids->bssid_info_count);
if (count > max_pmkids)
@@ -2683,7 +2685,7 @@
s32 signal;
u64 timestamp;
u16 capability;
- u16 beacon_interval = 0;
+ u32 beacon_period = 0;
__le32 rssi;
u8 ie_buf[34];
int len, ret, ie_len;
@@ -2708,7 +2710,7 @@
}
/* Get channel and beacon interval */
- channel = get_current_channel(usbdev, &beacon_interval);
+ channel = get_current_channel(usbdev, &beacon_period);
if (!channel) {
netdev_warn(usbdev->net, "%s(): could not get channel.\n",
__func__);
@@ -2738,11 +2740,11 @@
netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, "
"capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), "
"signal:%d\n", __func__, (channel ? channel->center_freq : -1),
- bssid, (u32)timestamp, capability, beacon_interval, ie_len,
+ bssid, (u32)timestamp, capability, beacon_period, ie_len,
ssid.essid, signal);
bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
- timestamp, capability, beacon_interval, ie_buf, ie_len,
+ timestamp, capability, beacon_period, ie_buf, ie_len,
signal, GFP_KERNEL);
cfg80211_put_bss(bss);
}
@@ -2755,9 +2757,10 @@
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_assoc_info *info = NULL;
u8 bssid[ETH_ALEN];
- int resp_ie_len, req_ie_len;
+ unsigned int resp_ie_len, req_ie_len;
+ unsigned int offset;
u8 *req_ie, *resp_ie;
- int ret, offset;
+ int ret;
bool roamed = false;
bool match_bss;
@@ -2785,7 +2788,9 @@
ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
if (!ret) {
req_ie_len = le32_to_cpu(info->req_ie_length);
- if (req_ie_len > 0) {
+ if (req_ie_len > CONTROL_BUFFER_SIZE)
+ req_ie_len = CONTROL_BUFFER_SIZE;
+ if (req_ie_len != 0) {
offset = le32_to_cpu(info->offset_req_ies);
if (offset > CONTROL_BUFFER_SIZE)
@@ -2799,7 +2804,9 @@
}
resp_ie_len = le32_to_cpu(info->resp_ie_length);
- if (resp_ie_len > 0) {
+ if (resp_ie_len > CONTROL_BUFFER_SIZE)
+ resp_ie_len = CONTROL_BUFFER_SIZE;
+ if (resp_ie_len != 0) {
offset = le32_to_cpu(info->offset_resp_ies);
if (offset > CONTROL_BUFFER_SIZE)
@@ -3038,7 +3045,7 @@
struct rndis_indicate *msg, int buflen)
{
struct ndis_80211_status_indication *indication;
- int len, offset;
+ unsigned int len, offset;
offset = offsetof(struct rndis_indicate, status) +
le32_to_cpu(msg->offset);
@@ -3050,7 +3057,7 @@
return;
}
- if (offset + len > buflen) {
+ if (len > buflen || offset > buflen || offset + len > buflen) {
netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
offset + len, buflen);
return;
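Several of the rndis_wlan changes switch lengths and offsets read from the device to unsigned types and bound each value separately: with unsigned arithmetic, offset + len can wrap around, so the combined "offset + len > buflen" test alone can be fooled; the added "len > buflen || offset > buflen" checks reject the wrap first. A standalone demonstration of the difference:

#include <stdbool.h>
#include <stdio.h>

/* Unsafe: a wrapped offset + len can slip past the single comparison. */
static bool fits_unsafe(unsigned int offset, unsigned int len, unsigned int buflen)
{
    return offset + len <= buflen;
}

/* Safe: each operand is bounded first, so the sum cannot wrap. */
static bool fits_safe(unsigned int offset, unsigned int len, unsigned int buflen)
{
    return len <= buflen && offset <= buflen && offset + len <= buflen;
}

int main(void)
{
    unsigned int buflen = 256;
    unsigned int offset = 0xffffff00u;  /* hostile value from the device */
    unsigned int len = 0x200u;          /* offset + len wraps to 0x100 */

    printf("unsafe: %d\n", fits_unsafe(offset, len, buflen));  /* 1 (!) */
    printf("safe:   %d\n", fits_safe(offset, len, buflen));    /* 0 */
    return 0;
}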
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 638fbef..cf53ac9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -8,7 +8,7 @@
* Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
*
* The driver was extended to the RTL8187B in 2008 by:
- * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
* Hin-Tak Leung <htl10@users.sourceforge.net>
* Larry Finger <Larry.Finger@lwfinger.net>
*
@@ -232,6 +232,7 @@
{
struct rtl8187_priv *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *tx_hdr = (struct ieee80211_hdr *)(skb->data);
unsigned int ep;
void *buf;
struct urb *urb;
@@ -249,7 +250,7 @@
flags |= RTL818X_TX_DESC_FLAG_NO_ENC;
flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
- if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control))
+ if (ieee80211_has_morefrags(tx_hdr->frame_control))
flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= RTL818X_TX_DESC_FLAG_RTS;
@@ -261,6 +262,13 @@
flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
}
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ priv->seqno += 0x10;
+ tx_hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ tx_hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+ }
+
if (!priv->is_rtl8187b) {
struct rtl8187_tx_hdr *hdr =
(struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
@@ -274,8 +282,6 @@
} else {
/* fc needs to be calculated before skb_push() */
unsigned int epmap[4] = { 6, 7, 5, 4 };
- struct ieee80211_hdr *tx_hdr =
- (struct ieee80211_hdr *)(skb->data);
u16 fc = le16_to_cpu(tx_hdr->frame_control);
struct rtl8187b_tx_hdr *hdr =
@@ -1031,10 +1037,61 @@
cancel_delayed_work_sync(&priv->work);
}
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
+
+static void rtl8187_beacon_work(struct work_struct *work)
+{
+ struct rtl8187_vif *vif_priv =
+ container_of(work, struct rtl8187_vif, beacon_work.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+ struct ieee80211_hw *dev = vif_priv->dev;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+
+ /* don't overflow the tx ring */
+ if (ieee80211_queue_stopped(dev, 0))
+ goto resched;
+
+ /* grab a fresh beacon */
+ skb = ieee80211_beacon_get(dev, vif);
+ if (!skb)
+ goto resched;
+
+ /*
+ * update beacon timestamp w/ TSF value
+ * TODO: make hardware update beacon timestamp
+ */
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ mgmt->u.beacon.timestamp = cpu_to_le64(rtl8187_get_tsf(dev, vif));
+
+ /* TODO: use actual beacon queue */
+ skb_set_queue_mapping(skb, 0);
+
+ rtl8187_tx(dev, skb);
+
+resched:
+ /*
+ * schedule next beacon
+ * TODO: use hardware support for beacon timing
+ */
+ schedule_delayed_work(&vif_priv->beacon_work,
+ usecs_to_jiffies(1024 * vif->bss_conf.beacon_int));
+}
+
+
static int rtl8187_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
+ struct rtl8187_vif *vif_priv;
int i;
int ret = -EOPNOTSUPP;
@@ -1044,6 +1101,7 @@
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
break;
default:
goto exit;
@@ -1052,6 +1110,13 @@
ret = 0;
priv->vif = vif;
+ /* Initialize driver private area */
+ vif_priv = (struct rtl8187_vif *)&vif->drv_priv;
+ vif_priv->dev = dev;
+ INIT_DELAYED_WORK(&vif_priv->beacon_work, rtl8187_beacon_work);
+ vif_priv->enable_beacon = false;
+
+
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->MAC[i],
@@ -1175,9 +1240,12 @@
u32 changed)
{
struct rtl8187_priv *priv = dev->priv;
+ struct rtl8187_vif *vif_priv;
int i;
u8 reg;
+ vif_priv = (struct rtl8187_vif *)&vif->drv_priv;
+
if (changed & BSS_CHANGED_BSSID) {
mutex_lock(&priv->conf_mutex);
for (i = 0; i < ETH_ALEN; i++)
@@ -1189,8 +1257,12 @@
else
reg = 0;
- if (is_valid_ether_addr(info->bssid))
- reg |= RTL818X_MSR_INFRA;
+ if (is_valid_ether_addr(info->bssid)) {
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ reg |= RTL818X_MSR_ADHOC;
+ else
+ reg |= RTL818X_MSR_INFRA;
+ }
else
reg |= RTL818X_MSR_NO_LINK;
@@ -1202,6 +1274,16 @@
if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE))
rtl8187_conf_erp(priv, info->use_short_slot,
info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ vif_priv->enable_beacon = info->enable_beacon;
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON)) {
+ cancel_delayed_work_sync(&vif_priv->beacon_work);
+ if (vif_priv->enable_beacon)
+ schedule_work(&vif_priv->beacon_work.work);
+ }
+
}
static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
@@ -1279,13 +1361,6 @@
return 0;
}
-static u64 rtl8187_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif)
-{
- struct rtl8187_priv *priv = dev->priv;
-
- return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
- (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
-}
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
@@ -1514,12 +1589,9 @@
if (reg & 0xFF00)
priv->rfkill_mask = RFKILL_MASK_8198;
}
-
- /*
- * XXX: Once this driver supports anything that requires
- * beacons it must implement IEEE80211_TX_CTL_ASSIGN_SEQ.
- */
- dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ dev->vif_data_size = sizeof(struct rtl8187_vif);
+ dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
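With IBSS and beaconing enabled, rtl8187 now honors IEEE80211_TX_CTL_ASSIGN_SEQ: a software counter is bumped by 0x10 on every first fragment (the sequence number occupies the upper 12 bits of seq_ctrl, above the 4-bit fragment number), the frame's seq_ctrl is masked down to its fragment bits, and the counter is ORed in. A standalone sketch of that field packing (the little-endian conversion done in the driver is omitted here):

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_FRAG 0x000F      /* fragment number: low 4 bits */
#define IEEE80211_SCTL_SEQ  0xFFF0      /* sequence number: high 12 bits */

struct sw_seq {
    uint16_t seqno;                     /* always a multiple of 0x10 */
};

static uint16_t assign_seq(struct sw_seq *s, uint16_t seq_ctrl, int first_frag)
{
    if (first_frag)
        s->seqno += 0x10;               /* advance to the next sequence number */

    seq_ctrl &= IEEE80211_SCTL_FRAG;    /* keep only the fragment number */
    seq_ctrl |= s->seqno & IEEE80211_SCTL_SEQ;
    return seq_ctrl;
}

int main(void)
{
    struct sw_seq s = { 0 };

    printf("0x%04x\n", assign_seq(&s, 0x0000, 1)); /* 0x0010: seq 1, frag 0 */
    printf("0x%04x\n", assign_seq(&s, 0x0001, 0)); /* 0x0011: seq 1, frag 1 */
    printf("0x%04x\n", assign_seq(&s, 0x0000, 1)); /* 0x0020: seq 2, frag 0 */
    return 0;
}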
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index f1cc907..e19a20a 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -89,6 +89,14 @@
DEVICE_RTL8187B
};
+struct rtl8187_vif {
+ struct ieee80211_hw *dev;
+
+ /* beaconing */
+ struct delayed_work beacon_work;
+ bool enable_beacon;
+};
+
struct rtl8187_priv {
/* common between rtl818x drivers */
struct rtl818x_csr *map;
@@ -141,6 +149,7 @@
__le32 bits32;
} *io_dmabuf;
bool rfkill_off;
+ u16 seqno;
};
void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index a644735..1208b75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -520,6 +520,10 @@
dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
dm_digtable.backoff_val);
+ dm_digtable.cur_igvalue += 2;
+ if (dm_digtable.cur_igvalue > 0x3f)
+ dm_digtable.cur_igvalue = 0x3f;
+
if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
dm_digtable.cur_igvalue);
@@ -1201,13 +1205,18 @@
"PreState = %d, CurState = %d\n",
p_ra->pre_ratr_state, p_ra->ratr_state);
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ /* Only the PCI card uses sta in the update rate table
+ * callback routine */
+ if (rtlhal->interface == INTF_PCI) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ }
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
p_ra->ratr_state);
p_ra->pre_ratr_state = p_ra->ratr_state;
- rcu_read_unlock();
+ if (rtlhal->interface == INTF_PCI)
+ rcu_read_unlock();
}
}
}
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index ffcf89f..2e1e352 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -346,9 +346,14 @@
pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
pep_desc->bInterval);
}
- if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
- return -EINVAL ;
-
+ if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
+ pr_err("Too few input end points found\n");
+ return -EINVAL;
+ }
+ if (rtlusb->out_ep_nums == 0) {
+ pr_err("No output end points found\n");
+ return -EINVAL;
+ }
/* usb endpoint mapping */
err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
@@ -357,7 +362,7 @@
return err;
}
-static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
+static void rtl_usb_init_sw(struct ieee80211_hw *hw)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -392,7 +397,6 @@
/* HIMR_EX - turn all on */
rtlusb->irq_mask[1] = 0xFFFFFFFF;
rtlusb->disableHWSM = true;
- return 0;
}
#define __RADIO_TAP_SIZE_RSV 32
@@ -976,7 +980,9 @@
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = _rtl_usb_init(hw);
- err = _rtl_usb_init_sw(hw);
+ if (err)
+ goto error_out;
+ rtl_usb_init_sw(hw);
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index b776d9d..3414fc1 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -459,23 +459,39 @@
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
+ unsigned long flags;
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
+ /* these bits are used by op_tx */
+ spin_lock_irqsave(&wl->wl_lock, flags);
__set_bit(link, wl->links_map);
__set_bit(link, wlvif->links_map);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
*hlid = link;
return 0;
}
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
+ unsigned long flags;
+
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
+ /* these bits are used by op_tx */
+ spin_lock_irqsave(&wl->wl_lock, flags);
__clear_bit(*hlid, wl->links_map);
__clear_bit(*hlid, wlvif->links_map);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /*
+ * At this point op_tx() will not add more packets to the queues. We
+ * can purge them.
+ */
+ wl1271_tx_reset_link_queues(wl, *hlid);
+
*hlid = WL12XX_INVALID_LINK_ID;
}
@@ -515,7 +531,7 @@
goto out_free;
}
cmd->device.hlid = wlvif->dev_hlid;
- cmd->device.session = wlvif->session_counter;
+ cmd->device.session = wl12xx_get_new_session_id(wl, wlvif);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -1802,6 +1818,14 @@
goto out;
__clear_bit(role_id, wl->roc_map);
+
+ /*
+ * Rearm the tx watchdog when removing the last ROC. This prevents
+ * recoveries due to just finished ROCs - when Tx hasn't yet had
+ * a chance to get out.
+ */
+ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES)
+ wl12xx_rearm_tx_watchdog_locked(wl);
out:
return ret;
}
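The link allocation/free paths now take wl_lock around the bitmap updates because op_tx(), running in a context that cannot take the driver mutex, reads the same bits; __set_bit()/__clear_bit() are non-atomic, so without the spinlock the two paths could race. A kernel-style sketch of the pattern (not the wl12xx code; the caller is assumed to have run spin_lock_init() on the lock):

/* Kernel-style sketch of the locking pattern, not the wl12xx code itself. */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MAX_LINKS 16

struct links {
    spinlock_t lock;        /* initialized elsewhere with spin_lock_init() */
    unsigned long map[BITS_TO_LONGS(MAX_LINKS)];
};

/* __set_bit()/__clear_bit() are non-atomic, so every user holds the lock. */
static int alloc_link(struct links *l, u8 *out)
{
    unsigned long flags;
    u8 link;

    spin_lock_irqsave(&l->lock, flags);
    link = find_first_zero_bit(l->map, MAX_LINKS);
    if (link >= MAX_LINKS) {
        spin_unlock_irqrestore(&l->lock, flags);
        return -EBUSY;
    }
    __set_bit(link, l->map);
    spin_unlock_irqrestore(&l->lock, flags);

    *out = link;
    return 0;
}

static void free_link(struct links *l, u8 link)
{
    unsigned long flags;

    spin_lock_irqsave(&l->lock, flags);
    __clear_bit(link, l->map);
    spin_unlock_irqrestore(&l->lock, flags);
}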
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index cc50faa..3e581e1 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -690,6 +690,9 @@
*/
u8 tmpl_short_retry_limit;
u8 tmpl_long_retry_limit;
+
+ /* Time in ms for Tx watchdog timer to expire */
+ u32 tx_watchdog_timeout;
};
enum {
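The new tx_watchdog_timeout (5000 ms in the default config added to main.c below) drives a delayed work item that fires when firmware has sat on allocated Tx blocks for too long; every situation where a quiet Tx path is expected (an active ROC, a running scan, an AP with sleeping stations, or freshly freed blocks) rearms the timer instead of letting it trigger recovery. A user-space model of the arm/rearm/disarm logic, with a plain monotonic deadline standing in for the delayed work (names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_WATCHDOG_TIMEOUT_MS 5000

struct tx_watchdog {
    unsigned int allocated_blocks;  /* blocks currently held by firmware */
    uint64_t deadline_ms;           /* 0 means the watchdog is not armed */
};

/* Re-arm only while firmware still holds blocks, mirroring the driver. */
static void watchdog_rearm(struct tx_watchdog *w, uint64_t now_ms)
{
    if (w->allocated_blocks == 0)
        return;
    w->deadline_ms = now_ms + TX_WATCHDOG_TIMEOUT_MS;
}

/* Called when the firmware returns freed blocks. */
static void watchdog_on_completion(struct tx_watchdog *w, unsigned int freed,
                                   uint64_t now_ms)
{
    w->allocated_blocks -= freed;
    if (w->allocated_blocks)
        watchdog_rearm(w, now_ms);  /* Tx is moving: push the deadline out */
    else
        w->deadline_ms = 0;         /* nothing in flight: disarm */
}

static bool watchdog_expired(const struct tx_watchdog *w, uint64_t now_ms)
{
    return w->deadline_ms && now_ms >= w->deadline_ms;
}

int main(void)
{
    struct tx_watchdog w = { .allocated_blocks = 4 };

    watchdog_rearm(&w, 0);
    watchdog_on_completion(&w, 2, 1000);    /* progress: deadline moves to 6000 */
    printf("expired at 5500? %d\n", watchdog_expired(&w, 5500));    /* 0 */
    printf("expired at 6500? %d\n", watchdog_expired(&w, 6500));    /* 1 */
    return 0;
}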
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index adf9bbc..3900236 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -217,6 +217,7 @@
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
+ .tx_watchdog_timeout = 5000,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -246,7 +247,7 @@
.psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
- .dynamic_ps_timeout = 100,
+ .dynamic_ps_timeout = 200,
.forced_ps = false,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
@@ -392,15 +393,15 @@
static void wl1271_op_stop(struct ieee80211_hw *hw);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-static DEFINE_MUTEX(wl_list_mutex);
-static LIST_HEAD(wl_list);
-
-static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- unsigned char operstate)
+static int wl12xx_set_authorized(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- if (operstate != IF_OPER_UP)
+ if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
+ return -EINVAL;
+
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
return 0;
if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
@@ -415,76 +416,6 @@
wl1271_info("Association completed.");
return 0;
}
-static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
- void *arg)
-{
- struct net_device *dev = arg;
- struct wireless_dev *wdev;
- struct wiphy *wiphy;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- struct wl1271 *wl_temp;
- struct wl12xx_vif *wlvif;
- int ret = 0;
-
- /* Check that this notification is for us. */
- if (what != NETDEV_CHANGE)
- return NOTIFY_DONE;
-
- wdev = dev->ieee80211_ptr;
- if (wdev == NULL)
- return NOTIFY_DONE;
-
- wiphy = wdev->wiphy;
- if (wiphy == NULL)
- return NOTIFY_DONE;
-
- hw = wiphy_priv(wiphy);
- if (hw == NULL)
- return NOTIFY_DONE;
-
- wl_temp = hw->priv;
- mutex_lock(&wl_list_mutex);
- list_for_each_entry(wl, &wl_list, list) {
- if (wl == wl_temp)
- break;
- }
- mutex_unlock(&wl_list_mutex);
- if (wl != wl_temp)
- return NOTIFY_DONE;
-
- mutex_lock(&wl->mutex);
-
- if (wl->state == WL1271_STATE_OFF)
- goto out;
-
- if (dev->operstate != IF_OPER_UP)
- goto out;
- /*
- * The correct behavior should be just getting the appropriate wlvif
- * from the given dev, but currently we don't have a mac80211
- * interface for it.
- */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
-
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- continue;
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- wl1271_check_operstate(wl, wlvif,
- ieee80211_get_operstate(vif));
-
- wl1271_ps_elp_sleep(wl);
- }
-out:
- mutex_unlock(&wl->mutex);
-
- return NOTIFY_OK;
-}
static int wl1271_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
@@ -623,6 +554,80 @@
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
+/* wl->mutex must be taken */
+void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
+{
+ /* if the watchdog is not armed, don't do anything */
+ if (wl->tx_allocated_blocks == 0)
+ return;
+
+ cancel_delayed_work(&wl->tx_watchdog_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
+ msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
+}
+
+static void wl12xx_tx_watchdog_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wl = container_of(dwork, struct wl1271, tx_watchdog_work);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ /* Tx went out in the meantime - everything is ok */
+ if (unlikely(wl->tx_allocated_blocks == 0))
+ goto out;
+
+ /*
+ * if a ROC is in progress, we might not have any Tx for a long
+ * time (e.g. pending Tx on the non-ROC channels)
+ */
+ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
+ wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
+ wl->conf.tx.tx_watchdog_timeout);
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ goto out;
+ }
+
+ /*
+ * if a scan is in progress, we might not have any Tx for a long
+ * time
+ */
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+ wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
+ wl->conf.tx.tx_watchdog_timeout);
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ goto out;
+ }
+
+ /*
+ * AP might cache a frame for a long time for a sleeping station,
+ * so rearm the timer if there's an AP interface with stations. If
+ * Tx is genuinely stuck we will hopefully discover it when all
+ * stations are removed due to inactivity.
+ */
+ if (wl->active_sta_count) {
+ wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
+ " %d stations",
+ wl->conf.tx.tx_watchdog_timeout,
+ wl->active_sta_count);
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ goto out;
+ }
+
+ wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
+ wl->conf.tx.tx_watchdog_timeout);
+ wl12xx_queue_recovery_work(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static void wl1271_conf_init(struct wl1271 *wl)
{
@@ -815,6 +820,18 @@
wl->tx_allocated_blocks -= freed_blocks;
+ /*
+ * If the FW freed some blocks:
+ * If we still have allocated blocks - re-arm the timer, Tx is
+ * not stuck. Otherwise, cancel the timer (no Tx currently).
+ */
+ if (freed_blocks) {
+ if (wl->tx_allocated_blocks)
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ else
+ cancel_delayed_work(&wl->tx_watchdog_work);
+ }
+
avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
/*
@@ -1224,7 +1241,8 @@
wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
- BUG_ON(bug_on_recovery);
+ BUG_ON(bug_on_recovery &&
+ !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
/*
* Advance security sequence number to overcome potential progress
@@ -1487,6 +1505,7 @@
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->elp_work);
+ cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
@@ -1528,7 +1547,8 @@
goto out;
}
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
+ hlid, q, skb->len);
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
wl->tx_queue_count[q]++;
@@ -1626,10 +1646,6 @@
}
-static struct notifier_block wl1271_dev_notifier = {
- .notifier_call = wl1271_dev_notify,
-};
-
#ifdef CONFIG_PM
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
@@ -1737,6 +1753,8 @@
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow || !wow->any);
+ wl1271_tx_flush(wl);
+
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_configure_suspend(wl, wlvif);
@@ -1854,15 +1872,12 @@
wl->state = WL1271_STATE_OFF;
mutex_unlock(&wl->mutex);
- mutex_lock(&wl_list_mutex);
- list_del(&wl->list);
- mutex_unlock(&wl_list_mutex);
-
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
+ cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
wl12xx_tx_reset(wl, true);
@@ -2209,6 +2224,7 @@
if (wl12xx_need_fw_change(wl, vif_count, true)) {
wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
mutex_unlock(&wl->mutex);
wl1271_recovery_work(&wl->recovery_work);
return 0;
@@ -2268,11 +2284,6 @@
out_unlock:
mutex_unlock(&wl->mutex);
- mutex_lock(&wl_list_mutex);
- if (!ret)
- list_add(&wl->list, &wl_list);
- mutex_unlock(&wl_list_mutex);
-
return ret;
}
@@ -2296,6 +2307,12 @@
if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
wl->scan_vif == vif) {
+ /*
+ * Rearm the tx watchdog just before idling scan. This
+ * prevents just-finished scans from triggering the watchdog
+ */
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_vif = NULL;
@@ -2398,6 +2415,7 @@
WARN_ON(iter != wlvif);
if (wl12xx_need_fw_change(wl, vif_count, false)) {
wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
wl12xx_queue_recovery_work(wl);
cancel_recovery = false;
}
@@ -2417,7 +2435,7 @@
set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
wl1271_op_remove_interface(hw, vif);
- vif->type = ieee80211_iftype_p2p(new_type, p2p);
+ vif->type = new_type;
vif->p2p = p2p;
ret = wl1271_op_add_interface(hw, vif);
@@ -2596,35 +2614,22 @@
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
- &wlvif->flags)) {
- if (wl12xx_dev_role_started(wlvif)) {
- /* roaming */
- ret = wl12xx_croc(wl,
- wlvif->dev_role_id);
- if (ret < 0)
- return ret;
- }
- ret = wl1271_join(wl, wlvif, false);
+ /*
+ * change the ROC channel. do it only if we are
+ * not idle. otherwise, CROC will be called
+ * anyway.
+ */
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags) &&
+ wl12xx_dev_role_started(wlvif) &&
+ !(conf->flags & IEEE80211_CONF_IDLE)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
- wl1271_warning("cmd join on channel "
- "failed %d", ret);
- } else {
- /*
- * change the ROC channel. do it only if we are
- * not idle. otherwise, CROC will be called
- * anyway.
- */
- if (wl12xx_dev_role_started(wlvif) &&
- !(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_stop_dev(wl, wlvif);
- if (ret < 0)
- return ret;
+ return ret;
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- return ret;
- }
+ ret = wl12xx_start_dev(wl, wlvif);
+ if (ret < 0)
+ return ret;
}
}
}
@@ -3151,8 +3156,6 @@
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-
int ret;
u8 *ssid = NULL;
size_t len = 0;
@@ -3180,8 +3183,8 @@
if (ret < 0)
goto out;
- if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
- test_bit(wlvif->role_id, wl->roc_map)) {
+ /* fail if there is any role in ROC */
+ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
/* don't allow scanning right now */
ret = -EBUSY;
goto out_sleep;
@@ -3221,6 +3224,13 @@
if (ret < 0)
goto out_sleep;
}
+
+ /*
+ * Rearm the tx watchdog just before idling scan. This
+ * prevents just-finished scans from triggering the watchdog
+ */
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_vif = NULL;
@@ -3744,10 +3754,8 @@
ibss_joined = true;
} else {
if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
- &wlvif->flags)) {
+ &wlvif->flags))
wl1271_unjoin(wl, wlvif);
- wl12xx_start_dev(wl, wlvif);
- }
}
}
@@ -3765,7 +3773,7 @@
do_join = true;
}
- if (changed & BSS_CHANGED_IDLE) {
+ if (changed & BSS_CHANGED_IDLE && !is_ibss) {
ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
if (ret < 0)
wl1271_warning("idle mode change failed %d", ret);
@@ -3821,6 +3829,7 @@
u32 rates;
int ieoffset;
wlvif->aid = bss_conf->aid;
+ wlvif->beacon_int = bss_conf->beacon_int;
set_assoc = true;
/*
@@ -3901,7 +3910,6 @@
/* restore the bssid filter and go to dummy bssid */
if (was_assoc) {
- u32 conf_flags = wl->hw->conf.flags;
/*
* we might have to disable roc, if there was
* no IF_OPER_UP notification.
@@ -3924,7 +3932,7 @@
}
wl1271_unjoin(wl, wlvif);
- if (!(conf_flags & IEEE80211_CONF_IDLE))
+ if (!bss_conf->idle)
wl12xx_start_dev(wl, wlvif);
}
}
@@ -3968,8 +3976,8 @@
if (ret < 0)
goto out;
- wl1271_check_operstate(wl, wlvif,
- ieee80211_get_operstate(vif));
+ if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
+ wl12xx_set_authorized(wl, wlvif);
}
/*
* stop device role if started (we might already be in
@@ -4228,107 +4236,155 @@
clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
- wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
+
+ /*
+ * rearm the tx watchdog when the last STA is freed - give the FW a
+ * chance to return STA-buffered packets before complaining.
+ */
+ if (wl->active_sta_count == 0)
+ wl12xx_rearm_tx_watchdog_locked(wl);
}
-static int wl1271_op_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int wl12xx_sta_add(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
- struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
-
- if (wlvif->bss_type != BSS_TYPE_AP_BSS)
- goto out;
-
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
- goto out;
+ return ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_free_sta;
-
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
- goto out_sleep;
-
- ret = wl12xx_cmd_set_peer_state(wl, hlid);
- if (ret < 0)
- goto out_sleep;
-
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, hlid);
- if (ret < 0)
- goto out_sleep;
-
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
-out_free_sta:
- if (ret < 0)
wl1271_free_sta(wl, wlvif, hlid);
-out:
- mutex_unlock(&wl->mutex);
return ret;
}
-static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int wl12xx_sta_remove(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
- struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0, id;
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
-
- if (wlvif->bss_type != BSS_TYPE_AP_BSS)
- goto out;
-
wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
wl_sta = (struct wl1271_station *)sta->drv_priv;
id = wl_sta->hlid;
if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
+ return -EINVAL;
+
+ ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
+ if (ret < 0)
+ return ret;
+
+ wl1271_free_sta(wl, wlvif, wl_sta->hlid);
+ return ret;
+}
+
+static int wl12xx_update_sta_state(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct wl1271_station *wl_sta;
+ u8 hlid;
+ bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ int ret;
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+
+ /* Add station (AP mode) */
+ if (is_ap &&
+ old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return wl12xx_sta_add(wl, wlvif, sta);
+
+ /* Remove station (AP mode) */
+ if (is_ap &&
+ old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ /* must not fail */
+ wl12xx_sta_remove(wl, wlvif, sta);
+ return 0;
+ }
+
+ /* Authorize station (AP mode) */
+ if (is_ap &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = wl12xx_cmd_set_peer_state(wl, hlid);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
+ hlid);
+ return ret;
+ }
+
+ /* Authorize station */
+ if (is_sta &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
+ return wl12xx_set_authorized(wl, wlvif);
+ }
+
+ if (is_sta &&
+ old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
+ sta->aid, old_state, new_state);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EBUSY;
goto out;
+ }
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
- if (ret < 0)
- goto out_sleep;
+ ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
- wl1271_free_sta(wl, wlvif, wl_sta->hlid);
-
-out_sleep:
wl1271_ps_elp_sleep(wl);
-
out:
mutex_unlock(&wl->mutex);
+ if (new_state < old_state)
+ return 0;
return ret;
}
@@ -4497,6 +4553,8 @@
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
+ wl1271_tx_flush(wl);
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -4795,8 +4853,7 @@
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
- .sta_add = wl1271_op_sta_add,
- .sta_remove = wl1271_op_sta_remove,
+ .sta_state = wl12xx_op_sta_state,
.ampdu_action = wl1271_op_ampdu_action,
.tx_frames_pending = wl1271_tx_frames_pending,
.set_bitrate_mask = wl12xx_set_bitrate_mask,
@@ -5117,8 +5174,6 @@
wl1271_debugfs_init(wl);
- register_netdevice_notifier(&wl1271_dev_notifier);
-
wl1271_notice("loaded");
out:
@@ -5130,7 +5185,6 @@
if (wl->plt)
wl1271_plt_stop(wl);
- unregister_netdevice_notifier(&wl1271_dev_notifier);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
@@ -5251,7 +5305,6 @@
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
- INIT_LIST_HEAD(&wl->list);
INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
@@ -5268,6 +5321,7 @@
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+ INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 23d6750..78f598b 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -69,8 +69,6 @@
mutex_unlock(&wl->mutex);
}
-#define ELP_ENTRY_DELAY 5
-
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
@@ -90,7 +88,7 @@
}
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(ELP_ENTRY_DELAY));
+ msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout));
}
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index e43a6b2..fcba055 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -55,6 +55,12 @@
vif = wl->scan_vif;
wlvif = wl12xx_vif_to_data(vif);
+ /*
+ * Rearm the tx watchdog just before idling the scan. This
+ * prevents just-finished scans from triggering the watchdog.
+ */
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 6446e4d..43ae491 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -226,6 +226,10 @@
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
+ /* If the FW was empty before, arm the Tx watchdog */
+ if (wl->tx_allocated_blocks == total_blocks)
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
@@ -527,6 +531,7 @@
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
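+ /* warn if we are about to decrement an already-empty queue count */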
+ WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -571,6 +576,7 @@
struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
+ /* continue from last wlvif (round robin) */
if (wlvif) {
wl12xx_for_each_wlvif_continue(wl, wlvif) {
skb = wl12xx_vif_skb_dequeue(wl, wlvif);
@@ -581,7 +587,11 @@
}
}
- /* do another pass */
+ /* dequeue from the system HLID before restarting the wlvif list */
+ if (!skb)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+
+ /* do a new pass over the wlvif list */
if (!skb) {
wl12xx_for_each_wlvif(wl, wlvif) {
skb = wl12xx_vif_skb_dequeue(wl, wlvif);
@@ -589,12 +599,16 @@
wl->last_wlvif = wlvif;
break;
}
+
+ /*
+ * No need to continue after last_wlvif. The previous
+ * pass should have found it.
+ */
+ if (wlvif == wl->last_wlvif)
+ break;
}
}
- if (!skb)
- skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
-
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
@@ -602,6 +616,7 @@
skb = wl->dummy_packet;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
+ WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -959,7 +974,6 @@
else
wlvif->sta.ba_rx_bitmap = 0;
- wl1271_tx_reset_link_queues(wl, i);
wl->links[i].allocated_pkts = 0;
wl->links[i].prev_freed_pkts = 0;
}
@@ -973,8 +987,14 @@
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wl->tx_queue_count[i] = 0;
+ /* only reset the queues if something bad happened */
+ if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
+ for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ wl1271_tx_reset_link_queues(wl, i);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wl->tx_queue_count[i] = 0;
+ }
wl->stopped_queues_map = 0;
@@ -1024,6 +1044,7 @@
void wl1271_tx_flush(struct wl1271 *wl)
{
unsigned long timeout;
+ int i;
timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
while (!time_after(jiffies, timeout)) {
@@ -1041,6 +1062,12 @@
}
wl1271_warning("Unable to flush all TX buffers, timed out.");
+
+ /* forcibly flush all Tx buffers on our queues */
+ mutex_lock(&wl->mutex);
+ for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ wl1271_tx_reset_link_queues(wl, i);
+ mutex_unlock(&wl->mutex);
}
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index e3977b5..5cf8c32 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -227,5 +227,6 @@
/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
+void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 9035241..749a15a 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -260,11 +260,13 @@
WL1271_FLAG_SOFT_GEMINI,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
+ WL1271_FLAG_INTENDED_FW_RECOVERY,
};
enum wl12xx_vif_flags {
WLVIF_FLAG_INITIALIZED,
WLVIF_FLAG_STA_ASSOCIATED,
+ WLVIF_FLAG_STA_AUTHORIZED,
WLVIF_FLAG_IBSS_JOINED,
WLVIF_FLAG_AP_STARTED,
WLVIF_FLAG_IN_PS,
@@ -452,8 +454,6 @@
bool enable_11a;
- struct list_head list;
-
/* Most recently reported noise in dBm */
s8 noise;
@@ -495,6 +495,9 @@
/* last wlvif we transmitted from */
struct wl12xx_vif *last_wlvif;
+
+ /* work to fire when Tx is stuck */
+ struct delayed_work tx_watchdog_work;
};
struct wl1271_station {
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 1a1500b..cb6204f 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -736,6 +736,8 @@
nfc_tgt->sens_res = be16_to_cpu(tgt_type_a->sens_res);
nfc_tgt->sel_res = tgt_type_a->sel_res;
+ nfc_tgt->nfcid1_len = tgt_type_a->nfcid_len;
+ memcpy(nfc_tgt->nfcid1, tgt_type_a->nfcid_data, nfc_tgt->nfcid1_len);
return 0;
}
@@ -781,6 +783,9 @@
else
nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK;
+ memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9);
+ nfc_tgt->sensf_res_len = 9;
+
return 0;
}
@@ -823,6 +828,8 @@
nfc_tgt->supported_protocols = NFC_PROTO_JEWEL_MASK;
nfc_tgt->sens_res = be16_to_cpu(tgt_jewel->sens_res);
+ nfc_tgt->nfcid1_len = 4;
+ memcpy(nfc_tgt->nfcid1, tgt_jewel->jewelid, nfc_tgt->nfcid1_len);
return 0;
}
@@ -902,6 +909,8 @@
if (resp->tg != 1)
return -EPROTO;
+ memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+
target_data_len = resp_len - sizeof(struct pn533_poll_response);
switch (dev->poll_mod_curr) {
@@ -1307,6 +1316,8 @@
nfc_dev_dbg(&dev->interface->dev, "Creating new target");
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ nfc_target.nfcid1_len = 10;
+ memcpy(nfc_target.nfcid1, resp->nfcid3t, nfc_target.nfcid1_len);
rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
if (rc)
return 0;
@@ -1329,21 +1340,15 @@
}
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
- u8 comm_mode, u8 rf_mode)
+ u8 comm_mode, u8* gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_cmd_jump_dep *cmd;
- u8 cmd_len, local_gt_len, *local_gt;
+ u8 cmd_len;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (rf_mode == NFC_RF_TARGET) {
- nfc_dev_err(&dev->interface->dev, "Target mode not supported");
- return -EOPNOTSUPP;
- }
-
-
if (dev->poll_mod_count) {
nfc_dev_err(&dev->interface->dev,
"Cannot bring the DEP link up while polling");
@@ -1356,11 +1361,7 @@
return -EBUSY;
}
- local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
- if (local_gt_len > NFC_MAX_GT_LEN)
- return -EINVAL;
-
- cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+ cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len;
cmd = kzalloc(cmd_len, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -1369,9 +1370,9 @@
cmd->active = !comm_mode;
cmd->baud = 0;
- if (local_gt != NULL) {
+ if (gb != NULL && gb_len > 0) {
cmd->next = 4; /* We have some Gi */
- memcpy(cmd->gt, local_gt, local_gt_len);
+ memcpy(cmd->gt, gb, gb_len);
} else {
cmd->next = 0;
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ea2bd1b..91a375f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -23,7 +23,6 @@
#include <asm/machdep.h>
#endif /* CONFIG_PPC */
-#include <asm/setup.h>
#include <asm/page.h>
char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 980c079..483c0adc 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,7 +182,7 @@
if (!phy_id || sz < sizeof(*phy_id))
return NULL;
- sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0]));
+ sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
phy = phy_connect(dev, bus_id, hndlr, 0, iface);
return IS_ERR(phy) ? NULL : phy;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 68d7201..cd9bc3b 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -72,4 +72,17 @@
In order for this to work, your MAC driver must also
implement the skb_tx_timestamp() function.
+config PTP_1588_CLOCK_PCH
+ tristate "Intel PCH EG20T as PTP clock"
+ depends on PTP_1588_CLOCK
+ depends on PCH_GBE
+ help
+ This driver adds support for using the PCH EG20T as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_pch.
+
endmenu
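The help text above refers to the SO_TIMESTAMPING socket API. As a rough, hypothetical sketch (not part of this patch, and assuming <linux/net_tstamp.h> from recent kernel headers plus a timestamping-capable MAC driver), a PTP program would request hardware packet timestamps roughly like this:

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>

    /* illustrative only: request hardware TX/RX timestamps on socket 'fd' */
    static int request_hw_timestamps(int fd)
    {
            int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                        SOF_TIMESTAMPING_RX_HARDWARE |
                        SOF_TIMESTAMPING_RAW_HARDWARE;

            /* timestamps are then delivered via SCM_TIMESTAMPING cmsgs */
            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &flags, sizeof(flags));
    }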
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index f6933e8..8b58597 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -5,3 +5,4 @@
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
+obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
new file mode 100644
index 0000000..375eb04
--- /dev/null
+++ b/drivers/ptp/ptp_pch.c
@@ -0,0 +1,730 @@
+/*
+ * PTP 1588 clock using the EG20T PCH
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright (C) 2011-2012 LAPIS SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the IXP46X driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define STATION_ADDR_LEN 20
+#define PCI_DEVICE_ID_PCH_1588 0x8819
+#define IO_MEM_BAR 1
+
+#define DEFAULT_ADDEND 0xA0000000
+#define TICKS_NS_SHIFT 5
+#define N_EXT_TS 2
+
+enum pch_status {
+ PCH_SUCCESS,
+ PCH_INVALIDPARAM,
+ PCH_NOTIMESTAMP,
+ PCH_INTERRUPTMODEINUSE,
+ PCH_FAILED,
+ PCH_UNSUPPORTED,
+};
+/**
+ * struct pch_ts_regs - IEEE 1588 registers
+ */
+struct pch_ts_regs {
+ u32 control;
+ u32 event;
+ u32 addend;
+ u32 accum;
+ u32 test;
+ u32 ts_compare;
+ u32 rsystime_lo;
+ u32 rsystime_hi;
+ u32 systime_lo;
+ u32 systime_hi;
+ u32 trgt_lo;
+ u32 trgt_hi;
+ u32 asms_lo;
+ u32 asms_hi;
+ u32 amms_lo;
+ u32 amms_hi;
+ u32 ch_control;
+ u32 ch_event;
+ u32 tx_snap_lo;
+ u32 tx_snap_hi;
+ u32 rx_snap_lo;
+ u32 rx_snap_hi;
+ u32 src_uuid_lo;
+ u32 src_uuid_hi;
+ u32 can_status;
+ u32 can_snap_lo;
+ u32 can_snap_hi;
+ u32 ts_sel;
+ u32 ts_st[6];
+ u32 reserve1[14];
+ u32 stl_max_set_en;
+ u32 stl_max_set;
+ u32 reserve2[13];
+ u32 srst;
+};
+
+#define PCH_TSC_RESET (1 << 0)
+#define PCH_TSC_TTM_MASK (1 << 1)
+#define PCH_TSC_ASMS_MASK (1 << 2)
+#define PCH_TSC_AMMS_MASK (1 << 3)
+#define PCH_TSC_PPSM_MASK (1 << 4)
+#define PCH_TSE_TTIPEND (1 << 1)
+#define PCH_TSE_SNS (1 << 2)
+#define PCH_TSE_SNM (1 << 3)
+#define PCH_TSE_PPS (1 << 4)
+#define PCH_CC_MM (1 << 0)
+#define PCH_CC_TA (1 << 1)
+
+#define PCH_CC_MODE_SHIFT 16
+#define PCH_CC_MODE_MASK 0x001F0000
+#define PCH_CC_VERSION (1 << 31)
+#define PCH_CE_TXS (1 << 0)
+#define PCH_CE_RXS (1 << 1)
+#define PCH_CE_OVR (1 << 0)
+#define PCH_CE_VAL (1 << 1)
+#define PCH_ECS_ETH (1 << 0)
+
+#define PCH_ECS_CAN (1 << 1)
+#define PCH_STATION_BYTES 6
+
+#define PCH_IEEE1588_ETH (1 << 0)
+#define PCH_IEEE1588_CAN (1 << 1)
+/**
+ * struct pch_dev - Driver private data
+ */
+struct pch_dev {
+ struct pch_ts_regs *regs;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+ int exts0_enabled;
+ int exts1_enabled;
+
+ u32 mem_base;
+ u32 mem_size;
+ u32 irq;
+ struct pci_dev *pdev;
+ spinlock_t register_lock;
+};
+
+/**
+ * struct pch_params - 1588 module parameter
+ */
+struct pch_params {
+ u8 station[STATION_ADDR_LEN];
+};
+
+/* structure to hold the module parameters */
+static struct pch_params pch_param = {
+ "00:00:00:00:00:00"
+};
+
+/*
+ * Register access functions
+ */
+static inline void pch_eth_enable_set(struct pch_dev *chip)
+{
+ u32 val;
+ /* SET the eth_enable bit */
+ val = ioread32(&chip->regs->ts_sel) | (PCH_ECS_ETH);
+ iowrite32(val, (&chip->regs->ts_sel));
+}
+
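+/* Read the 64-bit system time and convert hardware ticks to nanoseconds */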
+static u64 pch_systime_read(struct pch_ts_regs *regs)
+{
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ioread32(&regs->systime_lo);
+ hi = ioread32(&regs->systime_hi);
+
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= TICKS_NS_SHIFT;
+
+ return ns;
+}
+
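+/* Convert nanoseconds to hardware ticks and write the 64-bit system time */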
+static void pch_systime_write(struct pch_ts_regs *regs, u64 ns)
+{
+ u32 hi, lo;
+
+ ns >>= TICKS_NS_SHIFT;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+
+ iowrite32(lo, &regs->systime_lo);
+ iowrite32(hi, &regs->systime_hi);
+}
+
+static inline void pch_block_reset(struct pch_dev *chip)
+{
+ u32 val;
+ /* Reset Hardware Assist block */
+ val = ioread32(&chip->regs->control) | PCH_TSC_RESET;
+ iowrite32(val, (&chip->regs->control));
+ val = val & ~PCH_TSC_RESET;
+ iowrite32(val, (&chip->regs->control));
+}
+
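+/* Simple register accessors, exported for use by the PCH GbE MAC driver */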
+u32 pch_ch_control_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->ch_control);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_ch_control_read);
+
+void pch_ch_control_write(struct pci_dev *pdev, u32 val)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ iowrite32(val, (&chip->regs->ch_control));
+}
+EXPORT_SYMBOL(pch_ch_control_write);
+
+u32 pch_ch_event_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->ch_event);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_ch_event_read);
+
+void pch_ch_event_write(struct pci_dev *pdev, u32 val)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ iowrite32(val, (&chip->regs->ch_event));
+}
+EXPORT_SYMBOL(pch_ch_event_write);
+
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->src_uuid_lo);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_src_uuid_lo_read);
+
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->src_uuid_hi);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_src_uuid_hi_read);
+
+u64 pch_rx_snap_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ioread32(&chip->regs->rx_snap_lo);
+ hi = ioread32(&chip->regs->rx_snap_hi);
+
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+
+ return ns;
+}
+EXPORT_SYMBOL(pch_rx_snap_read);
+
+u64 pch_tx_snap_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ioread32(&chip->regs->tx_snap_lo);
+ hi = ioread32(&chip->regs->tx_snap_hi);
+
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+
+ return ns;
+}
+EXPORT_SYMBOL(pch_tx_snap_read);
+
+/*
+ * This function enables all 64 bits in the system time registers [high & low].
+ * This is a workaround for the non-continuous value in the SystemTime register.
+ */
+static void pch_set_system_time_count(struct pch_dev *chip)
+{
+ iowrite32(0x01, &chip->regs->stl_max_set_en);
+ iowrite32(0xFFFFFFFF, &chip->regs->stl_max_set);
+ iowrite32(0x00, &chip->regs->stl_max_set_en);
+}
+
+static void pch_reset(struct pch_dev *chip)
+{
+ /* Reset Hardware Assist */
+ pch_block_reset(chip);
+
+ /* enable all 64 bits in the system time registers */
+ pch_set_system_time_count(chip);
+}
+
+/**
+ * pch_set_station_address() - This API sets the station address used by
+ * IEEE 1588 hardware when looking at PTP
+ * traffic on the ethernet interface
+ * @addr: address which contains the colon-separated address to be used.
+ * @pdev: PCI device that owns the IEEE 1588 registers.
+ */
+static int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
+{
+ s32 i;
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ /* Verify the parameter */
+ if (chip->regs == NULL || addr == NULL) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+ /* For all station address bytes */
+ for (i = 0; i < PCH_STATION_BYTES; i++) {
+ u32 val;
+ s32 tmp;
+
+ tmp = hex_to_bin(addr[i * 3]);
+ if (tmp < 0) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+ val = tmp * 16;
+ tmp = hex_to_bin(addr[(i * 3) + 1]);
+ if (tmp < 0) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+ val += tmp;
+ /* Expects ':' separated addresses */
+ if ((i < 5) && (addr[(i * 3) + 2] != ':')) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+
+ /* Ideally we should set the address only after validating the
+ entire string */
+ dev_dbg(&pdev->dev, "invoking pch_station_set\n");
+ iowrite32(val, &chip->regs->ts_st[i]);
+ }
+ return 0;
+}
+
+/*
+ * Interrupt service routine
+ */
+static irqreturn_t isr(int irq, void *priv)
+{
+ struct pch_dev *pch_dev = priv;
+ struct pch_ts_regs *regs = pch_dev->regs;
+ struct ptp_clock_event event;
+ u32 ack = 0, lo, hi, val;
+
+ val = ioread32(&regs->event);
+
+ if (val & PCH_TSE_SNS) {
+ ack |= PCH_TSE_SNS;
+ if (pch_dev->exts0_enabled) {
+ hi = ioread32(&regs->asms_hi);
+ lo = ioread32(&regs->asms_lo);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ event.timestamp <<= TICKS_NS_SHIFT;
+ ptp_clock_event(pch_dev->ptp_clock, &event);
+ }
+ }
+
+ if (val & PCH_TSE_SNM) {
+ ack |= PCH_TSE_SNM;
+ if (pch_dev->exts1_enabled) {
+ hi = ioread32(&regs->amms_hi);
+ lo = ioread32(&regs->amms_lo);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 1;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ event.timestamp <<= TICKS_NS_SHIFT;
+ ptp_clock_event(pch_dev->ptp_clock, &event);
+ }
+ }
+
+ if (val & PCH_TSE_TTIPEND)
+ ack |= PCH_TSE_TTIPEND; /* this bit seems to be always set */
+
+ if (ack) {
+ iowrite32(ack, &regs->event);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+/*
+ * PTP clock operations
+ */
+
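+/* Frequency adjustment: scale the default addend by ppb (parts per billion) */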
+static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, addend;
+ int neg_adj = 0;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs *regs = pch_dev->regs;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ addend = DEFAULT_ADDEND;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ iowrite32(addend, &regs->addend);
+
+ return 0;
+}
+
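+/* Phase adjustment: read the system time, add delta, and write it back */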
+static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs *regs = pch_dev->regs;
+
+ spin_lock_irqsave(&pch_dev->register_lock, flags);
+ now = pch_systime_read(regs);
+ now += delta;
+ pch_systime_write(regs, now);
+ spin_unlock_irqrestore(&pch_dev->register_lock, flags);
+
+ return 0;
+}
+
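+/* Read the current hardware time and return it as a timespec */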
+static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs *regs = pch_dev->regs;
+
+ spin_lock_irqsave(&pch_dev->register_lock, flags);
+ ns = pch_systime_read(regs);
+ spin_unlock_irqrestore(&pch_dev->register_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return 0;
+}
+
+static int ptp_pch_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs *regs = pch_dev->regs;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&pch_dev->register_lock, flags);
+ pch_systime_write(regs, ns);
+ spin_unlock_irqrestore(&pch_dev->register_lock, flags);
+
+ return 0;
+}
+
+static int ptp_pch_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ pch_dev->exts0_enabled = on ? 1 : 0;
+ break;
+ case 1:
+ pch_dev->exts1_enabled = on ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_pch_caps = {
+ .owner = THIS_MODULE,
+ .name = "PCH timer",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
+ .pps = 0,
+ .adjfreq = ptp_pch_adjfreq,
+ .adjtime = ptp_pch_adjtime,
+ .gettime = ptp_pch_gettime,
+ .settime = ptp_pch_settime,
+ .enable = ptp_pch_enable,
+};
+
+
+#ifdef CONFIG_PM
+static s32 pch_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_disable_device(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ if (pci_save_state(pdev) != 0) {
+ dev_err(&pdev->dev, "could not save PCI config state\n");
+ return -ENOMEM;
+ }
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static s32 pch_resume(struct pci_dev *pdev)
+{
+ s32 ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ return ret;
+ }
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ return 0;
+}
+#else
+#define pch_suspend NULL
+#define pch_resume NULL
+#endif
+
+static void __devexit pch_remove(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ ptp_clock_unregister(chip->ptp_clock);
+ /* free the interrupt */
+ if (pdev->irq != 0)
+ free_irq(pdev->irq, chip);
+
+ /* unmap the virtual IO memory space */
+ if (chip->regs != 0) {
+ iounmap(chip->regs);
+ chip->regs = 0;
+ }
+ /* release the reserved IO memory space */
+ if (chip->mem_base != 0) {
+ release_mem_region(chip->mem_base, chip->mem_size);
+ chip->mem_base = 0;
+ }
+ pci_disable_device(pdev);
+ kfree(chip);
+ dev_info(&pdev->dev, "complete\n");
+}
+
+static s32 __devinit
+pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ s32 ret;
+ unsigned long flags;
+ struct pch_dev *chip;
+
+ chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* enable the 1588 pci device */
+ ret = pci_enable_device(pdev);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "could not enable the pci device\n");
+ goto err_pci_en;
+ }
+
+ chip->mem_base = pci_resource_start(pdev, IO_MEM_BAR);
+ if (!chip->mem_base) {
+ dev_err(&pdev->dev, "could not locate IO memory address\n");
+ ret = -ENODEV;
+ goto err_pci_start;
+ }
+
+ /* retrieve the available length of the IO memory space */
+ chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR);
+
+ /* allocate the memory for the device registers */
+ if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) {
+ dev_err(&pdev->dev,
+ "could not allocate register memory space\n");
+ ret = -EBUSY;
+ goto err_req_mem_region;
+ }
+
+ /* get the virtual address to the 1588 registers */
+ chip->regs = ioremap(chip->mem_base, chip->mem_size);
+
+ if (!chip->regs) {
+ dev_err(&pdev->dev, "Could not get virtual address\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ chip->caps = ptp_pch_caps;
+ chip->ptp_clock = ptp_clock_register(&chip->caps);
+
+ if (IS_ERR(chip->ptp_clock))
+ return PTR_ERR(chip->ptp_clock);
+
+ spin_lock_init(&chip->register_lock);
+
+ ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq);
+ goto err_req_irq;
+ }
+
+ /* indicate success */
+ chip->irq = pdev->irq;
+ chip->pdev = pdev;
+ pci_set_drvdata(pdev, chip);
+
+ spin_lock_irqsave(&chip->register_lock, flags);
+ /* reset the ieee1588 h/w */
+ pch_reset(chip);
+
+ iowrite32(DEFAULT_ADDEND, &chip->regs->addend);
+ iowrite32(1, &chip->regs->trgt_lo);
+ iowrite32(0, &chip->regs->trgt_hi);
+ iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
+ /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Event, Locked */
+ iowrite32(0x80020000, &chip->regs->ch_control);
+
+ pch_eth_enable_set(chip);
+
+ if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) {
+ if (pch_set_station_address(pch_param.station, pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Invalid station address parameter\n"
+ "Module loaded but station address not set correctly\n"
+ );
+ }
+ }
+ spin_unlock_irqrestore(&chip->register_lock, flags);
+ return 0;
+
+err_req_irq:
+ ptp_clock_unregister(chip->ptp_clock);
+ iounmap(chip->regs);
+ chip->regs = 0;
+
+err_ioremap:
+ release_mem_region(chip->mem_base, chip->mem_size);
+
+err_req_mem_region:
+ chip->mem_base = 0;
+
+err_pci_start:
+ pci_disable_device(pdev);
+
+err_pci_en:
+ kfree(chip);
+ dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret);
+
+ return ret;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pch_ieee1588_pcidev_id) = {
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_PCH_1588
+ },
+ {0}
+};
+
+static struct pci_driver pch_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_ieee1588_pcidev_id,
+ .probe = pch_probe,
+ .remove = pch_remove,
+ .suspend = pch_suspend,
+ .resume = pch_resume,
+};
+
+static void __exit ptp_pch_exit(void)
+{
+ pci_unregister_driver(&pch_driver);
+}
+
+static s32 __init ptp_pch_init(void)
+{
+ s32 ret;
+
+ /* register the driver with the pci core */
+ ret = pci_register_driver(&pch_driver);
+
+ return ret;
+}
+
+module_init(ptp_pch_init);
+module_exit(ptp_pch_exit);
+
+module_param_string(station, pch_param.station, sizeof pch_param.station, 0444);
+MODULE_PARM_DESC(station,
+ "IEEE 1588 station address to use - colon separated hex values");
+
+MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
+MODULE_DESCRIPTION("PTP clock using the EG20T timer");
+MODULE_LICENSE("GPL");
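Usage note (illustrative only, not part of the driver above): once ptp_clock_register() succeeds, the clock is exposed as a /dev/ptpN character device and can be read as a dynamic POSIX clock, for example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>

    #define CLOCKFD           3
    #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

    int main(void)
    {
            struct timespec ts;
            int fd = open("/dev/ptp0", O_RDWR);  /* device index is an assumption */

            if (fd < 0)
                    return 1;
            if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
                    return 1;
            printf("PHC time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }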
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 3767364..ea4d8f5 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -260,8 +260,8 @@
* the LDO activate bit to implement the changes on the
* LDO output.
*/
- return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
- info->activate_bit);
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+ info->activate_bit, info->activate_bit);
}
static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
@@ -280,8 +280,8 @@
* the DCDC activate bit to implement the changes on the
* DCDC output.
*/
- return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
- info->activate_bit);
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+ info->activate_bit, info->activate_bit);
}
static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 5c15ba0..40ecf516 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -662,7 +662,7 @@
tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
- vsel = selector;
+ vsel = selector + 3;
tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3ef8d07..770a740 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -167,7 +167,7 @@
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
return 0;
}
@@ -215,7 +215,7 @@
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
return 0;
}
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 2d602207..a697669 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1341,6 +1341,12 @@
spin_unlock(&ch->collect_lock);
clear_normalized_cda(&ch->ccw[1]);
+
+ CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
+ (void *)(unsigned long)ch->ccw[1].cda,
+ ch->trans_skb->data);
+ ch->ccw[1].count = ch->max_bufsize;
+
if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
dev_kfree_skb_any(ch->trans_skb);
ch->trans_skb = NULL;
@@ -1350,6 +1356,11 @@
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
return;
}
+
+ CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
+ (void *)(unsigned long)ch->ccw[1].cda,
+ ch->trans_skb->data);
+
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
ch->prof.send_stamp = current_kernel_time(); /* xtime */
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 5cb93a8..11f3b07 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -562,6 +562,9 @@
skb_queue_tail(&ch->io_queue, skb);
ccw_idx = 3;
}
+ if (do_debug_ccw)
+ ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
+ sizeof(struct ccw1) * 3);
ch->retry = 0;
fsm_newstate(ch->fsm, CTC_STATE_TX);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index da4c7473..ac7975b 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,8 +53,8 @@
#include <linux/moduleparam.h>
#include <asm/idals.h>
-#include "ctcm_mpc.h"
#include "ctcm_main.h"
+#include "ctcm_mpc.h"
#include "ctcm_fsms.h"
static const struct xid2 init_xid = {
@@ -132,7 +132,7 @@
__u32 ct, sw, rm, dup;
char *ptr, *rptr;
char tbuf[82], tdup[82];
- #if (UTS_MACHINE == s390x)
+ #ifdef CONFIG_64BIT
char addr[22];
#else
char addr[12];
@@ -149,8 +149,8 @@
for (ct = 0; ct < len; ct++, ptr++, rptr++) {
if (sw == 0) {
- #if (UTS_MACHINE == s390x)
- sprintf(addr, "%16.16lx", (__u64)rptr);
+ #ifdef CONFIG_64BIT
+ sprintf(addr, "%16.16llx", (__u64)rptr);
#else
sprintf(addr, "%8.8X", (__u32)rptr);
#endif
@@ -164,8 +164,8 @@
if (sw == 8)
strcat(bhex, " ");
- #if (UTS_MACHINE == s390x)
- sprintf(tbuf, "%2.2lX", (__u64)*ptr);
+ #ifdef CONFIG_64BIT
+ sprintf(tbuf, "%2.2llX", (__u64)*ptr);
#else
sprintf(tbuf, "%2.2X", (__u32)*ptr);
#endif
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 863fc21..687efe4 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2240,7 +2240,7 @@
{
struct lcs_card *card;
enum lcs_dev_states recover_state;
- int ret;
+ int ret = 0, ret2 = 0, ret3 = 0;
LCS_DBF_TEXT(3, setup, "shtdndev");
card = dev_get_drvdata(&ccwgdev->dev);
@@ -2255,13 +2255,15 @@
recover_state = card->state;
ret = lcs_stop_device(card->dev);
- ret = ccw_device_set_offline(card->read.ccwdev);
- ret = ccw_device_set_offline(card->write.ccwdev);
+ ret2 = ccw_device_set_offline(card->read.ccwdev);
+ ret3 = ccw_device_set_offline(card->write.ccwdev);
+ if (!ret)
+ ret = (ret2) ? ret2 : ret3;
+ if (ret)
+ LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
if (recover_state == DEV_STATE_UP) {
card->state = DEV_STATE_RECOVER;
}
- if (ret)
- return ret;
return 0;
}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cbb1018..120955c6 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -51,6 +51,7 @@
static struct device *qeth_core_root_dev;
static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;
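+/* serializes loading and selection of the layer 2/layer 3 discipline modules */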
+static struct mutex qeth_mod_mutex;
static void qeth_send_control_data_cb(struct qeth_channel *,
struct qeth_cmd_buffer *);
@@ -2944,8 +2945,8 @@
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
}
QETH_DBF_TEXT(SETUP, 2, "suppenbl");
- QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
- QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
+ QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
+ QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
return 0;
}
@@ -4321,7 +4322,7 @@
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
- cmd->hdr.return_code = -ENOMEM;
+ cmd->hdr.return_code = IPA_RC_ENOMEM;
return 0;
}
QETH_CARD_TEXT_(card, 4, "snore%i",
@@ -5040,6 +5041,7 @@
enum qeth_discipline_id discipline)
{
int rc = 0;
+ mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
card->discipline.ccwgdriver = try_then_request_module(
@@ -5057,6 +5059,7 @@
"support discipline %d\n", discipline);
rc = -EINVAL;
}
+ mutex_unlock(&qeth_mod_mutex);
return rc;
}
@@ -5540,6 +5543,7 @@
pr_info("loading core functions\n");
INIT_LIST_HEAD(&qeth_core_card_list.list);
rwlock_init(&qeth_core_card_list.rwlock);
+ mutex_init(&qeth_mod_mutex);
rc = qeth_register_dbf_views();
if (rc)
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index ec24901..7fab654 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -207,6 +207,7 @@
{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
+ {IPA_RC_ENOMEM, "Memory problem"},
{IPA_RC_FFFF, "Unknown Error"}
};
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 578e19a..ff41e42 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -190,6 +190,7 @@
IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013,
IPA_RC_LAN_OFFLINE = 0xe080,
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
+ IPA_RC_ENOMEM = 0xfffe,
IPA_RC_FFFF = 0xffff
};
/* for DELIP */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index e5c9cf1..0e7c29d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -576,7 +576,6 @@
default:
break;
}
- cmd->hdr.return_code = -EIO;
} else {
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
@@ -605,7 +604,6 @@
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
- cmd->hdr.return_code = -EIO;
return 0;
}
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
@@ -682,7 +680,7 @@
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
if (!rc)
rc = qeth_l2_send_setmac(card, addr->sa_data);
- return rc;
+ return rc ? -EINVAL : 0;
}
static void qeth_l2_set_multicast_list(struct net_device *dev)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 73bf888..f859216 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2430,7 +2430,7 @@
if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
- cmd->hdr.return_code = -ENOMEM;
+ cmd->hdr.return_code = IPA_RC_ENOMEM;
goto out_error;
}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 2f9cb43..f37ad22 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1083,7 +1083,7 @@
return -ENOMEM;
}
-static int __init pl022_dma_probe(struct pl022 *pl022)
+static int __devinit pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index befa89e..ed41244 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -331,7 +331,6 @@
{
int i;
u16 v;
- s8 gain;
u16 loc[3];
if (out->revision == 3) /* rev 3 moved MAC */
@@ -390,20 +389,12 @@
SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0);
/* Extract the antenna gain values. */
- gain = r123_extract_antgain(out->revision, in,
- SSB_SPROM1_AGAIN_BG,
- SSB_SPROM1_AGAIN_BG_SHIFT);
- out->antenna_gain.ghz24.a0 = gain;
- out->antenna_gain.ghz24.a1 = gain;
- out->antenna_gain.ghz24.a2 = gain;
- out->antenna_gain.ghz24.a3 = gain;
- gain = r123_extract_antgain(out->revision, in,
- SSB_SPROM1_AGAIN_A,
- SSB_SPROM1_AGAIN_A_SHIFT);
- out->antenna_gain.ghz5.a0 = gain;
- out->antenna_gain.ghz5.a1 = gain;
- out->antenna_gain.ghz5.a2 = gain;
- out->antenna_gain.ghz5.a3 = gain;
+ out->antenna_gain.a0 = r123_extract_antgain(out->revision, in,
+ SSB_SPROM1_AGAIN_BG,
+ SSB_SPROM1_AGAIN_BG_SHIFT);
+ out->antenna_gain.a1 = r123_extract_antgain(out->revision, in,
+ SSB_SPROM1_AGAIN_A,
+ SSB_SPROM1_AGAIN_A_SHIFT);
}
/* Revs 4 5 and 8 have partially shared layout */
@@ -504,16 +495,14 @@
}
/* Extract the antenna gain values. */
- SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01,
+ SPEX(antenna_gain.a0, SSB_SPROM4_AGAIN01,
SSB_SPROM4_AGAIN0, SSB_SPROM4_AGAIN0_SHIFT);
- SPEX(antenna_gain.ghz24.a1, SSB_SPROM4_AGAIN01,
+ SPEX(antenna_gain.a1, SSB_SPROM4_AGAIN01,
SSB_SPROM4_AGAIN1, SSB_SPROM4_AGAIN1_SHIFT);
- SPEX(antenna_gain.ghz24.a2, SSB_SPROM4_AGAIN23,
+ SPEX(antenna_gain.a2, SSB_SPROM4_AGAIN23,
SSB_SPROM4_AGAIN2, SSB_SPROM4_AGAIN2_SHIFT);
- SPEX(antenna_gain.ghz24.a3, SSB_SPROM4_AGAIN23,
+ SPEX(antenna_gain.a3, SSB_SPROM4_AGAIN23,
SSB_SPROM4_AGAIN3, SSB_SPROM4_AGAIN3_SHIFT);
- memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
- sizeof(out->antenna_gain.ghz5));
sprom_extract_r458(out, in);
@@ -602,16 +591,14 @@
SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, 0xFFFFFFFF, 0);
/* Extract the antenna gain values. */
- SPEX(antenna_gain.ghz24.a0, SSB_SPROM8_AGAIN01,
+ SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
- SPEX(antenna_gain.ghz24.a1, SSB_SPROM8_AGAIN01,
+ SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
- SPEX(antenna_gain.ghz24.a2, SSB_SPROM8_AGAIN23,
+ SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
- SPEX(antenna_gain.ghz24.a3, SSB_SPROM8_AGAIN23,
+ SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
- memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
- sizeof(out->antenna_gain.ghz5));
/* Extract cores power info info */
for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index c821c6b..fbafed5 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -676,14 +676,10 @@
case SSB_PCMCIA_CIS_ANTGAIN:
GOTO_ERROR_ON(tuple->TupleDataLen != 2,
"antg tpl size");
- sprom->antenna_gain.ghz24.a0 = tuple->TupleData[1];
- sprom->antenna_gain.ghz24.a1 = tuple->TupleData[1];
- sprom->antenna_gain.ghz24.a2 = tuple->TupleData[1];
- sprom->antenna_gain.ghz24.a3 = tuple->TupleData[1];
- sprom->antenna_gain.ghz5.a0 = tuple->TupleData[1];
- sprom->antenna_gain.ghz5.a1 = tuple->TupleData[1];
- sprom->antenna_gain.ghz5.a2 = tuple->TupleData[1];
- sprom->antenna_gain.ghz5.a3 = tuple->TupleData[1];
+ sprom->antenna_gain.a0 = tuple->TupleData[1];
+ sprom->antenna_gain.a1 = tuple->TupleData[1];
+ sprom->antenna_gain.a2 = tuple->TupleData[1];
+ sprom->antenna_gain.a3 = tuple->TupleData[1];
break;
case SSB_PCMCIA_CIS_BFLAGS:
GOTO_ERROR_ON((tuple->TupleDataLen != 3) &&
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index 63fd709..b2d36f7 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -551,14 +551,10 @@
case SSB_SDIO_CIS_ANTGAIN:
GOTO_ERROR_ON(tuple->size != 2,
"antg tpl size");
- sprom->antenna_gain.ghz24.a0 = tuple->data[1];
- sprom->antenna_gain.ghz24.a1 = tuple->data[1];
- sprom->antenna_gain.ghz24.a2 = tuple->data[1];
- sprom->antenna_gain.ghz24.a3 = tuple->data[1];
- sprom->antenna_gain.ghz5.a0 = tuple->data[1];
- sprom->antenna_gain.ghz5.a1 = tuple->data[1];
- sprom->antenna_gain.ghz5.a2 = tuple->data[1];
- sprom->antenna_gain.ghz5.a3 = tuple->data[1];
+ sprom->antenna_gain.a0 = tuple->data[1];
+ sprom->antenna_gain.a1 = tuple->data[1];
+ sprom->antenna_gain.a2 = tuple->data[1];
+ sprom->antenna_gain.a3 = tuple->data[1];
break;
case SSB_SDIO_CIS_BFLAGS:
GOTO_ERROR_ON((tuple->size != 3) &&
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 63a196b..bc7e244 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -584,10 +584,26 @@
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a5f54b7..745da3d 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -534,6 +534,11 @@
if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
fattr->cf_dtype = DT_DIR;
+ /*
+ * Server can return wrong NumberOfLinks value for directories
+ * when Unix extensions are disabled - fake it.
+ */
+ fattr->cf_nlink = 2;
} else {
fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
fattr->cf_dtype = DT_REG;
@@ -541,9 +546,9 @@
/* clear write bits if ATTR_READONLY is set */
if (fattr->cf_cifsattrs & ATTR_READONLY)
fattr->cf_mode &= ~(S_IWUGO);
- }
- fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+ fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+ }
fattr->cf_uid = cifs_sb->mnt_uid;
fattr->cf_gid = cifs_sb->mnt_gid;
@@ -1322,7 +1327,6 @@
}
/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
to set uid/gid */
- inc_nlink(inode);
cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1355,7 +1359,6 @@
d_drop(direntry);
} else {
mkdir_get_info:
- inc_nlink(inode);
if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
@@ -1436,6 +1439,11 @@
}
}
mkdir_out:
+ /*
+ * Force revalidate to get parent dir info when needed since cached
+ * attributes are invalid now.
+ */
+ CIFS_I(inode)->time = 0;
kfree(full_path);
FreeXid(xid);
cifs_put_tlink(tlink);
@@ -1475,7 +1483,6 @@
cifs_put_tlink(tlink);
if (!rc) {
- drop_nlink(inode);
spin_lock(&direntry->d_inode->i_lock);
i_size_write(direntry->d_inode, 0);
clear_nlink(direntry->d_inode);
@@ -1483,12 +1490,15 @@
}
cifsInode = CIFS_I(direntry->d_inode);
- cifsInode->time = 0; /* force revalidate to go get info when
- needed */
+ /* force revalidate to go get info when needed */
+ cifsInode->time = 0;
cifsInode = CIFS_I(inode);
- cifsInode->time = 0; /* force revalidate to get parent dir info
- since cached search results now invalid */
+ /*
+ * Force revalidate to get parent dir info when needed since cached
+ * attributes are invalid now.
+ */
+ cifsInode->time = 0;
direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 0b3109e..ca0c59a 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -52,6 +52,7 @@
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
+#include <net/sctp/sctp.h>
#include <net/sctp/user.h>
#include <net/ipv6.h>
@@ -474,9 +475,6 @@
int prim_len, ret;
int addr_len;
struct connection *new_con;
- sctp_peeloff_arg_t parg;
- int parglen = sizeof(parg);
- int err;
/*
* We get this before any data for an association.
@@ -525,23 +523,19 @@
return;
/* Peel off a new sock */
- parg.associd = sn->sn_assoc_change.sac_assoc_id;
- ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
- SCTP_SOCKOPT_PEELOFF,
- (void *)&parg, &parglen);
+ sctp_lock_sock(con->sock->sk);
+ ret = sctp_do_peeloff(con->sock->sk,
+ sn->sn_assoc_change.sac_assoc_id,
+ &new_con->sock);
+ sctp_release_sock(con->sock->sk);
if (ret < 0) {
log_print("Can't peel off a socket for "
"connection %d to node %d: err=%d",
- parg.associd, nodeid, ret);
- return;
- }
- new_con->sock = sockfd_lookup(parg.sd, &err);
- if (!new_con->sock) {
- log_print("sockfd_lookup error %d", err);
+ (int)sn->sn_assoc_change.sac_assoc_id,
+ nodeid, ret);
return;
}
add_sock(new_con->sock, new_con);
- sockfd_put(new_con->sock);
log_print("connecting to %d sctp association %d",
nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 514ed45..d117b29 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -23,6 +23,8 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#include <linux/types.h>
+
/* -------------------------------------------------------------------------------
* From AMBA UART (PL010) Block Specification
* -------------------------------------------------------------------------------
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index b9f65fb..5af9a07 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -176,6 +176,12 @@
extern void bcma_driver_unregister(struct bcma_driver *drv);
+/* Set a fallback SPROM.
+ * See kdoc at the function definition for complete documentation. */
+extern int bcma_arch_register_fallback_sprom(
+ int (*sprom_callback)(struct bcma_bus *bus,
+ struct ssb_sprom *out));
+
struct bcma_bus {
/* The MMIO area. */
void __iomem *mmio;
@@ -284,6 +290,7 @@
bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
}
+extern struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid);
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index e72938b..8bbfe31 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -56,6 +56,9 @@
#define BCMA_CC_OTPS_HW_PROTECT 0x00000001
#define BCMA_CC_OTPS_SW_PROTECT 0x00000002
#define BCMA_CC_OTPS_CID_PROTECT 0x00000004
+#define BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */
+#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8
+#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */
#define BCMA_CC_OTPC 0x0014 /* OTP control */
#define BCMA_CC_OTPC_RECWAIT 0xFF000000
#define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00
@@ -72,6 +75,8 @@
#define BCMA_CC_OTPP_READ 0x40000000
#define BCMA_CC_OTPP_START 0x80000000
#define BCMA_CC_OTPP_BUSY 0x80000000
+#define BCMA_CC_OTPL 0x001C /* OTP layout */
+#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */
#define BCMA_CC_IRQSTAT 0x0020
#define BCMA_CC_IRQMASK 0x0024
#define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */
@@ -79,6 +84,10 @@
#define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */
#define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */
#define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */
+#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1
+#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
+#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
+#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
#define BCMA_CC_JCMD_START 0x80000000
#define BCMA_CC_JCMD_BUSY 0x80000000
@@ -256,7 +265,6 @@
#define BCMA_CC_PLLCTL_ADDR 0x0660
#define BCMA_CC_PLLCTL_DATA 0x0664
#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
-#define BCMA_CC_SPROM_PCIE6 0x0830 /* SPROM beginning on PCIe rev >= 6 */
/* Divider allocation in 4716/47162/5356 */
#define BCMA_CC_PMU5_MAINPLL_CPU 1
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a89933b..4535a4e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -417,7 +417,7 @@
extern void __napi_schedule(struct napi_struct *n);
-static inline int napi_disable_pending(struct napi_struct *n)
+static inline bool napi_disable_pending(struct napi_struct *n)
{
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
@@ -431,7 +431,7 @@
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
-static inline int napi_schedule_prep(struct napi_struct *n)
+static inline bool napi_schedule_prep(struct napi_struct *n)
{
return !napi_disable_pending(n) &&
!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
@@ -451,13 +451,13 @@
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline int napi_reschedule(struct napi_struct *napi)
+static inline bool napi_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__napi_schedule(napi);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -1868,7 +1868,7 @@
}
}
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -1879,17 +1879,17 @@
*
* Test if transmit queue on device is currently unable to send.
*/
-static inline int netif_queue_stopped(const struct net_device *dev)
+static inline bool netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
-static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}
-static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
@@ -1899,12 +1899,22 @@
{
#ifdef CONFIG_BQL
dql_queued(&dev_queue->dql, bytes);
- if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
- set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
- if (unlikely(dql_avail(&dev_queue->dql) >= 0))
- clear_bit(__QUEUE_STATE_STACK_XOFF,
- &dev_queue->state);
- }
+
+ if (likely(dql_avail(&dev_queue->dql) >= 0))
+ return;
+
+ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+ /*
+ * The XOFF flag must be set before checking the dql_avail below,
+ * because in netdev_tx_completed_queue we update the dql_completed
+ * before checking the XOFF flag.
+ */
+ smp_mb();
+
+ /* check again in case another CPU has just made room available */
+ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
@@ -1917,16 +1927,23 @@
unsigned pkts, unsigned bytes)
{
#ifdef CONFIG_BQL
- if (likely(bytes)) {
- dql_completed(&dev_queue->dql, bytes);
- if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
- &dev_queue->state) &&
- dql_avail(&dev_queue->dql) >= 0)) {
- if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
- &dev_queue->state))
- netif_schedule_queue(dev_queue);
- }
- }
+ if (unlikely(!bytes))
+ return;
+
+ dql_completed(&dev_queue->dql, bytes);
+
+ /*
+ * Without the memory barrier there is a small possibility that
+ * netdev_tx_sent_queue will miss the update and cause the queue to
+ * be stopped forever
+ */
+ smp_mb();
+
+ if (dql_avail(&dev_queue->dql) < 0)
+ return;
+
+ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+ netif_schedule_queue(dev_queue);
#endif
}
@@ -1939,6 +1956,7 @@
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
dql_reset(&q->dql);
#endif
}
@@ -1954,7 +1972,7 @@
*
* Test if the device has been brought up.
*/
-static inline int netif_running(const struct net_device *dev)
+static inline bool netif_running(const struct net_device *dev)
{
return test_bit(__LINK_STATE_START, &dev->state);
}
@@ -2004,16 +2022,16 @@
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
-static inline int __netif_subqueue_stopped(const struct net_device *dev,
- u16 queue_index)
+static inline bool __netif_subqueue_stopped(const struct net_device *dev,
+ u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
return netif_tx_queue_stopped(txq);
}
-static inline int netif_subqueue_stopped(const struct net_device *dev,
- struct sk_buff *skb)
+static inline bool netif_subqueue_stopped(const struct net_device *dev,
+ struct sk_buff *skb)
{
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
@@ -2052,7 +2070,7 @@
*
* Check if device has multiple transmit queues
*/
-static inline int netif_is_multiqueue(const struct net_device *dev)
+static inline bool netif_is_multiqueue(const struct net_device *dev)
{
return dev->num_tx_queues > 1;
}
@@ -2188,7 +2206,7 @@
*
* Check if carrier is present on device
*/
-static inline int netif_carrier_ok(const struct net_device *dev)
+static inline bool netif_carrier_ok(const struct net_device *dev)
{
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
@@ -2240,7 +2258,7 @@
*
* Check if carrier is present on device
*/
-static inline int netif_dormant(const struct net_device *dev)
+static inline bool netif_dormant(const struct net_device *dev)
{
return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
@@ -2252,7 +2270,7 @@
*
* Check if carrier is operational
*/
-static inline int netif_oper_up(const struct net_device *dev)
+static inline bool netif_oper_up(const struct net_device *dev)
{
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
@@ -2264,7 +2282,7 @@
*
* Check if device has not been removed from system.
*/
-static inline int netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
@@ -2334,9 +2352,9 @@
txq->xmit_lock_owner = smp_processor_id();
}
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
+static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
- int ok = spin_trylock(&txq->_xmit_lock);
+ bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
txq->xmit_lock_owner = smp_processor_id();
return ok;
@@ -2614,7 +2632,7 @@
netdev_features_t netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(netdev_features_t features, int gso_type)
+static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2629,14 +2647,14 @@
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
+static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline int netif_needs_gso(struct sk_buff *skb,
- netdev_features_t features)
+static inline bool netif_needs_gso(struct sk_buff *skb,
+ netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
@@ -2648,7 +2666,7 @@
dev->gso_max_size = size;
}
-static inline int netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
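The byte queue limit helpers changed above are meant to be called in pairs: netdev_tx_sent_queue() from the driver's transmit path and netdev_tx_completed_queue() from its TX completion path, with the two smp_mb() calls ordering the dql update against the __QUEUE_STATE_STACK_XOFF test seen by the other CPU. A minimal sketch of that pairing follows; the driver names (my_priv, my_ndo_start_xmit, my_tx_complete) and the txq field are illustrative assumptions, not part of the patch.

/* Sketch of the intended BQL pairing in a hypothetical driver. */
static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* ... post skb to the hardware ring ... */

	/* Account the queued bytes; may set __QUEUE_STATE_STACK_XOFF. */
	netdev_tx_sent_queue(priv->txq, skb->len);

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct my_priv *priv)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... reclaim finished descriptors, accumulating pkts and bytes ... */

	/* May clear __QUEUE_STATE_STACK_XOFF and wake the queue. */
	netdev_tx_completed_queue(priv->txq, pkts, bytes);
}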
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index e144f54..1697036 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -10,6 +10,7 @@
header-y += nfnetlink_acct.h
header-y += nfnetlink_compat.h
header-y += nfnetlink_conntrack.h
+header-y += nfnetlink_cttimeout.h
header-y += nfnetlink_log.h
header-y += nfnetlink_queue.h
header-y += x_tables.h
@@ -22,6 +23,7 @@
header-y += xt_DSCP.h
header-y += xt_IDLETIMER.h
header-y += xt_LED.h
+header-y += xt_LOG.h
header-y += xt_MARK.h
header-y += xt_nfacct.h
header-y += xt_NFLOG.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 3540c6e..2f8e18a 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
/* The protocol version */
#define IPSET_PROTOCOL 6
@@ -148,6 +150,7 @@
IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME),
IPSET_FLAG_BIT_LIST_HEADER = 2,
IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER),
+ IPSET_FLAG_CMD_MAX = 15, /* Lower half */
};
/* Flags at CADT attribute level */
@@ -156,6 +159,9 @@
IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE),
IPSET_FLAG_BIT_PHYSDEV = 1,
IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV),
+ IPSET_FLAG_BIT_NOMATCH = 2,
+ IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH),
+ IPSET_FLAG_CADT_MAX = 15, /* Upper half */
};
/* Commands with settype-specific attributes */
@@ -168,19 +174,10 @@
IPSET_CADT_MAX,
};
-#ifdef __KERNEL__
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/vmalloc.h>
-#include <net/netlink.h>
-
/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
* and IPSET_INVALID_ID if you want to increase the max number of sets.
*/
-typedef u16 ip_set_id_t;
+typedef __u16 ip_set_id_t;
#define IPSET_INVALID_ID 65535
@@ -203,6 +200,15 @@
IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
};
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
/* Set features */
enum ip_set_feature {
IPSET_TYPE_IP_FLAG = 0,
@@ -288,7 +294,10 @@
u8 features;
/* Set type dimension */
u8 dimension;
- /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+ /*
+ * Supported family: may be NFPROTO_UNSPEC for both
+ * NFPROTO_IPV4/NFPROTO_IPV6.
+ */
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
@@ -450,6 +459,8 @@
return 4 * ((((b - a + 8) / 8) + 3) / 4);
}
+#endif /* __KERNEL__ */
+
/* Interface to iptables/ip6tables */
#define SO_IP_SET 83
@@ -475,6 +486,4 @@
unsigned version;
};
-#endif /* __KERNEL__ */
-
#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index b89fb79..05a5d72 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -113,6 +113,12 @@
}
#ifdef IP_SET_HASH_WITH_NETS
+#ifdef IP_SET_HASH_WITH_NETS_PACKED
+/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
+#define CIDR(cidr) (cidr + 1)
+#else
+#define CIDR(cidr) (cidr)
+#endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
@@ -262,6 +268,12 @@
#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
#define type_pf_data_next TOKEN(TYPE, PF, _data_next)
+#define type_pf_data_flags TOKEN(TYPE, PF, _data_flags)
+#ifdef IP_SET_HASH_WITH_NETS
+#define type_pf_data_match TOKEN(TYPE, PF, _data_match)
+#else
+#define type_pf_data_match(d) 1
+#endif
#define type_pf_elem TOKEN(TYPE, PF, _elem)
#define type_pf_telem TOKEN(TYPE, PF, _telem)
@@ -308,8 +320,10 @@
* we spare the maintenance of the internal counters. */
static int
type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value,
- u8 ahash_max)
+ u8 ahash_max, u32 cadt_flags)
{
+ struct type_pf_elem *data;
+
if (n->pos >= n->size) {
void *tmp;
@@ -330,7 +344,13 @@
n->value = tmp;
n->size += AHASH_INIT_SIZE;
}
- type_pf_data_copy(ahash_data(n, n->pos++), value);
+ data = ahash_data(n, n->pos++);
+ type_pf_data_copy(data, value);
+#ifdef IP_SET_HASH_WITH_NETS
+ /* Resizing won't overwrite stored flags */
+ if (cadt_flags)
+ type_pf_data_flags(data, cadt_flags);
+#endif
return 0;
}
@@ -353,9 +373,12 @@
htable_bits++;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
- if (!htable_bits)
+ if (!htable_bits) {
/* In case we have plenty of memory :-) */
+ pr_warning("Cannot increase the hashsize of set %s further\n",
+ set->name);
return -IPSET_ERR_HASH_FULL;
+ }
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
@@ -368,7 +391,7 @@
for (j = 0; j < n->pos; j++) {
data = ahash_data(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
- ret = type_pf_elem_add(m, data, AHASH_MAX(h));
+ ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
if (ret < 0) {
read_unlock_bh(&set->lock);
ahash_destroy(t);
@@ -406,9 +429,14 @@
struct hbucket *n;
int i, ret = 0;
u32 key, multi = 0;
+ u32 cadt_flags = flags >> 16;
- if (h->elements >= h->maxelem)
+ if (h->elements >= h->maxelem) {
+ if (net_ratelimit())
+ pr_warning("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
+ }
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
@@ -416,11 +444,17 @@
n = hbucket(t, key);
for (i = 0; i < n->pos; i++)
if (type_pf_data_equal(ahash_data(n, i), d, &multi)) {
+#ifdef IP_SET_HASH_WITH_NETS
+ if (flags & IPSET_FLAG_EXIST)
+ /* Support overwriting just the flags */
+ type_pf_data_flags(ahash_data(n, i),
+ cadt_flags);
+#endif
ret = -IPSET_ERR_EXIST;
goto out;
}
TUNE_AHASH_MAX(h, multi);
- ret = type_pf_elem_add(n, value, AHASH_MAX(h));
+ ret = type_pf_elem_add(n, value, AHASH_MAX(h), cadt_flags);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
@@ -428,7 +462,7 @@
}
#ifdef IP_SET_HASH_WITH_NETS
- add_cidr(h, d->cidr, HOST_MASK);
+ add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
h->elements++;
out:
@@ -463,7 +497,7 @@
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, d->cidr, HOST_MASK);
+ del_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -506,7 +540,7 @@
for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i);
if (type_pf_data_equal(data, d, &multi))
- return 1;
+ return type_pf_data_match(data);
}
}
return 0;
@@ -528,7 +562,7 @@
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
* try all possible network sizes */
- if (d->cidr == SET_HOST_MASK(set->family))
+ if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_test_cidrs(set, d, timeout);
#endif
@@ -537,7 +571,7 @@
for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i);
if (type_pf_data_equal(data, d, &multi))
- return 1;
+ return type_pf_data_match(data);
}
return 0;
}
@@ -693,7 +727,7 @@
static int
type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
- u8 ahash_max, u32 timeout)
+ u8 ahash_max, u32 cadt_flags, u32 timeout)
{
struct type_pf_elem *data;
@@ -720,6 +754,11 @@
data = ahash_tdata(n, n->pos++);
type_pf_data_copy(data, value);
type_pf_data_timeout_set(data, timeout);
+#ifdef IP_SET_HASH_WITH_NETS
+ /* Resizing won't overwrite stored flags */
+ if (cadt_flags)
+ type_pf_data_flags(data, cadt_flags);
+#endif
return 0;
}
@@ -740,7 +779,7 @@
if (type_pf_data_expired(data)) {
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, data->cidr, HOST_MASK);
+ del_cidr(h, CIDR(data->cidr), HOST_MASK);
#endif
if (j != n->pos - 1)
/* Not last one */
@@ -790,9 +829,12 @@
retry:
ret = 0;
htable_bits++;
- if (!htable_bits)
+ if (!htable_bits) {
/* In case we have plenty of memory :-) */
+ pr_warning("Cannot increase the hashsize of set %s further\n",
+ set->name);
return -IPSET_ERR_HASH_FULL;
+ }
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
@@ -805,7 +847,7 @@
for (j = 0; j < n->pos; j++) {
data = ahash_tdata(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
- ret = type_pf_elem_tadd(m, data, AHASH_MAX(h),
+ ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
type_pf_data_timeout(data));
if (ret < 0) {
read_unlock_bh(&set->lock);
@@ -839,12 +881,17 @@
int ret = 0, i, j = AHASH_MAX(h) + 1;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
+ u32 cadt_flags = flags >> 16;
if (h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
type_pf_expire(h);
- if (h->elements >= h->maxelem)
+ if (h->elements >= h->maxelem) {
+ if (net_ratelimit())
+ pr_warning("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
+ }
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
@@ -854,6 +901,7 @@
data = ahash_tdata(n, i);
if (type_pf_data_equal(data, d, &multi)) {
if (type_pf_data_expired(data) || flag_exist)
+ /* Just the timeout value may be updated */
j = i;
else {
ret = -IPSET_ERR_EXIST;
@@ -866,15 +914,18 @@
if (j != AHASH_MAX(h) + 1) {
data = ahash_tdata(n, j);
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, data->cidr, HOST_MASK);
- add_cidr(h, d->cidr, HOST_MASK);
+ del_cidr(h, CIDR(data->cidr), HOST_MASK);
+ add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
type_pf_data_copy(data, d);
type_pf_data_timeout_set(data, timeout);
+#ifdef IP_SET_HASH_WITH_NETS
+ type_pf_data_flags(data, cadt_flags);
+#endif
goto out;
}
TUNE_AHASH_MAX(h, multi);
- ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), timeout);
+ ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), cadt_flags, timeout);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
@@ -882,7 +933,7 @@
}
#ifdef IP_SET_HASH_WITH_NETS
- add_cidr(h, d->cidr, HOST_MASK);
+ add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
h->elements++;
out:
@@ -916,7 +967,7 @@
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, d->cidr, HOST_MASK);
+ del_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -954,8 +1005,17 @@
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
- if (type_pf_data_equal(data, d, &multi))
- return !type_pf_data_expired(data);
+#ifdef IP_SET_HASH_WITH_MULTI
+ if (type_pf_data_equal(data, d, &multi)) {
+ if (!type_pf_data_expired(data))
+ return type_pf_data_match(data);
+ multi = 0;
+ }
+#else
+ if (type_pf_data_equal(data, d, &multi) &&
+ !type_pf_data_expired(data))
+ return type_pf_data_match(data);
+#endif
}
}
return 0;
@@ -973,15 +1033,16 @@
u32 key, multi = 0;
#ifdef IP_SET_HASH_WITH_NETS
- if (d->cidr == SET_HOST_MASK(set->family))
+ if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_ttest_cidrs(set, d, timeout);
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
- if (type_pf_data_equal(data, d, &multi))
- return !type_pf_data_expired(data);
+ if (type_pf_data_equal(data, d, &multi) &&
+ !type_pf_data_expired(data))
+ return type_pf_data_match(data);
}
return 0;
}
@@ -1094,14 +1155,17 @@
#undef type_pf_data_isnull
#undef type_pf_data_copy
#undef type_pf_data_zero_out
+#undef type_pf_data_netmask
#undef type_pf_data_list
#undef type_pf_data_tlist
+#undef type_pf_data_next
+#undef type_pf_data_flags
+#undef type_pf_data_match
#undef type_pf_elem
#undef type_pf_telem
#undef type_pf_data_timeout
#undef type_pf_data_expired
-#undef type_pf_data_netmask
#undef type_pf_data_timeout_set
#undef type_pf_elem_add
@@ -1111,6 +1175,7 @@
#undef type_pf_test
#undef type_pf_elem_tadd
+#undef type_pf_del_telem
#undef type_pf_expire
#undef type_pf_tadd
#undef type_pf_tdel
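The CIDR() wrapper introduced above hides a representation detail: when a set type defines IP_SET_HASH_WITH_NETS_PACKED, the nomatch flag shares storage with the prefix length, so the element keeps cidr - 1 and CIDR() restores the real value for every add_cidr()/del_cidr() call and host-mask comparison. A rough illustration of the convention (STORE_CIDR is illustrative and not part of the patch):

/* Illustrative only: the packed variant stores (cidr - 1), the plain one stores cidr. */
#ifdef IP_SET_HASH_WITH_NETS_PACKED
#define STORE_CIDR(c)	((c) - 1)	/* value written into the element */
#define CIDR(c)		((c) + 1)	/* value read back for add_cidr()/del_cidr() */
#else
#define STORE_CIDR(c)	(c)
#define CIDR(c)		(c)
#endif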
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
index 6e135f9..e59868a 100644
--- a/include/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -18,7 +18,10 @@
TCP_CONNTRACK_LISTEN, /* obsolete */
#define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN
TCP_CONNTRACK_MAX,
- TCP_CONNTRACK_IGNORE
+ TCP_CONNTRACK_IGNORE,
+ TCP_CONNTRACK_RETRANS,
+ TCP_CONNTRACK_UNACK,
+ TCP_CONNTRACK_TIMEOUT_MAX
};
/* Window scaling is advertised by the sender */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index b64454c..6fd1f0d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -49,7 +49,8 @@
#define NFNL_SUBSYS_OSF 5
#define NFNL_SUBSYS_IPSET 6
#define NFNL_SUBSYS_ACCT 7
-#define NFNL_SUBSYS_COUNT 8
+#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8
+#define NFNL_SUBSYS_COUNT 9
#ifdef __KERNEL__
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index d498a44..e58e4b9 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -173,10 +173,21 @@
CTA_EXPECT_HELP_NAME,
CTA_EXPECT_ZONE,
CTA_EXPECT_FLAGS,
+ CTA_EXPECT_CLASS,
+ CTA_EXPECT_NAT,
+ CTA_EXPECT_FN,
__CTA_EXPECT_MAX
};
#define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1)
+enum ctattr_expect_nat {
+ CTA_EXPECT_NAT_UNSPEC,
+ CTA_EXPECT_NAT_DIR,
+ CTA_EXPECT_NAT_TUPLE,
+ __CTA_EXPECT_NAT_MAX
+};
+#define CTA_EXPECT_NAT_MAX (__CTA_EXPECT_NAT_MAX - 1)
+
enum ctattr_help {
CTA_HELP_UNSPEC,
CTA_HELP_NAME,
diff --git a/include/linux/netfilter/nfnetlink_cttimeout.h b/include/linux/netfilter/nfnetlink_cttimeout.h
new file mode 100644
index 0000000..a2810a7
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_cttimeout.h
@@ -0,0 +1,114 @@
+#ifndef _CTTIMEOUT_NETLINK_H
+#define _CTTIMEOUT_NETLINK_H
+#include <linux/netfilter/nfnetlink.h>
+
+enum ctnl_timeout_msg_types {
+ IPCTNL_MSG_TIMEOUT_NEW,
+ IPCTNL_MSG_TIMEOUT_GET,
+ IPCTNL_MSG_TIMEOUT_DELETE,
+
+ IPCTNL_MSG_TIMEOUT_MAX
+};
+
+enum ctattr_timeout {
+ CTA_TIMEOUT_UNSPEC,
+ CTA_TIMEOUT_NAME,
+ CTA_TIMEOUT_L3PROTO,
+ CTA_TIMEOUT_L4PROTO,
+ CTA_TIMEOUT_DATA,
+ CTA_TIMEOUT_USE,
+ __CTA_TIMEOUT_MAX
+};
+#define CTA_TIMEOUT_MAX (__CTA_TIMEOUT_MAX - 1)
+
+enum ctattr_timeout_generic {
+ CTA_TIMEOUT_GENERIC_UNSPEC,
+ CTA_TIMEOUT_GENERIC_TIMEOUT,
+ __CTA_TIMEOUT_GENERIC_MAX
+};
+#define CTA_TIMEOUT_GENERIC_MAX (__CTA_TIMEOUT_GENERIC_MAX - 1)
+
+enum ctattr_timeout_tcp {
+ CTA_TIMEOUT_TCP_UNSPEC,
+ CTA_TIMEOUT_TCP_SYN_SENT,
+ CTA_TIMEOUT_TCP_SYN_RECV,
+ CTA_TIMEOUT_TCP_ESTABLISHED,
+ CTA_TIMEOUT_TCP_FIN_WAIT,
+ CTA_TIMEOUT_TCP_CLOSE_WAIT,
+ CTA_TIMEOUT_TCP_LAST_ACK,
+ CTA_TIMEOUT_TCP_TIME_WAIT,
+ CTA_TIMEOUT_TCP_CLOSE,
+ CTA_TIMEOUT_TCP_SYN_SENT2,
+ CTA_TIMEOUT_TCP_RETRANS,
+ CTA_TIMEOUT_TCP_UNACK,
+ __CTA_TIMEOUT_TCP_MAX
+};
+#define CTA_TIMEOUT_TCP_MAX (__CTA_TIMEOUT_TCP_MAX - 1)
+
+enum ctattr_timeout_udp {
+ CTA_TIMEOUT_UDP_UNSPEC,
+ CTA_TIMEOUT_UDP_UNREPLIED,
+ CTA_TIMEOUT_UDP_REPLIED,
+ __CTA_TIMEOUT_UDP_MAX
+};
+#define CTA_TIMEOUT_UDP_MAX (__CTA_TIMEOUT_UDP_MAX - 1)
+
+enum ctattr_timeout_udplite {
+ CTA_TIMEOUT_UDPLITE_UNSPEC,
+ CTA_TIMEOUT_UDPLITE_UNREPLIED,
+ CTA_TIMEOUT_UDPLITE_REPLIED,
+ __CTA_TIMEOUT_UDPLITE_MAX
+};
+#define CTA_TIMEOUT_UDPLITE_MAX (__CTA_TIMEOUT_UDPLITE_MAX - 1)
+
+enum ctattr_timeout_icmp {
+ CTA_TIMEOUT_ICMP_UNSPEC,
+ CTA_TIMEOUT_ICMP_TIMEOUT,
+ __CTA_TIMEOUT_ICMP_MAX
+};
+#define CTA_TIMEOUT_ICMP_MAX (__CTA_TIMEOUT_ICMP_MAX - 1)
+
+enum ctattr_timeout_dccp {
+ CTA_TIMEOUT_DCCP_UNSPEC,
+ CTA_TIMEOUT_DCCP_REQUEST,
+ CTA_TIMEOUT_DCCP_RESPOND,
+ CTA_TIMEOUT_DCCP_PARTOPEN,
+ CTA_TIMEOUT_DCCP_OPEN,
+ CTA_TIMEOUT_DCCP_CLOSEREQ,
+ CTA_TIMEOUT_DCCP_CLOSING,
+ CTA_TIMEOUT_DCCP_TIMEWAIT,
+ __CTA_TIMEOUT_DCCP_MAX
+};
+#define CTA_TIMEOUT_DCCP_MAX (__CTA_TIMEOUT_DCCP_MAX - 1)
+
+enum ctattr_timeout_sctp {
+ CTA_TIMEOUT_SCTP_UNSPEC,
+ CTA_TIMEOUT_SCTP_CLOSED,
+ CTA_TIMEOUT_SCTP_COOKIE_WAIT,
+ CTA_TIMEOUT_SCTP_COOKIE_ECHOED,
+ CTA_TIMEOUT_SCTP_ESTABLISHED,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ __CTA_TIMEOUT_SCTP_MAX
+};
+#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
+
+enum ctattr_timeout_icmpv6 {
+ CTA_TIMEOUT_ICMPV6_UNSPEC,
+ CTA_TIMEOUT_ICMPV6_TIMEOUT,
+ __CTA_TIMEOUT_ICMPV6_MAX
+};
+#define CTA_TIMEOUT_ICMPV6_MAX (__CTA_TIMEOUT_ICMPV6_MAX - 1)
+
+enum ctattr_timeout_gre {
+ CTA_TIMEOUT_GRE_UNSPEC,
+ CTA_TIMEOUT_GRE_UNREPLIED,
+ CTA_TIMEOUT_GRE_REPLIED,
+ __CTA_TIMEOUT_GRE_MAX
+};
+#define CTA_TIMEOUT_GRE_MAX (__CTA_TIMEOUT_GRE_MAX - 1)
+
+#define CTNL_TIMEOUT_NAME_MAX 32
+
+#endif
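The attributes above are consumed over nfnetlink: a fine-grained timeout policy is created with an IPCTNL_MSG_TIMEOUT_NEW message carrying CTA_TIMEOUT_NAME, the L3/L4 protocol and a nested CTA_TIMEOUT_DATA block of per-state values. A rough userspace sketch, assuming libmnl (the buffer handling and the "tcp-short" policy name are illustrative):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

/* Build an IPCTNL_MSG_TIMEOUT_NEW request for a policy named "tcp-short". */
static struct nlmsghdr *build_timeout_new(char *buf, unsigned int seq)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nfgenmsg *nfg;
	struct nlattr *nest;

	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) | IPCTNL_MSG_TIMEOUT_NEW;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
	nlh->nlmsg_seq = seq;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_INET;
	nfg->version = NFNETLINK_V0;

	mnl_attr_put_strz(nlh, CTA_TIMEOUT_NAME, "tcp-short");
	mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
	mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_TCP);

	nest = mnl_attr_nest_start(nlh, CTA_TIMEOUT_DATA);
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_TCP_ESTABLISHED, htonl(120));
	mnl_attr_nest_end(nlh, nest);

	return nlh;
}

A policy created this way can then be attached to connections, which is what the timeout[32] field added to xt_ct_target_info_v1 further below is intended for.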
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index b56e768..a064b8a 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -16,4 +16,16 @@
struct nf_conn *ct __attribute__((aligned(8)));
};
+struct xt_ct_target_info_v1 {
+ __u16 flags;
+ __u16 zone;
+ __u32 ct_events;
+ __u32 exp_events;
+ char helper[16];
+ char timeout[32];
+
+ /* Used internally by the kernel */
+ struct nf_conn *ct __attribute__((aligned(8)));
+};
+
#endif /* _XT_CT_H */
diff --git a/include/linux/netfilter/xt_LOG.h b/include/linux/netfilter/xt_LOG.h
new file mode 100644
index 0000000..cac0790
--- /dev/null
+++ b/include/linux/netfilter/xt_LOG.h
@@ -0,0 +1,19 @@
+#ifndef _XT_LOG_H
+#define _XT_LOG_H
+
+/* make sure not to change this without changing nf_log.h:NF_LOG_* (!) */
+#define XT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
+#define XT_LOG_TCPOPT 0x02 /* Log TCP options */
+#define XT_LOG_IPOPT 0x04 /* Log IP options */
+#define XT_LOG_UID 0x08 /* Log UID owning local socket */
+#define XT_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
+#define XT_LOG_MACDECODE 0x20 /* Decode MAC header */
+#define XT_LOG_MASK 0x2f
+
+struct xt_log_info {
+ unsigned char level;
+ unsigned char logflags;
+ char prefix[30];
+};
+
+#endif /* _XT_LOG_H */
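The new header mirrors the old ipt/ip6t LOG options in an xtables-neutral form; a user of the target fills the structure with a syslog level and a flag mask, for example (the values below are only an illustration):

/* Illustration: a LOG rule asking for TCP sequence numbers and IP options. */
struct xt_log_info info = {
	.level    = 4,					/* KERN_WARNING */
	.logflags = XT_LOG_TCPSEQ | XT_LOG_IPOPT,	/* must stay within XT_LOG_MASK */
	.prefix   = "SYN flood: ",
};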
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index f9930c8..31f8bec 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -4,11 +4,9 @@
header-y += ipt_ECN.h
header-y += ipt_LOG.h
header-y += ipt_REJECT.h
-header-y += ipt_SAME.h
header-y += ipt_TTL.h
header-y += ipt_ULOG.h
header-y += ipt_addrtype.h
header-y += ipt_ah.h
header-y += ipt_ecn.h
-header-y += ipt_realm.h
header-y += ipt_ttl.h
diff --git a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h
index dcdbadf..5d81520 100644
--- a/include/linux/netfilter_ipv4/ipt_LOG.h
+++ b/include/linux/netfilter_ipv4/ipt_LOG.h
@@ -1,6 +1,8 @@
#ifndef _IPT_LOG_H
#define _IPT_LOG_H
+#warning "Please update iptables, this file will be removed soon!"
+
/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */
#define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
#define IPT_LOG_TCPOPT 0x02 /* Log TCP options */
diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h
deleted file mode 100644
index 5bca782..0000000
--- a/include/linux/netfilter_ipv4/ipt_SAME.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _IPT_SAME_H
-#define _IPT_SAME_H
-
-#include <linux/types.h>
-
-#define IPT_SAME_MAX_RANGE 10
-
-#define IPT_SAME_NODST 0x01
-
-struct ipt_same_info {
- unsigned char info;
- __u32 rangesize;
- __u32 ipnum;
- __u32 *iparray;
-
- /* hangs off end. */
- struct nf_nat_range range[IPT_SAME_MAX_RANGE];
-};
-
-#endif /*_IPT_SAME_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_realm.h b/include/linux/netfilter_ipv4/ipt_realm.h
deleted file mode 100644
index b3996ea..0000000
--- a/include/linux/netfilter_ipv4/ipt_realm.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _IPT_REALM_H
-#define _IPT_REALM_H
-
-#include <linux/netfilter/xt_realm.h>
-#define ipt_realm_info xt_realm_info
-
-#endif /* _IPT_REALM_H */
diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h b/include/linux/netfilter_ipv6/ip6t_LOG.h
index 9dd5579..3dd0bc4 100644
--- a/include/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/linux/netfilter_ipv6/ip6t_LOG.h
@@ -1,6 +1,8 @@
#ifndef _IP6T_LOG_H
#define _IP6T_LOG_H
+#warning "Please update iptables, this file will be removed soon!"
+
/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */
#define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
#define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index b4999ab..39c1fcf 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -107,6 +107,7 @@
NFC_ATTR_TARGET_SENSF_RES,
NFC_ATTR_COMM_MODE,
NFC_ATTR_RF_MODE,
+ NFC_ATTR_DEVICE_POWERED,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index be35a68..c37d67a 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -168,8 +168,8 @@
* %NL80211_ATTR_DTIM_PERIOD, %NL80211_ATTR_SSID,
* %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHERS_PAIRWISE,
* %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS,
- * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY and
- * %NL80211_ATTR_AUTH_TYPE.
+ * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY,
+ * %NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT.
* @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP
* @NL80211_CMD_STOP_AP: Stop AP operation on the given interface
* @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP
@@ -1197,6 +1197,16 @@
* @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
* up to 16 TIDs.
*
+ * @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds; this can be
+ * used by drivers that have MLME in firmware and do not support reporting
+ * per-station tx/rx activity, in order to free up the station entry from
+ * the list. This needs to be used when the driver advertises the
+ * capability to time out stations.
+ *
+ * @NL80211_ATTR_RX_SIGNAL_DBM: signal strength in dBm (as a 32-bit int);
+ * this attribute is (depending on the driver capabilities) added to
+ * received frames indicated with %NL80211_CMD_FRAME.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1442,6 +1452,10 @@
NL80211_ATTR_NOACK_MAP,
+ NL80211_ATTR_INACTIVITY_TIMEOUT,
+
+ NL80211_ATTR_RX_SIGNAL_DBM,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2112,6 +2126,10 @@
* @NL80211_MESHCONF_FORWARDING: set Mesh STA as forwarding or non-forwarding
* or forwarding entity (default is TRUE - forwarding entity)
*
+ * @NL80211_MESHCONF_RSSI_THRESHOLD: RSSI threshold in dBm. This specifies the
+ * threshold for average signal strength of a candidate station to establish
+ * a peer link.
+ *
* @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -2137,6 +2155,7 @@
NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
NL80211_MESHCONF_FORWARDING,
+ NL80211_MESHCONF_RSSI_THRESHOLD,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2804,10 +2823,13 @@
* TX status to the socket error queue when requested with the
* socket option.
* @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
+ * @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up
+ * the connected inactive stations in AP mode.
*/
enum nl80211_feature_flags {
NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
NL80211_FEATURE_HT_IBSS = 1 << 1,
+ NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
};
/**
diff --git a/include/linux/of.h b/include/linux/of.h
index a75a831..92cf6ad 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -281,6 +281,14 @@
return NULL;
}
+static inline struct device_node *of_find_compatible_node(
+ struct device_node *from,
+ const char *type,
+ const char *compat)
+{
+ return NULL;
+}
+
static inline int of_property_read_u32_array(const struct device_node *np,
const char *propname,
u32 *out_values, size_t sz)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 79ef820..8dc8257 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1173,7 +1173,7 @@
}
-static inline int skb_is_nonlinear(const struct sk_buff *skb)
+static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
return skb->data_len;
}
@@ -2469,12 +2469,12 @@
}
#endif
-static inline int skb_is_gso(const struct sk_buff *skb)
+static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
}
-static inline int skb_is_gso_v6(const struct sk_buff *skb)
+static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
diff --git a/include/linux/socket.h b/include/linux/socket.h
index d0e77f6..da2d3e2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -326,11 +326,11 @@
int offset,
unsigned int len, __wsum *csump);
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
-extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr);
+extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
struct timespec;
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index bbc2612..d276831 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -19,7 +19,7 @@
struct ssb_sprom_core_pwr_info {
u8 itssi_2g, itssi_5g;
u8 maxpwr_2g, maxpwr_5gl, maxpwr_5g, maxpwr_5gh;
- u16 pa_2g[3], pa_5gl[3], pa_5g[3], pa_5gh[3];
+ u16 pa_2g[4], pa_5gl[4], pa_5g[4], pa_5gh[4];
};
struct ssb_sprom {
@@ -32,9 +32,12 @@
u8 et0mdcport; /* MDIO for enet0 */
u8 et1mdcport; /* MDIO for enet1 */
u16 board_rev; /* Board revision number from SPROM. */
+ u16 board_num; /* Board number from SPROM. */
+ u16 board_type; /* Board type from SPROM. */
u8 country_code; /* Country Code */
- u16 leddc_on_time; /* LED Powersave Duty Cycle On Count */
- u16 leddc_off_time; /* LED Powersave Duty Cycle Off Count */
+ char alpha2[2]; /* Country Code as two chars like EU or US */
+ u8 leddc_on_time; /* LED Powersave Duty Cycle On Count */
+ u8 leddc_off_time; /* LED Powersave Duty Cycle Off Count */
u8 ant_available_a; /* 5GHz antenna available bits (up to 4) */
u8 ant_available_bg; /* 2GHz antenna available bits (up to 4) */
u16 pa0b0;
@@ -53,10 +56,10 @@
u8 gpio1; /* GPIO pin 1 */
u8 gpio2; /* GPIO pin 2 */
u8 gpio3; /* GPIO pin 3 */
- u16 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */
- u16 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */
- u16 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */
- u16 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */
u8 itssi_a; /* Idle TSSI Target for A-PHY */
u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */
u8 tri2g; /* 2.4GHz TX isolation */
@@ -67,8 +70,8 @@
u8 txpid5gl[4]; /* 4.9 - 5.1GHz TX power index */
u8 txpid5g[4]; /* 5.1 - 5.5GHz TX power index */
u8 txpid5gh[4]; /* 5.5 - ...GHz TX power index */
- u8 rxpo2g; /* 2GHz RX power offset */
- u8 rxpo5g; /* 5GHz RX power offset */
+ s8 rxpo2g; /* 2GHz RX power offset */
+ s8 rxpo5g; /* 5GHz RX power offset */
u8 rssisav2g; /* 2GHz RSSI params */
u8 rssismc2g;
u8 rssismf2g;
@@ -94,12 +97,7 @@
* on each band. Values in dBm/4 (Q5.2). Negative gain means the
* loss in the connectors is bigger than the gain. */
struct {
- struct {
- s8 a0, a1, a2, a3;
- } ghz24; /* 2.4GHz band */
- struct {
- s8 a0, a1, a2, a3;
- } ghz5; /* 5GHz band */
+ s8 a0, a1, a2, a3;
} antenna_gain;
struct {
@@ -111,7 +109,79 @@
} ghz5;
} fem;
- /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */
+ u16 mcs2gpo[8];
+ u16 mcs5gpo[8];
+ u16 mcs5glpo[8];
+ u16 mcs5ghpo[8];
+ u8 opo;
+
+ u8 rxgainerr2ga[3];
+ u8 rxgainerr5gla[3];
+ u8 rxgainerr5gma[3];
+ u8 rxgainerr5gha[3];
+ u8 rxgainerr5gua[3];
+
+ u8 noiselvl2ga[3];
+ u8 noiselvl5gla[3];
+ u8 noiselvl5gma[3];
+ u8 noiselvl5gha[3];
+ u8 noiselvl5gua[3];
+
+ u8 regrev;
+ u8 txchain;
+ u8 rxchain;
+ u8 antswitch;
+ u16 cddpo;
+ u16 stbcpo;
+ u16 bw40po;
+ u16 bwduppo;
+
+ u8 tempthresh;
+ u8 tempoffset;
+ u16 rawtempsense;
+ u8 measpower;
+ u8 tempsense_slope;
+ u8 tempcorrx;
+ u8 tempsense_option;
+ u8 freqoffset_corr;
+ u8 iqcal_swp_dis;
+ u8 hw_iqcal_en;
+ u8 elna2g;
+ u8 elna5g;
+ u8 phycal_tempdelta;
+ u8 temps_period;
+ u8 temps_hysteresis;
+ u8 measpower1;
+ u8 measpower2;
+ u8 pcieingress_war;
+
+ /* power per rate from sromrev 9 */
+ u16 cckbw202gpo;
+ u16 cckbw20ul2gpo;
+ u32 legofdmbw202gpo;
+ u32 legofdmbw20ul2gpo;
+ u32 legofdmbw205glpo;
+ u32 legofdmbw20ul5glpo;
+ u32 legofdmbw205gmpo;
+ u32 legofdmbw20ul5gmpo;
+ u32 legofdmbw205ghpo;
+ u32 legofdmbw20ul5ghpo;
+ u32 mcsbw202gpo;
+ u32 mcsbw20ul2gpo;
+ u32 mcsbw402gpo;
+ u32 mcsbw205glpo;
+ u32 mcsbw20ul5glpo;
+ u32 mcsbw405glpo;
+ u32 mcsbw205gmpo;
+ u32 mcsbw20ul5gmpo;
+ u32 mcsbw405gmpo;
+ u32 mcsbw205ghpo;
+ u32 mcsbw20ul5ghpo;
+ u32 mcsbw405ghpo;
+ u16 mcs32po;
+ u16 legofdm40duppo;
+ u8 sar2g;
+ u8 sar5g;
};
/* Information about the PCB the circuitry is soldered on. */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 4a82ca0..262ebd1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -109,12 +109,14 @@
*/
#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
-__printf(2, 3)
-int bt_printk(const char *level, const char *fmt, ...);
+__printf(1, 2)
+int bt_info(const char *fmt, ...);
+__printf(1, 2)
+int bt_err(const char *fmt, ...);
-#define BT_INFO(fmt, arg...) bt_printk(KERN_INFO, pr_fmt(fmt), ##arg)
-#define BT_ERR(fmt, arg...) bt_printk(KERN_ERR, pr_fmt(fmt), ##arg)
-#define BT_DBG(fmt, arg...) pr_debug(fmt "\n", ##arg)
+#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
+#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
/* Connection and socket states */
enum {
@@ -129,6 +131,33 @@
BT_CLOSED
};
+/* If unused, this will be removed by the compiler */
+static inline const char *state_to_string(int state)
+{
+ switch (state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
+
+ return "invalid state";
+}
+
/* BD Address */
typedef struct {
__u8 b[6];
@@ -193,7 +222,6 @@
__u16 tx_seq;
__u8 retries;
__u8 sar;
- unsigned short channel;
__u8 force_active;
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
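Since state_to_string() is a plain static inline switch, unused copies are discarded by the compiler and debug call sites can use it freely; a sketched call site (the chan variable is assumed from L2CAP context, not taken from the patch) would be:

/* Sketch: logging a channel state with the new helper. */
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));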
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 00596e8..344b0f9 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -77,14 +77,6 @@
HCI_RAW,
- HCI_SETUP,
- HCI_AUTO_OFF,
- HCI_MGMT,
- HCI_PAIRABLE,
- HCI_SERVICE_CACHE,
- HCI_LINK_KEYS,
- HCI_DEBUG_KEYS,
-
HCI_RESET,
};
@@ -93,7 +85,22 @@
* states from the controller.
*/
enum {
+ HCI_SETUP,
+ HCI_AUTO_OFF,
+ HCI_MGMT,
+ HCI_PAIRABLE,
+ HCI_SERVICE_CACHE,
+ HCI_LINK_KEYS,
+ HCI_DEBUG_KEYS,
+
HCI_LE_SCAN,
+ HCI_SSP_ENABLED,
+ HCI_HS_ENABLED,
+ HCI_LE_ENABLED,
+ HCI_CONNECTABLE,
+ HCI_DISCOVERABLE,
+ HCI_LINK_SECURITY,
+ HCI_PENDING_CLASS,
};
/* HCI ioctl defines */
@@ -130,6 +137,7 @@
#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */
+#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -229,7 +237,9 @@
#define LMP_EXTFEATURES 0x80
/* Extended LMP features */
-#define LMP_HOST_LE 0x02
+#define LMP_HOST_SSP 0x01
+#define LMP_HOST_LE 0x02
+#define LMP_HOST_LE_BREDR 0x04
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -268,10 +278,11 @@
#define HCI_LK_UNAUTH_COMBINATION 0x04
#define HCI_LK_AUTH_COMBINATION 0x05
#define HCI_LK_CHANGED_COMBINATION 0x06
-/* The spec doesn't define types for SMP keys */
-#define HCI_LK_SMP_LTK 0x81
-#define HCI_LK_SMP_IRK 0x82
-#define HCI_LK_SMP_CSRK 0x83
+/* The spec doesn't define types for SMP keys, the _MASTER suffix is implied */
+#define HCI_SMP_STK 0x80
+#define HCI_SMP_STK_SLAVE 0x81
+#define HCI_SMP_LTK 0x82
+#define HCI_SMP_LTK_SLAVE 0x83
/* ---- HCI Error Codes ---- */
#define HCI_ERROR_AUTH_FAILURE 0x05
@@ -284,6 +295,22 @@
#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
+/* Extended Inquiry Response field types */
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
+#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */
+#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+
/* ----- HCI Commands ---- */
#define HCI_OP_NOP 0x0000
@@ -666,8 +693,8 @@
#define HCI_OP_WRITE_EIR 0x0c52
struct hci_cp_write_eir {
- uint8_t fec;
- uint8_t data[HCI_MAX_EIR_LENGTH];
+ __u8 fec;
+ __u8 data[HCI_MAX_EIR_LENGTH];
} __packed;
#define HCI_OP_READ_SSP_MODE 0x0c55
@@ -698,8 +725,8 @@
#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
struct hci_cp_write_le_host_supported {
- __u8 le;
- __u8 simul;
+ __u8 le;
+ __u8 simul;
} __packed;
#define HCI_OP_READ_LOCAL_VERSION 0x1001
@@ -1155,6 +1182,19 @@
__u8 subevent;
} __packed;
+#define HCI_EV_NUM_COMP_BLOCKS 0x48
+struct hci_comp_blocks_info {
+ __le16 handle;
+ __le16 pkts;
+ __le16 blocks;
+} __packed;
+
+struct hci_ev_num_comp_blocks {
+ __le16 num_blocks;
+ __u8 num_hndl;
+ struct hci_comp_blocks_info handles[0];
+} __packed;
+
/* Low energy meta events */
#define HCI_EV_LE_CONN_COMPLETE 0x01
struct hci_ev_le_conn_complete {
@@ -1288,6 +1328,7 @@
#define HCI_CHANNEL_RAW 0
#define HCI_CHANNEL_CONTROL 1
+#define HCI_CHANNEL_MONITOR 2
struct hci_filter {
unsigned long type_mask;
@@ -1389,5 +1430,6 @@
#define IREQ_CACHE_FLUSH 0x0001
extern bool enable_hs;
+extern bool enable_le;
#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 453893b..daefaac 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -44,14 +44,31 @@
};
struct inquiry_entry {
- struct inquiry_entry *next;
+ struct list_head all; /* inq_cache.all */
+ struct list_head list; /* unknown or resolve */
+ enum {
+ NAME_NOT_KNOWN,
+ NAME_NEEDED,
+ NAME_PENDING,
+ NAME_KNOWN,
+ } name_state;
__u32 timestamp;
struct inquiry_data data;
};
-struct inquiry_cache {
+struct discovery_state {
+ int type;
+ enum {
+ DISCOVERY_STOPPED,
+ DISCOVERY_STARTING,
+ DISCOVERY_FINDING,
+ DISCOVERY_RESOLVING,
+ DISCOVERY_STOPPING,
+ } state;
+ struct list_head all; /* All devices found during inquiry */
+ struct list_head unknown; /* Name state not known */
+ struct list_head resolve; /* Name needs to be resolved */
__u32 timestamp;
- struct inquiry_entry *list;
};
struct hci_conn_hash {
@@ -72,18 +89,16 @@
u8 svc_hint;
};
-struct key_master_id {
+struct smp_ltk {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 authenticated;
+ u8 type;
+ u8 enc_size;
__le16 ediv;
u8 rand[8];
-} __packed;
-
-struct link_key_data {
- bdaddr_t bdaddr;
- u8 type;
u8 val[16];
- u8 pin_len;
- u8 dlen;
- u8 data[0];
} __packed;
struct link_key {
@@ -92,8 +107,6 @@
u8 type;
u8 val[16];
u8 pin_len;
- u8 dlen;
- u8 data[0];
};
struct oob_data {
@@ -109,11 +122,19 @@
u8 bdaddr_type;
};
+struct le_scan_params {
+ u8 type;
+ u16 interval;
+ u16 window;
+ int timeout;
+};
+
+#define HCI_MAX_SHORT_NAME_LENGTH 10
+
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
struct mutex lock;
- atomic_t refcnt;
char name[8];
unsigned long flags;
@@ -122,6 +143,7 @@
__u8 dev_type;
bdaddr_t bdaddr;
__u8 dev_name[HCI_MAX_NAME_LENGTH];
+ __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
__u8 dev_class[3];
__u8 major_class;
@@ -129,7 +151,6 @@
__u8 features[8];
__u8 host_features[8];
__u8 commands[64];
- __u8 ssp_mode;
__u8 hci_ver;
__u16 hci_rev;
__u8 lmp_ver;
@@ -217,7 +238,7 @@
struct list_head mgmt_pending;
- struct inquiry_cache inq_cache;
+ struct discovery_state discovery;
struct hci_conn_hash conn_hash;
struct list_head blacklist;
@@ -225,6 +246,8 @@
struct list_head link_keys;
+ struct list_head long_term_keys;
+
struct list_head remote_oob_data;
struct list_head adv_entries;
@@ -234,7 +257,6 @@
struct sk_buff_head driver_init;
- void *driver_data;
void *core_data;
atomic_t promisc;
@@ -246,15 +268,17 @@
struct rfkill *rfkill;
- struct module *owner;
-
unsigned long dev_flags;
+ struct delayed_work le_scan_disable;
+
+ struct work_struct le_scan;
+ struct le_scan_params le_scan_params;
+
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*send)(struct sk_buff *skb);
- void (*destruct)(struct hci_dev *hdev);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
@@ -270,11 +294,10 @@
__u16 state;
__u8 mode;
__u8 type;
- __u8 out;
+ bool out;
__u8 attempt;
__u8 dev_class[3];
__u8 features[8];
- __u8 ssp_mode;
__u16 interval;
__u16 pkt_type;
__u16 link_policy;
@@ -286,12 +309,10 @@
__u8 pin_length;
__u8 enc_key_size;
__u8 io_capability;
- __u8 power_save;
__u16 disc_timeout;
- unsigned long pend;
+ unsigned long flags;
__u8 remote_cap;
- __u8 remote_oob;
__u8 remote_auth;
unsigned int sent;
@@ -348,21 +369,26 @@
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
-static inline void inquiry_cache_init(struct hci_dev *hdev)
+static inline void discovery_init(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
- c->list = NULL;
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ INIT_LIST_HEAD(&hdev->discovery.all);
+ INIT_LIST_HEAD(&hdev->discovery.unknown);
+ INIT_LIST_HEAD(&hdev->discovery.resolve);
}
+bool hci_discovery_active(struct hci_dev *hdev);
+
+void hci_discovery_set_state(struct hci_dev *hdev, int state);
+
static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
- return c->list == NULL;
+ return list_empty(&hdev->discovery.all);
}
static inline long inquiry_cache_age(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
+ struct discovery_state *c = &hdev->discovery;
return jiffies - c->timestamp;
}
@@ -372,8 +398,16 @@
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
- bdaddr_t *bdaddr);
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state);
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie);
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known, bool *ssp);
/* ----- HCI Connections ----- */
enum {
@@ -384,8 +418,19 @@
HCI_CONN_MODE_CHANGE_PEND,
HCI_CONN_SCO_SETUP_PEND,
HCI_CONN_LE_SMP_PEND,
+ HCI_CONN_MGMT_CONNECTED,
+ HCI_CONN_SSP_ENABLED,
+ HCI_CONN_POWER_SAVE,
+ HCI_CONN_REMOTE_OOB,
};
+static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ return (test_bit(HCI_SSP_ENABLED, &hdev->flags) &&
+ test_bit(HCI_CONN_SSP_ENABLED, &conn->flags));
+}
+
static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -566,36 +611,33 @@
}
/* ----- HCI Devices ----- */
-static inline void __hci_dev_put(struct hci_dev *d)
+static inline void hci_dev_put(struct hci_dev *d)
{
- if (atomic_dec_and_test(&d->refcnt))
- d->destruct(d);
+ put_device(&d->dev);
}
-/*
- * hci_dev_put and hci_dev_hold are macros to avoid dragging all the
- * overhead of all the modular infrastructure into this header.
- */
-#define hci_dev_put(d) \
-do { \
- __hci_dev_put(d); \
- module_put(d->owner); \
-} while (0)
-
-static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
+static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
- atomic_inc(&d->refcnt);
+ get_device(&d->dev);
return d;
}
-#define hci_dev_hold(d) \
-({ \
- try_module_get(d->owner) ? __hci_dev_hold(d) : NULL; \
-})
-
#define hci_dev_lock(d) mutex_lock(&d->lock)
#define hci_dev_unlock(d) mutex_unlock(&d->lock)
+#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
+#define to_hci_conn(c) container_of(c, struct hci_conn, dev)
+
+static inline void *hci_get_drvdata(struct hci_dev *hdev)
+{
+ return dev_get_drvdata(&hdev->dev);
+}
+
+static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
+{
+ dev_set_drvdata(&hdev->dev, data);
+}
+
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@@ -619,20 +661,23 @@
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int hci_uuids_clear(struct hci_dev *hdev);
int hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
- bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
-struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
-struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 type);
-int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
- u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]);
+ bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
+int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16 ediv,
+ u8 rand[8]);
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_remote_oob_data_clear(struct hci_dev *hdev);
@@ -674,6 +719,7 @@
#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH)
#define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE)
+#define lmp_bredr_capable(dev) (!((dev)->features[4] & LMP_NO_BREDR))
/* ----- Extended LMP capabilities ----- */
#define lmp_host_le_capable(dev) ((dev)->host_features[0] & LMP_HOST_LE)
@@ -755,7 +801,7 @@
if (conn->type != ACL_LINK && conn->type != LE_LINK)
return;
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
@@ -796,7 +842,7 @@
hci_proto_auth_cfm(conn, status);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
@@ -859,25 +905,71 @@
read_unlock(&hci_cb_list_lock);
}
+static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
+{
+ u8 field_len;
+ size_t parsed;
+
+ for (parsed = 0; parsed < data_len - 1; parsed += field_len) {
+ field_len = data[0];
+
+ if (field_len == 0)
+ break;
+
+ parsed += field_len + 1;
+
+ if (parsed > data_len)
+ break;
+
+ if (data[1] == type)
+ return true;
+
+ data += field_len + 1;
+ }
+
+ return false;
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
+}
+
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-int hci_register_notifier(struct notifier_block *nb);
-int hci_unregister_notifier(struct notifier_block *nb);
-
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
-void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
-
/* ----- HCI Sockets ----- */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
- struct sock *skip_sk);
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event);
/* Management interface */
+#define MGMT_ADDR_BREDR 0x00
+#define MGMT_ADDR_LE_PUBLIC 0x01
+#define MGMT_ADDR_LE_RANDOM 0x02
+#define MGMT_ADDR_INVALID 0xff
+
+#define DISCOV_TYPE_BREDR (BIT(MGMT_ADDR_BREDR))
+#define DISCOV_TYPE_LE (BIT(MGMT_ADDR_LE_PUBLIC) | \
+ BIT(MGMT_ADDR_LE_RANDOM))
+#define DISCOV_TYPE_INTERLEAVED (BIT(MGMT_ADDR_BREDR) | \
+ BIT(MGMT_ADDR_LE_PUBLIC) | \
+ BIT(MGMT_ADDR_LE_RANDOM))
+
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(struct hci_dev *hdev);
int mgmt_index_removed(struct hci_dev *hdev);
@@ -886,56 +978,67 @@
int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
- u8 persistent);
-int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type);
-int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type);
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+ u8 persistent);
+int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class);
+int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type);
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
+ u8 addr_type, u8 status);
int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status);
+ u8 status);
int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status);
+ u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
- __le32 value, u8 confirm_hint);
+ u8 link_type, u8 addr_type, __le32 value,
+ u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status);
-int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 status);
-int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status);
-int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 status);
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status);
int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
- u8 *randomizer, u8 status);
+ u8 *randomizer, u8 status);
+int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+ u8 ssp, u8 *eir, u16 eir_len);
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len);
int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
-int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_interleaved_discovery(struct hci_dev *hdev);
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
+int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
-/* HCI socket flags */
-#define HCI_PI_MGMT_INIT 0
-
struct hci_pinfo {
struct bt_sock bt;
struct hci_dev *hdev;
struct hci_filter filter;
__u32 cmsg_mask;
unsigned short channel;
- unsigned long flags;
};
/* HCI security filter */
@@ -966,5 +1069,7 @@
int hci_do_inquiry(struct hci_dev *hdev, u8 length);
int hci_cancel_inquiry(struct hci_dev *hdev);
+int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
+ int timeout);
#endif /* __HCI_CORE_H */
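eir_append_data() above emits one length/type/value field and returns the updated offset, so an EIR block is built by chaining calls; a small sketch using the EIR_* types from the hci.h change (the local name and TX power values are illustrative):

/* Sketch: composing an extended inquiry response with eir_append_data(). */
u8 eir[HCI_MAX_EIR_LENGTH] = { 0 };
u8 name[] = "example";
s8 tx_power = 7;
u16 eir_len = 0;

eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE, name, sizeof(name) - 1);
eir_len = eir_append_data(eir, eir_len, EIR_TX_POWER, (u8 *) &tx_power, 1);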
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
new file mode 100644
index 0000000..77d1e57
--- /dev/null
+++ b/include/net/bluetooth/hci_mon.h
@@ -0,0 +1,51 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_MON_H
+#define __HCI_MON_H
+
+struct hci_mon_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+#define HCI_MON_HDR_SIZE 6
+
+#define HCI_MON_NEW_INDEX 0
+#define HCI_MON_DEL_INDEX 1
+#define HCI_MON_COMMAND_PKT 2
+#define HCI_MON_EVENT_PKT 3
+#define HCI_MON_ACL_TX_PKT 4
+#define HCI_MON_ACL_RX_PKT 5
+#define HCI_MON_SCO_TX_PKT 6
+#define HCI_MON_SCO_RX_PKT 7
+
+struct hci_mon_new_index {
+ __u8 type;
+ __u8 bus;
+ bdaddr_t bdaddr;
+ char name[8];
+} __packed;
+#define HCI_MON_NEW_INDEX_SIZE 16
+
+#endif /* __HCI_MON_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index b1664ed..9b242c6 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -45,11 +45,11 @@
#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
-#define L2CAP_DISC_TIMEOUT (100)
-#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */
-#define L2CAP_ENC_TIMEOUT (5000) /* 5 seconds */
-#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
-#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
+#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
+#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_ENC_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
+#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
/* L2CAP socket address */
struct sockaddr_l2 {
@@ -492,51 +492,56 @@
struct sk_buff_head srej_q;
struct list_head srej_l;
- struct list_head list;
- struct list_head global_l;
+ struct list_head list;
+ struct list_head global_l;
- void *data;
- struct l2cap_ops *ops;
+ void *data;
+ struct l2cap_ops *ops;
+ struct mutex lock;
};
struct l2cap_ops {
- char *name;
+ char *name;
struct l2cap_chan *(*new_connection) (void *data);
int (*recv) (void *data, struct sk_buff *skb);
void (*close) (void *data);
void (*state_change) (void *data, int state);
+ struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
+ unsigned long len, int nb, int *err);
+
};
struct l2cap_conn {
- struct hci_conn *hcon;
- struct hci_chan *hchan;
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
- bdaddr_t *dst;
- bdaddr_t *src;
+ bdaddr_t *dst;
+ bdaddr_t *src;
- unsigned int mtu;
+ unsigned int mtu;
- __u32 feat_mask;
+ __u32 feat_mask;
+ __u8 fixed_chan_mask;
- __u8 info_state;
- __u8 info_ident;
+ __u8 info_state;
+ __u8 info_ident;
- struct delayed_work info_timer;
+ struct delayed_work info_timer;
- spinlock_t lock;
+ spinlock_t lock;
- struct sk_buff *rx_skb;
- __u32 rx_len;
- __u8 tx_ident;
+ struct sk_buff *rx_skb;
+ __u32 rx_len;
+ __u8 tx_ident;
- __u8 disc_reason;
+ __u8 disc_reason;
- struct delayed_work security_timer;
- struct smp_chan *smp_chan;
+ struct delayed_work security_timer;
+ struct smp_chan *smp_chan;
- struct list_head chan_l;
- struct mutex chan_lock;
+ struct list_head chan_l;
+ struct mutex chan_lock;
};
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
@@ -551,9 +556,9 @@
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
struct l2cap_pinfo {
- struct bt_sock bt;
+ struct bt_sock bt;
struct l2cap_chan *chan;
- struct sk_buff *rx_busy_skb;
+ struct sk_buff *rx_busy_skb;
};
enum {
@@ -606,21 +611,37 @@
kfree(c);
}
+static inline void l2cap_chan_lock(struct l2cap_chan *chan)
+{
+ mutex_lock(&chan->lock);
+}
+
+static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
+{
+ mutex_unlock(&chan->lock);
+}
+
static inline void l2cap_set_timer(struct l2cap_chan *chan,
struct delayed_work *work, long timeout)
{
- BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
+ BT_DBG("chan %p state %s timeout %ld", chan,
+ state_to_string(chan->state), timeout);
if (!cancel_delayed_work(work))
l2cap_chan_hold(chan);
schedule_delayed_work(work, timeout);
}
-static inline void l2cap_clear_timer(struct l2cap_chan *chan,
+static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
struct delayed_work *work)
{
- if (cancel_delayed_work(work))
+ bool ret;
+
+ ret = cancel_delayed_work(work);
+ if (ret)
l2cap_chan_put(chan);
+
+ return ret;
}
#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
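A toy userspace model of the reference-counting discipline these helpers implement: arming a timer takes a channel reference unless one was already pending, and the new bool return of l2cap_clear_timer tells the caller whether a pending timer (and hence a held reference) was actually dropped. The chan_hold()/chan_put()/cancel_pending() names below are stand-ins, not kernel functions.

/* Toy model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct chan { int refcnt; bool timer_pending; };

static void chan_hold(struct chan *c) { c->refcnt++; }
static void chan_put(struct chan *c)  { c->refcnt--; }

static bool cancel_pending(struct chan *c)
{
        bool was_pending = c->timer_pending;
        c->timer_pending = false;
        return was_pending;
}

static void set_timer(struct chan *c)
{
        if (!cancel_pending(c))         /* no timer armed yet: take a ref */
                chan_hold(c);
        c->timer_pending = true;
}

static bool clear_timer(struct chan *c)
{
        bool ret = cancel_pending(c);
        if (ret)                        /* drop only the ref taken when arming */
                chan_put(c);
        return ret;
}

int main(void)
{
        struct chan c = { .refcnt = 1, .timer_pending = false };

        set_timer(&c);                  /* refcnt 2, timer armed */
        set_timer(&c);                  /* re-arm: refcnt stays 2 */
        printf("cleared=%d refcnt=%d\n", clear_timer(&c), c.refcnt);
        return 0;
}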
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index be65d34..ffc1377 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -2,6 +2,7 @@
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -39,29 +40,47 @@
#define MGMT_STATUS_INVALID_PARAMS 0x0d
#define MGMT_STATUS_DISCONNECTED 0x0e
#define MGMT_STATUS_NOT_POWERED 0x0f
+#define MGMT_STATUS_CANCELLED 0x10
+#define MGMT_STATUS_INVALID_INDEX 0x11
struct mgmt_hdr {
- __le16 opcode;
- __le16 index;
- __le16 len;
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
} __packed;
+struct mgmt_addr_info {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+#define MGMT_ADDR_INFO_SIZE 7
+
#define MGMT_OP_READ_VERSION 0x0001
+#define MGMT_READ_VERSION_SIZE 0
struct mgmt_rp_read_version {
- __u8 version;
- __le16 revision;
+ __u8 version;
+ __le16 revision;
+} __packed;
+
+#define MGMT_OP_READ_COMMANDS 0x0002
+#define MGMT_READ_COMMANDS_SIZE 0
+struct mgmt_rp_read_commands {
+ __le16 num_commands;
+ __le16 num_events;
+ __le16 opcodes[0];
} __packed;
#define MGMT_OP_READ_INDEX_LIST 0x0003
+#define MGMT_READ_INDEX_LIST_SIZE 0
struct mgmt_rp_read_index_list {
- __le16 num_controllers;
- __le16 index[0];
+ __le16 num_controllers;
+ __le16 index[0];
} __packed;
/* Reserve one extra byte for names in management messages so that they
* are always guaranteed to be nul-terminated */
#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
-#define MGMT_MAX_SHORT_NAME_LENGTH (10 + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1)
#define MGMT_SETTING_POWERED 0x00000001
#define MGMT_SETTING_CONNECTABLE 0x00000002
@@ -75,28 +94,32 @@
#define MGMT_SETTING_LE 0x00000200
#define MGMT_OP_READ_INFO 0x0004
+#define MGMT_READ_INFO_SIZE 0
struct mgmt_rp_read_info {
- bdaddr_t bdaddr;
- __u8 version;
- __le16 manufacturer;
- __le32 supported_settings;
- __le32 current_settings;
- __u8 dev_class[3];
- __u8 name[MGMT_MAX_NAME_LENGTH];
- __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __u8 dev_class[3];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
} __packed;
struct mgmt_mode {
__u8 val;
} __packed;
+#define MGMT_SETTING_SIZE 1
+
#define MGMT_OP_SET_POWERED 0x0005
#define MGMT_OP_SET_DISCOVERABLE 0x0006
struct mgmt_cp_set_discoverable {
- __u8 val;
- __u16 timeout;
+ __u8 val;
+ __u16 timeout;
} __packed;
+#define MGMT_SET_DISCOVERABLE_SIZE 3
#define MGMT_OP_SET_CONNECTABLE 0x0007
@@ -111,73 +134,76 @@
#define MGMT_OP_SET_HS 0x000C
#define MGMT_OP_SET_LE 0x000D
-
#define MGMT_OP_SET_DEV_CLASS 0x000E
struct mgmt_cp_set_dev_class {
- __u8 major;
- __u8 minor;
+ __u8 major;
+ __u8 minor;
} __packed;
+#define MGMT_SET_DEV_CLASS_SIZE 2
#define MGMT_OP_SET_LOCAL_NAME 0x000F
struct mgmt_cp_set_local_name {
- __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
} __packed;
+#define MGMT_SET_LOCAL_NAME_SIZE 260
#define MGMT_OP_ADD_UUID 0x0010
struct mgmt_cp_add_uuid {
- __u8 uuid[16];
- __u8 svc_hint;
+ __u8 uuid[16];
+ __u8 svc_hint;
} __packed;
+#define MGMT_ADD_UUID_SIZE 17
#define MGMT_OP_REMOVE_UUID 0x0011
struct mgmt_cp_remove_uuid {
- __u8 uuid[16];
+ __u8 uuid[16];
} __packed;
+#define MGMT_REMOVE_UUID_SIZE 16
struct mgmt_link_key_info {
- bdaddr_t bdaddr;
- u8 type;
- u8 val[16];
- u8 pin_len;
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+ __u8 pin_len;
} __packed;
#define MGMT_OP_LOAD_LINK_KEYS 0x0012
struct mgmt_cp_load_link_keys {
- __u8 debug_keys;
- __le16 key_count;
- struct mgmt_link_key_info keys[0];
+ __u8 debug_keys;
+ __le16 key_count;
+ struct mgmt_link_key_info keys[0];
+} __packed;
+#define MGMT_LOAD_LINK_KEYS_SIZE 3
+
+struct mgmt_ltk_info {
+ struct mgmt_addr_info addr;
+ __u8 authenticated;
+ __u8 master;
+ __u8 enc_size;
+ __le16 ediv;
+ __u8 rand[8];
+ __u8 val[16];
} __packed;
-#define MGMT_OP_REMOVE_KEYS 0x0013
-struct mgmt_cp_remove_keys {
- bdaddr_t bdaddr;
- __u8 disconnect;
+#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013
+struct mgmt_cp_load_long_term_keys {
+ __le16 key_count;
+ struct mgmt_ltk_info keys[0];
} __packed;
-struct mgmt_rp_remove_keys {
- bdaddr_t bdaddr;
- __u8 status;
-};
+#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2
#define MGMT_OP_DISCONNECT 0x0014
struct mgmt_cp_disconnect {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_DISCONNECT_SIZE MGMT_ADDR_INFO_SIZE
struct mgmt_rp_disconnect {
- bdaddr_t bdaddr;
- __u8 status;
-} __packed;
-
-#define MGMT_ADDR_BREDR 0x00
-#define MGMT_ADDR_LE_PUBLIC 0x01
-#define MGMT_ADDR_LE_RANDOM 0x02
-#define MGMT_ADDR_INVALID 0xff
-
-struct mgmt_addr_info {
- bdaddr_t bdaddr;
- __u8 type;
+ struct mgmt_addr_info addr;
} __packed;
#define MGMT_OP_GET_CONNECTIONS 0x0015
+#define MGMT_GET_CONNECTIONS_SIZE 0
struct mgmt_rp_get_connections {
__le16 conn_count;
struct mgmt_addr_info addr[0];
@@ -185,124 +211,152 @@
#define MGMT_OP_PIN_CODE_REPLY 0x0016
struct mgmt_cp_pin_code_reply {
- bdaddr_t bdaddr;
- __u8 pin_len;
- __u8 pin_code[16];
+ struct mgmt_addr_info addr;
+ __u8 pin_len;
+ __u8 pin_code[16];
} __packed;
+#define MGMT_PIN_CODE_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 17)
struct mgmt_rp_pin_code_reply {
- bdaddr_t bdaddr;
- uint8_t status;
+ struct mgmt_addr_info addr;
} __packed;
#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
struct mgmt_cp_pin_code_neg_reply {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_PIN_CODE_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
#define MGMT_OP_SET_IO_CAPABILITY 0x0018
struct mgmt_cp_set_io_capability {
- __u8 io_capability;
+ __u8 io_capability;
} __packed;
+#define MGMT_SET_IO_CAPABILITY_SIZE 1
#define MGMT_OP_PAIR_DEVICE 0x0019
struct mgmt_cp_pair_device {
struct mgmt_addr_info addr;
- __u8 io_cap;
+ __u8 io_cap;
} __packed;
+#define MGMT_PAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
struct mgmt_rp_pair_device {
struct mgmt_addr_info addr;
- __u8 status;
} __packed;
-#define MGMT_OP_USER_CONFIRM_REPLY 0x001A
+#define MGMT_OP_CANCEL_PAIR_DEVICE 0x001A
+#define MGMT_CANCEL_PAIR_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNPAIR_DEVICE 0x001B
+struct mgmt_cp_unpair_device {
+ struct mgmt_addr_info addr;
+ __u8 disconnect;
+} __packed;
+#define MGMT_UNPAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_unpair_device {
+ struct mgmt_addr_info addr;
+};
+
+#define MGMT_OP_USER_CONFIRM_REPLY 0x001C
struct mgmt_cp_user_confirm_reply {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_USER_CONFIRM_REPLY_SIZE MGMT_ADDR_INFO_SIZE
struct mgmt_rp_user_confirm_reply {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001B
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001D
struct mgmt_cp_user_confirm_neg_reply {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_USER_CONFIRM_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_USER_PASSKEY_REPLY 0x001C
+#define MGMT_OP_USER_PASSKEY_REPLY 0x001E
struct mgmt_cp_user_passkey_reply {
- bdaddr_t bdaddr;
- __le32 passkey;
+ struct mgmt_addr_info addr;
+ __le32 passkey;
} __packed;
+#define MGMT_USER_PASSKEY_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 4)
struct mgmt_rp_user_passkey_reply {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001D
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001F
struct mgmt_cp_user_passkey_neg_reply {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_USER_PASSKEY_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_READ_LOCAL_OOB_DATA 0x001E
+#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
+#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
struct mgmt_rp_read_local_oob_data {
- __u8 hash[16];
- __u8 randomizer[16];
+ __u8 hash[16];
+ __u8 randomizer[16];
} __packed;
-#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x001F
+#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
struct mgmt_cp_add_remote_oob_data {
- bdaddr_t bdaddr;
- __u8 hash[16];
- __u8 randomizer[16];
+ struct mgmt_addr_info addr;
+ __u8 hash[16];
+ __u8 randomizer[16];
} __packed;
+#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
-#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0020
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022
struct mgmt_cp_remove_remote_oob_data {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_REMOVE_REMOTE_OOB_DATA_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_START_DISCOVERY 0x0021
+#define MGMT_OP_START_DISCOVERY 0x0023
struct mgmt_cp_start_discovery {
__u8 type;
} __packed;
+#define MGMT_START_DISCOVERY_SIZE 1
-#define MGMT_OP_STOP_DISCOVERY 0x0022
+#define MGMT_OP_STOP_DISCOVERY 0x0024
+struct mgmt_cp_stop_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_STOP_DISCOVERY_SIZE 1
-#define MGMT_OP_CONFIRM_NAME 0x0023
+#define MGMT_OP_CONFIRM_NAME 0x0025
struct mgmt_cp_confirm_name {
- bdaddr_t bdaddr;
- __u8 name_known;
+ struct mgmt_addr_info addr;
+ __u8 name_known;
} __packed;
+#define MGMT_CONFIRM_NAME_SIZE (MGMT_ADDR_INFO_SIZE + 1)
struct mgmt_rp_confirm_name {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_BLOCK_DEVICE 0x0024
+#define MGMT_OP_BLOCK_DEVICE 0x0026
struct mgmt_cp_block_device {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_BLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_UNBLOCK_DEVICE 0x0025
+#define MGMT_OP_UNBLOCK_DEVICE 0x0027
struct mgmt_cp_unblock_device {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
- __le16 opcode;
- __u8 data[0];
+ __le16 opcode;
+ __u8 status;
+ __u8 data[0];
} __packed;
#define MGMT_EV_CMD_STATUS 0x0002
struct mgmt_ev_cmd_status {
- __u8 status;
- __le16 opcode;
+ __le16 opcode;
+ __u8 status;
} __packed;
#define MGMT_EV_CONTROLLER_ERROR 0x0003
struct mgmt_ev_controller_error {
- __u8 error_code;
+ __u8 error_code;
} __packed;
#define MGMT_EV_INDEX_ADDED 0x0004
@@ -313,78 +367,96 @@
#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
struct mgmt_ev_class_of_dev_changed {
- __u8 dev_class[3];
+ __u8 dev_class[3];
};
#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
struct mgmt_ev_local_name_changed {
- __u8 name[MGMT_MAX_NAME_LENGTH];
- __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
} __packed;
#define MGMT_EV_NEW_LINK_KEY 0x0009
struct mgmt_ev_new_link_key {
- __u8 store_hint;
+ __u8 store_hint;
struct mgmt_link_key_info key;
} __packed;
-#define MGMT_EV_CONNECTED 0x000A
+#define MGMT_EV_NEW_LONG_TERM_KEY 0x000A
+struct mgmt_ev_new_long_term_key {
+ __u8 store_hint;
+ struct mgmt_ltk_info key;
+} __packed;
-#define MGMT_EV_DISCONNECTED 0x000B
+#define MGMT_EV_DEVICE_CONNECTED 0x000B
+struct mgmt_ev_device_connected {
+ struct mgmt_addr_info addr;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
-#define MGMT_EV_CONNECT_FAILED 0x000C
+#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
+
+#define MGMT_EV_CONNECT_FAILED 0x000D
struct mgmt_ev_connect_failed {
struct mgmt_addr_info addr;
- __u8 status;
+ __u8 status;
} __packed;
-#define MGMT_EV_PIN_CODE_REQUEST 0x000D
+#define MGMT_EV_PIN_CODE_REQUEST 0x000E
struct mgmt_ev_pin_code_request {
- bdaddr_t bdaddr;
- __u8 secure;
+ struct mgmt_addr_info addr;
+ __u8 secure;
} __packed;
-#define MGMT_EV_USER_CONFIRM_REQUEST 0x000E
+#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
struct mgmt_ev_user_confirm_request {
- bdaddr_t bdaddr;
- __u8 confirm_hint;
- __le32 value;
+ struct mgmt_addr_info addr;
+ __u8 confirm_hint;
+ __le32 value;
} __packed;
-#define MGMT_EV_USER_PASSKEY_REQUEST 0x000F
+#define MGMT_EV_USER_PASSKEY_REQUEST 0x0010
struct mgmt_ev_user_passkey_request {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_EV_AUTH_FAILED 0x0010
+#define MGMT_EV_AUTH_FAILED 0x0011
struct mgmt_ev_auth_failed {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
+ __u8 status;
} __packed;
-#define MGMT_EV_DEVICE_FOUND 0x0011
+#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
+#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
+
+#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
struct mgmt_addr_info addr;
- __u8 dev_class[3];
- __s8 rssi;
- __u8 confirm_name;
- __u8 eir[HCI_MAX_EIR_LENGTH];
-} __packed;
-
-#define MGMT_EV_REMOTE_NAME 0x0012
-struct mgmt_ev_remote_name {
- bdaddr_t bdaddr;
- __u8 name[MGMT_MAX_NAME_LENGTH];
+ __s8 rssi;
+ __u8 flags[4];
+ __le16 eir_len;
+ __u8 eir[0];
} __packed;
#define MGMT_EV_DISCOVERING 0x0013
+struct mgmt_ev_discovering {
+ __u8 type;
+ __u8 discovering;
+} __packed;
#define MGMT_EV_DEVICE_BLOCKED 0x0014
struct mgmt_ev_device_blocked {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
struct mgmt_ev_device_unblocked {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNPAIRED 0x0016
+struct mgmt_ev_device_unpaired {
+ struct mgmt_addr_info addr;
} __packed;
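To illustrate the reworked parameter blocks, a hedged sketch of how a userspace tool might lay out a MGMT_OP_DISCONNECT request now that commands carry a struct mgmt_addr_info (bdaddr plus address type) rather than a bare bdaddr_t. The controller index and address bytes are example values; the type byte 0x00 follows the old MGMT_ADDR_BREDR definition removed above.

/* Illustrative sketch only: constants mirror the header above. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MGMT_OP_DISCONNECT      0x0014
#define MGMT_ADDR_INFO_SIZE     7
#define MGMT_DISCONNECT_SIZE    MGMT_ADDR_INFO_SIZE
#define MGMT_HDR_SIZE           6

static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
}

int main(void)
{
        uint8_t buf[MGMT_HDR_SIZE + MGMT_DISCONNECT_SIZE];
        const uint8_t bdaddr[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

        put_le16(&buf[0], MGMT_OP_DISCONNECT);  /* opcode */
        put_le16(&buf[2], 0);                   /* controller index */
        put_le16(&buf[4], MGMT_DISCONNECT_SIZE);/* parameter length */
        memcpy(&buf[6], bdaddr, 6);             /* mgmt_addr_info.bdaddr */
        buf[12] = 0x00;                         /* mgmt_addr_info.type: BR/EDR */

        printf("built %zu byte command\n", sizeof(buf));
        return 0;
}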
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index aeaf5fa..7b3acdd 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -127,7 +127,7 @@
u8 rrnd[16]; /* SMP Pairing Random (remote) */
u8 pcnf[16]; /* SMP Pairing Confirm */
u8 tk[16]; /* SMP Temporary Key */
- u8 smp_key_size;
+ u8 enc_key_size;
unsigned long smp_flags;
struct crypto_blkcipher *tfm;
struct work_struct confirm;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0178c74..57c9fdd 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -413,6 +413,7 @@
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
+ * @inactivity_timeout: time in seconds to determine station's inactivity.
*/
struct cfg80211_ap_settings {
struct cfg80211_beacon_data beacon;
@@ -424,6 +425,7 @@
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
+ int inactivity_timeout;
};
/**
@@ -809,6 +811,7 @@
* Still keeping the same nomenclature to be in sync with the spec. */
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
+ s32 rssi_threshold;
};
/**
@@ -1356,12 +1359,10 @@
*
* @set_rekey_data: give the data necessary for GTK rekeying to the driver
*
- * @add_beacon: Add a beacon with given parameters, @head, @interval
- * and @dtim_period will be valid, @tail is optional.
- * @set_beacon: Change the beacon parameters for an access point mode
- * interface. This should reject the call when no beacon has been
- * configured.
- * @del_beacon: Remove beacon configuration and stop sending the beacon.
+ * @start_ap: Start acting in AP mode defined by the parameters.
+ * @change_beacon: Change the beacon parameters for an access point mode
+ * interface. This should reject the call when AP mode wasn't started.
+ * @stop_ap: Stop being an AP, including stopping beaconing.
*
* @add_station: Add a new station.
* @del_station: Remove a station; @mac may be NULL to remove all stations.
@@ -3188,6 +3189,7 @@
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
* @dev: network device
* @freq: Frequency on which the frame was received in MHz
+ * @sig_dbm: signal strength in mBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
* @gfp: context flags
@@ -3200,8 +3202,8 @@
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
*/
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
- size_t len, gfp_t gfp);
+bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_dbm,
+ const u8 *buf, size_t len, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
@@ -3314,6 +3316,7 @@
* @frame: the frame
* @len: length of the frame
* @freq: frequency the frame was received on
+ * @sig_dbm: signal strength in mBm, or 0 if unknown
* @gfp: allocation flags
*
* Use this function to report to userspace when a beacon was
@@ -3322,7 +3325,7 @@
*/
void cfg80211_report_obss_beacon(struct wiphy *wiphy,
const u8 *frame, size_t len,
- int freq, gfp_t gfp);
+ int freq, int sig_dbm, gfp_t gfp);
/*
* cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
@@ -3334,6 +3337,14 @@
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type);
+/*
+ * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
+ * @rate: given rate_info to calculate bitrate from
+ *
+ * return 0 if MCS index >= 32
+ */
+u16 cfg80211_calculate_bitrate(struct rate_info *rate);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
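Since cfg80211_calculate_bitrate() reports its result in units of 100 kbit/s (and 0 when the MCS index is >= 32), a caller displaying the value would scale it roughly as in this standalone sketch; print_bitrate() is a hypothetical wrapper, not a cfg80211 function.

/* Sketch only: 1500 in 100 kbit/s units is 150.0 MBit/s. */
#include <stdio.h>

static void print_bitrate(unsigned int rate_100kbps)
{
        if (!rate_100kbps) {            /* e.g. MCS index >= 32 */
                printf("bitrate unknown\n");
                return;
        }
        printf("%u.%u MBit/s\n", rate_100kbps / 10, rate_100kbps % 10);
}

int main(void)
{
        print_bitrate(1500);
        print_bitrate(0);
        return 0;
}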
diff --git a/include/net/compat.h b/include/net/compat.h
index 9ee75ed..a974ae9 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -41,7 +41,7 @@
#endif /* defined(CONFIG_COMPAT) */
extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr *, int);
+extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
unsigned, unsigned);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 06b795d..b94765e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,12 +35,12 @@
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
- int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
u32 pmtu_learned;
struct inetpeer_addr_base redirect_learned;
+ struct list_head gc_list;
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
* are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
@@ -96,6 +96,8 @@
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+extern void inetpeer_invalidate_tree(int family);
+
/*
* temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
* tcp_ts_stamp if no refcount is taken on inet_peer
diff --git a/include/net/ip.h b/include/net/ip.h
index 775009f..b53d65f 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -388,7 +388,7 @@
return 1;
}
-extern int ip_call_ra_chain(struct sk_buff *skb);
+extern bool ip_call_ra_chain(struct sk_buff *skb);
/*
* Functions provided by ip_fragment.c
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 2e1d5ec..cc7c197 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -62,6 +62,7 @@
#define AF_IUCV_FLAG_SYN 0x2
#define AF_IUCV_FLAG_FIN 0x4
#define AF_IUCV_FLAG_WIN 0x8
+#define AF_IUCV_FLAG_SHT 0x10
struct af_iucv_trans_hdr {
u16 magic;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 7477f02..f7917f7 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -659,6 +659,8 @@
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
* @RX_FLAG_40MHZ: HT40 (40 MHz) was used
* @RX_FLAG_SHORT_GI: Short guard interval was used
+ * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
+ * Valid only for data frames (mainly A-MPDU)
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = 1<<0,
@@ -672,6 +674,7 @@
RX_FLAG_HT = 1<<9,
RX_FLAG_40MHZ = 1<<10,
RX_FLAG_SHORT_GI = 1<<11,
+ RX_FLAG_NO_SIGNAL_VAL = 1<<12,
};
/**
@@ -3565,6 +3568,8 @@
* @hw: The hardware the algorithm is invoked for.
* @sband: The band this frame is being transmitted on.
* @bss_conf: the current BSS configuration
+ * @skb: the skb that will be transmitted, the control information in it needs
+ * to be filled in
* @reported_rate: The rate control algorithm can fill this in to indicate
* which rate should be reported to userspace as the current rate and
* used for rate calculations in the mesh network.
@@ -3572,12 +3577,11 @@
* RTS threshold
* @short_preamble: whether mac80211 will request short-preamble transmission
* if the selected rate supports it
- * @max_rate_idx: user-requested maximum rate (not MCS for now)
+ * @max_rate_idx: user-requested maximum (legacy) rate
* (deprecated; this will be removed once drivers get updated to use
* rate_idx_mask)
- * @rate_idx_mask: user-requested rate mask (not MCS for now)
- * @skb: the skb that will be transmitted, the control information in it needs
- * to be filled in
+ * @rate_idx_mask: user-requested (legacy) rate mask
+ * @rate_idx_mcs_mask: user-requested MCS rate mask
* @bss: whether this frame is sent out in AP or IBSS mode
*/
struct ieee80211_tx_rate_control {
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 2dcf317..96755c3 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -20,6 +20,9 @@
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
NF_CT_EXT_TSTAMP,
#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ NF_CT_EXT_TIMEOUT,
+#endif
NF_CT_EXT_NUM,
};
@@ -29,6 +32,7 @@
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
+#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index f1c1311..5767dc2 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -69,4 +69,17 @@
enum ip_conntrack_info ctinfo,
unsigned int timeout);
+struct nf_ct_helper_expectfn {
+ struct list_head head;
+ const char *name;
+ void (*expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp);
+};
+
+void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n);
+void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n);
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_name(const char *name);
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
+
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index e3d3ee3..90c67c7 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -39,12 +39,13 @@
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum);
+ unsigned int hooknum,
+ unsigned int *timeouts);
/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff);
+ unsigned int dataoff, unsigned int *timeouts);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -60,6 +61,9 @@
/* Print out the private part of the conntrack. */
int (*print_conntrack)(struct seq_file *s, struct nf_conn *);
+ /* Return the array of timeouts for this protocol. */
+ unsigned int *(*get_timeouts)(struct net *net);
+
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
@@ -79,6 +83,17 @@
size_t nla_size;
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ struct {
+ size_t obj_size;
+ int (*nlattr_to_obj)(struct nlattr *tb[], void *data);
+ int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
+
+ unsigned int nlattr_max;
+ const struct nla_policy *nla_policy;
+ } ctnl_timeout;
+#endif
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header **ctl_table_header;
struct ctl_table *ctl_table;
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
new file mode 100644
index 0000000..0e04db4
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -0,0 +1,78 @@
+#ifndef _NF_CONNTRACK_TIMEOUT_H
+#define _NF_CONNTRACK_TIMEOUT_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+#define CTNL_TIMEOUT_NAME_MAX 32
+
+struct ctnl_timeout {
+ struct list_head head;
+ struct rcu_head rcu_head;
+ atomic_t refcnt;
+ char name[CTNL_TIMEOUT_NAME_MAX];
+ __u16 l3num;
+ __u8 l4num;
+ char data[0];
+};
+
+struct nf_conn_timeout {
+ struct ctnl_timeout *timeout;
+};
+
+#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data)
+
+static inline
+struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
+ struct ctnl_timeout *timeout,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
+ if (timeout_ext == NULL)
+ return NULL;
+
+ timeout_ext->timeout = timeout;
+
+ return timeout_ext;
+#else
+ return NULL;
+#endif
+};
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+extern int nf_conntrack_timeout_init(struct net *net);
+extern void nf_conntrack_timeout_fini(struct net *net);
+#else
+static inline int nf_conntrack_timeout_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_timeout_fini(struct net *net)
+{
+ return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(const char *name);
+extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
+#endif
+
+#endif /* _NF_CONNTRACK_TIMEOUT_H */
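A simplified model of the lookup order this extension enables: a per-connection timeout policy attached with nf_ct_timeout_ext_add() takes precedence, and the protocol's ->get_timeouts() defaults apply otherwise. The toy_* structures and the timeout values below are stand-ins for illustration only.

/* Toy model, not the kernel definitions. */
#include <stddef.h>
#include <stdio.h>

struct toy_timeout_policy { unsigned int data[2]; };
struct toy_conn { struct toy_timeout_policy *timeout_ext; };

static unsigned int default_timeouts[2] = { 30, 180 };  /* made-up defaults */

static unsigned int *get_timeouts(struct toy_conn *ct)
{
        if (ct->timeout_ext)                    /* per-connection policy */
                return ct->timeout_ext->data;
        return default_timeouts;                /* protocol-wide defaults */
}

int main(void)
{
        struct toy_timeout_policy custom = { .data = { 10, 60 } };
        struct toy_conn plain = { .timeout_ext = NULL };
        struct toy_conn tuned = { .timeout_ext = &custom };

        printf("plain: %u s, tuned: %u s\n",
               get_timeouts(&plain)[1], get_timeouts(&tuned)[1]);
        return 0;
}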
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h
index 0dfb34a..7e1544e 100644
--- a/include/net/netfilter/xt_log.h
+++ b/include/net/netfilter/xt_log.h
@@ -6,7 +6,7 @@
};
static struct sbuff emergency, *emergency_ptr = &emergency;
-static int sb_add(struct sbuff *m, const char *f, ...)
+static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...)
{
va_list args;
int len;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 86fee8b..feba740 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -141,17 +141,17 @@
/* ----- NCI Devices ----- */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
- __u32 supported_protocols,
- int tx_headroom,
- int tx_tailroom);
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
void nci_free_device(struct nci_dev *ndev);
int nci_register_device(struct nci_dev *ndev);
void nci_unregister_device(struct nci_dev *ndev);
int nci_recv_frame(struct sk_buff *skb);
static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
- unsigned int len,
- gfp_t how)
+ unsigned int len,
+ gfp_t how)
{
struct sk_buff *skb;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index d253278..bac070b 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -53,15 +53,15 @@
int (*dev_down)(struct nfc_dev *dev);
int (*start_poll)(struct nfc_dev *dev, u32 protocols);
void (*stop_poll)(struct nfc_dev *dev);
- int (*dep_link_up)(struct nfc_dev *dev, int target_idx,
- u8 comm_mode, u8 rf_mode);
+ int (*dep_link_up)(struct nfc_dev *dev, int target_idx, u8 comm_mode,
+ u8 *gb, size_t gb_len);
int (*dep_link_down)(struct nfc_dev *dev);
int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
- u32 protocol);
+ u32 protocol);
void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx);
int (*data_exchange)(struct nfc_dev *dev, u32 target_idx,
- struct sk_buff *skb, data_exchange_cb_t cb,
- void *cb_context);
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context);
};
#define NFC_TARGET_IDX_ANY -1
@@ -110,9 +110,9 @@
extern struct class nfc_class;
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
- u32 supported_protocols,
- int tx_headroom,
- int tx_tailroom);
+ u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
/**
* nfc_free_device - free nfc device
@@ -135,7 +135,7 @@
* @dev: The parent device
*/
static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev,
- struct device *dev)
+ struct device *dev)
{
nfc_dev->dev.parent = dev;
}
@@ -172,17 +172,15 @@
}
struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
- unsigned int flags, unsigned int size,
- unsigned int *err);
+ unsigned int flags, unsigned int size,
+ unsigned int *err);
struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
int nfc_set_remote_general_bytes(struct nfc_dev *dev,
- u8 *gt, u8 gt_len);
+ u8 *gt, u8 gt_len);
-u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len);
-
-int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
- int ntargets);
+int nfc_targets_found(struct nfc_dev *dev,
+ struct nfc_target *targets, int ntargets);
int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index d368561..6ee44b2 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -413,6 +413,7 @@
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
/* A macro to walk a list of skbs. */
#define sctp_skb_for_each(pos, head, tmp) \
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 5f097ca..7137545 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -40,7 +40,7 @@
* checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
* with a zero checksum field are illegal. */
if (uh->check == 0) {
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: zeroed checksum field\n");
+ LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n");
return 1;
}
@@ -52,7 +52,7 @@
/*
* Coverage length violates RFC 3828: log and discard silently.
*/
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: bad csum coverage %d/%d\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n",
cscov, skb->len);
return 1;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 32313c0..0f0d470 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -985,6 +985,11 @@
/* add new interrupt at end of irq queue */
do {
+ /*
+ * Or all existing action->thread_mask bits,
+ * so we can find the next zero bit for this
+ * new action.
+ */
thread_mask |= old->thread_mask;
old_ptr = &old->next;
old = *old_ptr;
@@ -993,14 +998,41 @@
}
/*
- * Setup the thread mask for this irqaction. Unlikely to have
- * 32 resp 64 irqs sharing one line, but who knows.
+ * Setup the thread mask for this irqaction for ONESHOT. For
+ * !ONESHOT irqs the thread mask is 0 so we can avoid a
+ * conditional in irq_wake_thread().
*/
- if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
- ret = -EBUSY;
- goto out_mask;
+ if (new->flags & IRQF_ONESHOT) {
+ /*
+ * Unlikely to have 32 resp 64 irqs sharing one line,
+ * but who knows.
+ */
+ if (thread_mask == ~0UL) {
+ ret = -EBUSY;
+ goto out_mask;
+ }
+ /*
+ * The thread_mask for the action is or'ed to
+ * desc->thread_active to indicate that the
+ * IRQF_ONESHOT thread handler has been woken, but not
+ * yet finished. The bit is cleared when a thread
+ * completes. When all threads of a shared interrupt
+ * line have completed desc->threads_active becomes
+ * zero and the interrupt line is unmasked. See
+ * handle.c:irq_wake_thread() for further information.
+ *
+ * If no thread is woken by primary (hard irq context)
+ * interrupt handlers, then desc->threads_active is
+ * also checked for zero to unmask the irq line in the
+ * affected hard irq flow handlers
+ * (handle_[fasteoi|level]_irq).
+ *
+ * The new action gets the first zero bit of
+ * thread_mask assigned. See the loop above which or's
+ * all existing action->thread_mask bits.
+ */
+ new->thread_mask = 1 << ffz(thread_mask);
}
- new->thread_mask = 1 << ffz(thread_mask);
if (!shared) {
init_waitqueue_head(&desc->wait_for_threads);
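The bit-allocation scheme described in the comment above can be shown in isolation: OR the thread_mask of every existing action, refuse when the mask is already full, and give the new ONESHOT action the first zero bit. ffz() is reimplemented with a compiler builtin purely for this standalone example.

/* Standalone illustration of the ONESHOT thread_mask allocation. */
#include <stdio.h>

static unsigned long ffz(unsigned long word)
{
        return __builtin_ctzl(~word);   /* index of the first zero bit */
}

int main(void)
{
        unsigned long existing[] = { 1UL << 0, 1UL << 1 };  /* two sharers */
        unsigned long thread_mask = 0;
        unsigned long new_mask;
        unsigned int i;

        for (i = 0; i < 2; i++)         /* the "or all existing bits" loop */
                thread_mask |= existing[i];

        if (thread_mask == ~0UL)        /* line already fully populated */
                return 1;               /* would be -EBUSY in the kernel */

        new_mask = 1UL << ffz(thread_mask);
        printf("new action->thread_mask = 0x%lx\n", new_mask);  /* 0x4 */
        return 0;
}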
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 33a0676..b342f57 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6728,7 +6728,7 @@
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus();
@@ -6741,7 +6741,7 @@
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
cpuset_update_active_cpus();
return NOTIFY_OK;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 8e75003..38e612e 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -891,9 +891,15 @@
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
- return buf + vsnprintf(buf, end > buf ? end - buf : 0,
- ((struct va_format *)ptr)->fmt,
- *(((struct va_format *)ptr)->va));
+ {
+ va_list va;
+
+ va_copy(va, *((struct va_format *)ptr)->va);
+ buf += vsnprintf(buf, end > buf ? end - buf : 0,
+ ((struct va_format *)ptr)->fmt, va);
+ va_end(va);
+ return buf;
+ }
case 'K':
/*
* %pK cannot be used in IRQ context because its test
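The underlying rule behind the %pV change is the standard C one: a va_list may only be traversed once, so code that needs the same arguments again (as %pV does when it hands them to a nested vsnprintf) must operate on a va_copy. A userspace analogue, with an invented log_twice() helper:

/* Userspace analogue of the fix; formats the same arguments twice. */
#include <stdarg.h>
#include <stdio.h>

static void log_twice(const char *fmt, ...)
{
        char buf[64];
        va_list args, copy;

        va_start(args, fmt);
        va_copy(copy, args);                    /* keep 'args' reusable */
        vsnprintf(buf, sizeof(buf), fmt, copy); /* first traversal */
        va_end(copy);
        printf("first pass:  %s\n", buf);

        vsnprintf(buf, sizeof(buf), fmt, args); /* second traversal still valid */
        va_end(args);
        printf("second pass: %s\n", buf);
}

int main(void)
{
        log_twice("%s=%d", "retries", 3);
        return 0;
}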
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 06b145fb..47296fe 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -640,10 +640,11 @@
unsigned long vmstart;
unsigned long vmend;
- vma = find_vma_prev(mm, start, &prev);
+ vma = find_vma(mm, start);
if (!vma || vma->vm_start > start)
return -EFAULT;
+ prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
diff --git a/mm/mlock.c b/mm/mlock.c
index 4f4f53b..ef726e8 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -385,10 +385,11 @@
return -EINVAL;
if (end == start)
return 0;
- vma = find_vma_prev(current->mm, start, &prev);
+ vma = find_vma(current->mm, start);
if (!vma || vma->vm_start > start)
return -ENOMEM;
+ prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
diff --git a/mm/mmap.c b/mm/mmap.c
index 22e1a0b..da15a79 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1266,8 +1266,9 @@
vma->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
if (file) {
- error = -EINVAL;
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
goto free_vma;
if (vm_flags & VM_DENYWRITE) {
@@ -1607,7 +1608,6 @@
/*
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1616,7 +1616,16 @@
struct vm_area_struct *vma;
vma = find_vma(mm, addr);
- *pprev = vma ? vma->vm_prev : NULL;
+ if (vma) {
+ *pprev = vma->vm_prev;
+ } else {
+ struct rb_node *rb_node = mm->mm_rb.rb_node;
+ *pprev = NULL;
+ while (rb_node) {
+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ rb_node = rb_node->rb_right;
+ }
+ }
return vma;
}
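A toy model of the new find_vma_prev() contract: when no VMA contains or follows addr, *pprev now ends up pointing at the last (highest) VMA rather than NULL, matching the rightward rbtree walk added above. An ordered array stands in for the mm rbtree in this sketch.

/* Toy model only; not the kernel data structures. */
#include <stddef.h>
#include <stdio.h>

struct toy_vma { unsigned long vm_start, vm_end; };

static struct toy_vma *find_vma_prev(struct toy_vma *v, size_t n,
                                     unsigned long addr,
                                     struct toy_vma **pprev)
{
        size_t i;

        *pprev = NULL;
        for (i = 0; i < n; i++) {
                if (addr < v[i].vm_end)         /* first VMA ending above addr */
                        return &v[i];
                *pprev = &v[i];                 /* walk right, remember last */
        }
        return NULL;                            /* *pprev is the last VMA */
}

int main(void)
{
        struct toy_vma vmas[] = { { 0x1000, 0x2000 }, { 0x8000, 0x9000 } };
        struct toy_vma *prev, *vma;

        vma = find_vma_prev(vmas, 2, 0xf000, &prev);
        printf("vma=%p prev->vm_start=%#lx\n", (void *)vma, prev->vm_start);
        return 0;
}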
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5a688a2..f437d05 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -262,10 +262,11 @@
down_write(&current->mm->mmap_sem);
- vma = find_vma_prev(current->mm, start, &prev);
+ vma = find_vma(current->mm, start);
error = -ENOMEM;
if (!vma)
goto out;
+ prev = vma->vm_prev;
if (unlikely(grows & PROT_GROWSDOWN)) {
if (vma->vm_start >= end)
goto out;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index de1616a..1ccbd71 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -379,13 +379,15 @@
pgoff_t offset = swp_offset(ent);
struct swap_cgroup_ctrl *ctrl;
struct page *mappage;
+ struct swap_cgroup *sc;
ctrl = &swap_cgroup_ctrl[swp_type(ent)];
if (ctrlp)
*ctrlp = ctrl;
mappage = ctrl->map[offset / SC_PER_PAGE];
- return page_address(mappage) + offset % SC_PER_PAGE;
+ sc = page_address(mappage);
+ return sc + offset % SC_PER_PAGE;
}
/**
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 2b66dae..a6d5d63 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -132,8 +132,7 @@
"Sending own" :
"Forwarding"));
bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
- " IDF %s, ttvn %d) on interface %s [%pM]\n",
+ "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
@@ -171,8 +170,7 @@
directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
- pr_err("Error - can't forward packet: incoming iface not "
- "specified\n");
+ pr_err("Error - can't forward packet: incoming iface not specified\n");
goto out;
}
@@ -193,8 +191,7 @@
/* FIXME: what about aggregated packets ? */
bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %d, TTL %d) "
- "on interface %s [%pM]\n",
+ "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n",
(forw_packet->own ? "Sending own" : "Forwarding"),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
@@ -508,8 +505,7 @@
batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq_orig: %i, tq_avg: %i, "
- "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
+ "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
batman_ogm_packet->header.ttl);
@@ -589,8 +585,8 @@
struct hlist_node *node;
uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
- bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
- "Searching and updating originator entry of received packet\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "update_originator(): Searching and updating originator entry of received packet\n");
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
@@ -783,8 +779,7 @@
* information */
tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
- /*
- * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
+ /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
* affect the nearly-symmetric links only a little, but
* punishes asymmetric links more. This will give a value
* between 0 and TQ_MAX_VALUE
@@ -802,10 +797,7 @@
(TQ_MAX_VALUE * TQ_MAX_VALUE));
bat_dbg(DBG_BATMAN, bat_priv,
- "bidirectional: "
- "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
- "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
- "total tq: %3i\n",
+ "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
orig_node->orig, orig_neigh_node->orig, total_count,
neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
@@ -933,9 +925,7 @@
batman_ogm_packet->orig) ? 1 : 0);
bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
- "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
- "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
@@ -978,16 +968,15 @@
if (is_my_addr) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: received my own broadcast (sender: %pM"
- ")\n",
+ "Drop packet: received my own broadcast (sender: %pM)\n",
ethhdr->h_source);
return;
}
if (is_broadcast) {
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "ignoring all packets with broadcast source addr (sender: %pM"
- ")\n", ethhdr->h_source);
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
@@ -1017,16 +1006,16 @@
spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
}
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "originator packet from myself (via neighbor)\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from myself (via neighbor)\n");
orig_node_free_ref(orig_neigh_node);
return;
}
if (is_my_oldorig) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast echos (sender: "
- "%pM)\n", ethhdr->h_source);
+ "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
@@ -1039,8 +1028,8 @@
if (is_duplicate == -1) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: packet within seqno protection time "
- "(sender: %pM)\n", ethhdr->h_source);
+ "Drop packet: packet within seqno protection time (sender: %pM)\n",
+ ethhdr->h_source);
goto out;
}
@@ -1061,8 +1050,8 @@
batman_ogm_packet->prev_sender)) &&
(compare_eth(router->addr, router_router->addr))) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast packets that "
- "may make me loop (sender: %pM)\n", ethhdr->h_source);
+ "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
+ ethhdr->h_source);
goto out;
}
@@ -1106,8 +1095,8 @@
bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1, if_incoming);
- bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
- "rebroadcast neighbor packet with direct link flag\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
goto out_neigh;
}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index b00101d..68ff759 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -255,8 +255,8 @@
buff[count - 1] = '\0';
bat_info(net_dev,
- "Invalid parameter for 'vis mode' setting received: "
- "%s\n", buff);
+ "Invalid parameter for 'vis mode' setting received: %s\n",
+ buff);
return -EINVAL;
}
@@ -330,8 +330,8 @@
if (gw_mode_tmp < 0) {
bat_info(net_dev,
- "Invalid parameter for 'gw mode' setting received: "
- "%s\n", buff);
+ "Invalid parameter for 'gw mode' setting received: %s\n",
+ buff);
return -EINVAL;
}
@@ -502,8 +502,8 @@
buff[count - 1] = '\0';
if (strlen(buff) >= IFNAMSIZ) {
- pr_err("Invalid parameter for 'mesh_iface' setting received: "
- "interface name too long '%s'\n", buff);
+ pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
+ buff);
hardif_free_ref(hard_iface);
return -EINVAL;
}
@@ -677,8 +677,8 @@
hardif_free_ref(primary_if);
if (ret)
- bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send "
- "uevent for (%s,%s,%s) event (err: %d)\n",
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
uev_type_str[type], uev_action_str[action],
(action == UEV_DEL ? "NULL" : data), ret);
return ret;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 0fa8e2d..6f9b9b7 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -224,16 +224,13 @@
} else if ((!curr_gw) && (next_gw)) {
bat_dbg(DBG_BATMAN, bat_priv,
"Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
- next_gw->orig_node->orig,
- next_gw->orig_node->gw_flags,
+ next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
router->tq_avg);
throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
} else {
bat_dbg(DBG_BATMAN, bat_priv,
- "Changing route to gateway %pM "
- "(gw_flags: %i, tq: %i)\n",
- next_gw->orig_node->orig,
- next_gw->orig_node->gw_flags,
+ "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
router->tq_avg);
throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
}
@@ -287,8 +284,7 @@
goto out;
bat_dbg(DBG_BATMAN, bat_priv,
- "Restarting gateway selection: better gateway found (tq curr: "
- "%i, tq new: %i)\n",
+ "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
gw_tq_avg, orig_tq_avg);
deselect:
@@ -352,8 +348,7 @@
continue;
bat_dbg(DBG_BATMAN, bat_priv,
- "Gateway class of originator %pM changed from "
- "%i to %i\n",
+ "Gateway class of originator %pM changed from %i to %i\n",
orig_node->orig, gw_node->orig_node->gw_flags,
new_gwflags);
@@ -474,23 +469,23 @@
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
- "specify interfaces to enable it\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
net_dev->name);
goto out;
}
if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "primary interface not active\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
- seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
- "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
- "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
- "outgoingIF", SOURCE_VERSION, primary_if->net_dev->name,
+ seq_printf(seq,
+ " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+ "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
+ SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
rcu_read_lock();
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 3ccb9c8..ca57ac7 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -125,8 +125,8 @@
ret = kstrtol(slash_ptr + 1, 10, &lup);
if (ret) {
bat_err(net_dev,
- "Upload speed of gateway mode invalid: "
- "%s\n", slash_ptr + 1);
+ "Upload speed of gateway mode invalid: %s\n",
+ slash_ptr + 1);
return false;
}
@@ -163,8 +163,8 @@
gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
gw_deselect(bat_priv);
- bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' "
- "(propagating: %d%s/%d%s)\n",
+ bat_info(net_dev,
+ "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
(down > 2048 ? down / 1024 : down),
(down > 2048 ? "MBit" : "KBit"),
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 409d027..3778977 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -175,11 +175,9 @@
net_dev->dev_addr))
continue;
- pr_warning("The newly added mac address (%pM) already exists "
- "on: %s\n", net_dev->dev_addr,
- hard_iface->net_dev->name);
- pr_warning("It is strongly recommended to keep mac addresses "
- "unique to avoid problems!\n");
+ pr_warning("The newly added mac address (%pM) already exists on: %s\n",
+ net_dev->dev_addr, hard_iface->net_dev->name);
+ pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
}
rcu_read_unlock();
}
@@ -282,10 +280,7 @@
/* hard-interface is part of a bridge */
if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
- pr_err("You are about to enable batman-adv on '%s' which "
- "already is part of a bridge. Unless you know exactly "
- "what you are doing this is probably wrong and won't "
- "work the way you think it would.\n",
+ pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n",
hard_iface->net_dev->name);
soft_iface = dev_get_by_name(&init_net, iface_name);
@@ -303,8 +298,7 @@
}
if (!softif_is_valid(soft_iface)) {
- pr_err("Can't create batman mesh interface %s: "
- "already exists as regular interface\n",
+ pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
soft_iface->name);
dev_put(soft_iface);
ret = -EINVAL;
@@ -317,8 +311,9 @@
bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
if (!hard_iface->packet_buff) {
- bat_err(hard_iface->soft_iface, "Can't add interface packet "
- "(%s): out of memory\n", hard_iface->net_dev->name);
+ bat_err(hard_iface->soft_iface,
+ "Can't add interface packet (%s): out of memory\n",
+ hard_iface->net_dev->name);
ret = -ENOMEM;
goto err;
}
@@ -341,29 +336,22 @@
if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
ETH_DATA_LEN + BAT_HEADER_LEN)
bat_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle "
- "the transport of batman-adv packets. Packets going "
- "over this interface will be fragmented on layer2 "
- "which could impact the performance. Setting the MTU "
- "to %zi would solve the problem.\n",
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
hard_iface->net_dev->name, hard_iface->net_dev->mtu,
ETH_DATA_LEN + BAT_HEADER_LEN);
if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
ETH_DATA_LEN + BAT_HEADER_LEN)
bat_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle "
- "the transport of batman-adv packets. If you "
- "experience problems getting traffic through try "
- "increasing the MTU to %zi.\n",
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
hard_iface->net_dev->name, hard_iface->net_dev->mtu,
ETH_DATA_LEN + BAT_HEADER_LEN);
if (hardif_is_iface_up(hard_iface))
hardif_activate_interface(hard_iface);
else
- bat_err(hard_iface->soft_iface, "Not using interface %s "
- "(retrying later): interface not active\n",
+ bat_err(hard_iface->soft_iface,
+ "Not using interface %s (retrying later): interface not active\n",
hard_iface->net_dev->name);
/* begin scheduling originator messages on that interface */
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 9b755f9..b87518e 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -59,8 +59,7 @@
}
if (i == ARRAY_SIZE(socket_client_hash)) {
- pr_err("Error - can't add another packet client: "
- "maximum number of clients reached\n");
+ pr_err("Error - can't add another packet client: maximum number of clients reached\n");
kfree(socket_client);
return -EXFULL;
}
@@ -162,8 +161,7 @@
if (len < sizeof(struct icmp_packet)) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: "
- "invalid packet size\n");
+ "Error - can't send packet from char device: invalid packet size\n");
return -EINVAL;
}
@@ -193,16 +191,14 @@
if (icmp_packet->header.packet_type != BAT_ICMP) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: "
- "got bogus packet type (expected: BAT_ICMP)\n");
+ "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
goto free_skb;
}
if (icmp_packet->msg_type != ECHO_REQUEST) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: "
- "got bogus message type (expected: ECHO_REQUEST)\n");
+ "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
len = -EINVAL;
goto free_skb;
}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 08f3b3a..6d51caa 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -64,8 +64,8 @@
register_netdevice_notifier(&hard_if_notifier);
- pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) "
- "loaded\n", SOURCE_VERSION, COMPAT_VERSION);
+ pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
+ SOURCE_VERSION, COMPAT_VERSION);
return 0;
}
@@ -201,8 +201,8 @@
bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
if (bat_algo_ops_tmp) {
- pr_info("Trying to register already registered routing "
- "algorithm: %s\n", bat_algo_ops->name);
+ pr_info("Trying to register already registered routing algorithm: %s\n",
+ bat_algo_ops->name);
goto out;
}
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 1468788..94fa1c2 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -107,9 +107,7 @@
#define GW_THRESHOLD 50
-/*
- * Debug Messages
- */
+/* Debug Messages */
#ifdef pr_fmt
#undef pr_fmt
#endif
@@ -124,14 +122,7 @@
DBG_ALL = 7
};
-
-/*
- * Vis
- */
-
-/*
- * Kernel headers
- */
+/* Kernel headers */
#include <linux/mutex.h> /* mutex */
#include <linux/module.h> /* needed by all modules */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 371cc93..43c0a4f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -294,14 +294,12 @@
(neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED))
bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor purge: originator %pM, "
- "neighbor: %pM, iface: %s\n",
+ "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
orig_node->orig, neigh_node->addr,
neigh_node->if_incoming->net_dev->name);
else
bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor timeout: originator %pM, "
- "neighbor: %pM, last_valid: %lu\n",
+ "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n",
orig_node->orig, neigh_node->addr,
(neigh_node->last_valid / HZ));
@@ -416,15 +414,15 @@
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
net_dev->name);
goto out;
}
if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq, "BATMAN mesh %s "
- "disabled - primary interface not active\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index f535155..7f8e158 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -83,8 +83,7 @@
/* route changed */
} else if (neigh_node && curr_router) {
bat_dbg(DBG_ROUTES, bat_priv,
- "Changing route towards: %pM "
- "(now via %pM - was via %pM)\n",
+ "Changing route towards: %pM (now via %pM - was via %pM)\n",
orig_node->orig, neigh_node->addr,
curr_router->addr);
}
@@ -238,8 +237,9 @@
"old packet received, start protection\n");
return 0;
- } else
+ } else {
return 1;
+ }
}
return 0;
}
@@ -345,9 +345,8 @@
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
- pr_debug("Warning - can't forward icmp packet from %pM to "
- "%pM: ttl exceeded\n", icmp_packet->orig,
- icmp_packet->dst);
+ pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
+ icmp_packet->orig, icmp_packet->dst);
goto out;
}
@@ -674,9 +673,9 @@
if (!orig_node)
goto out;
- bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
- "(client %pM)\n", roam_adv_packet->src,
- roam_adv_packet->client);
+ bat_dbg(DBG_TT, bat_priv,
+ "Received ROAMING_ADV from %pM (client %pM)\n",
+ roam_adv_packet->src, roam_adv_packet->client);
tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
atomic_read(&orig_node->last_ttvn) + 1, true, false);
@@ -813,9 +812,8 @@
/* TTL exceeded */
if (unicast_packet->header.ttl < 2) {
- pr_debug("Warning - can't forward unicast packet from %pM to "
- "%pM: ttl exceeded\n", ethhdr->h_source,
- unicast_packet->dest);
+ pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
+ ethhdr->h_source, unicast_packet->dest);
goto out;
}
@@ -934,10 +932,10 @@
orig_node_free_ref(orig_node);
}
- bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
- "new_ttvn %u)! Rerouting unicast packet (for %pM) to "
- "%pM\n", unicast_packet->ttvn, curr_ttvn,
- ethhdr->h_dest, unicast_packet->dest);
+ bat_dbg(DBG_ROUTES, bat_priv,
+ "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
+ unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
+ unicast_packet->dest);
unicast_packet->ttvn = curr_ttvn;
}
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 4137580..af7a674 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -45,8 +45,8 @@
goto send_skb_err;
if (!(hard_iface->net_dev->flags & IFF_UP)) {
- pr_warning("Interface %s is not up - can't send packet via "
- "that interface!\n", hard_iface->net_dev->name);
+ pr_warning("Interface %s is not up - can't send packet via that interface!\n",
+ hard_iface->net_dev->name);
goto send_skb_err;
}
@@ -56,7 +56,7 @@
skb_reset_mac_header(skb);
- ethhdr = (struct ethhdr *) skb_mac_header(skb);
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 8fb16d2..a5590f4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -252,8 +252,8 @@
vid, curr_neigh->addr);
else if ((curr_neigh) && (new_neigh))
bat_dbg(DBG_ROUTES, bat_priv,
- "Changing mesh exit point on vid: %d from %pM "
- "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr);
+ "Changing mesh exit point on vid: %d from %pM to %pM.\n",
+ vid, curr_neigh->addr, new_neigh->addr);
else if ((!curr_neigh) && (new_neigh))
bat_dbg(DBG_ROUTES, bat_priv,
"Setting mesh exit point on vid: %d to %pM.\n",
@@ -327,15 +327,15 @@
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
net_dev->name);
goto out;
}
if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq, "BATMAN mesh %s "
- "disabled - primary interface not active\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
@@ -403,8 +403,7 @@
if (curr_softif_neigh == softif_neigh) {
bat_dbg(DBG_ROUTES, bat_priv,
- "Current mesh exit point on vid: %d "
- "'%pM' vanished.\n",
+ "Current mesh exit point on vid: %d '%pM' vanished.\n",
softif_neigh_vid->vid,
softif_neigh->addr);
do_deselect = 1;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c950705..1f86921 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -309,21 +309,21 @@
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
net_dev->name);
goto out;
}
if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "primary interface not active\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
- seq_printf(seq, "Locally retrieved addresses (from %s) "
- "announced via TT (TTVN: %u):\n",
+ seq_printf(seq,
+ "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
for (i = 0; i < hash->size; i++) {
@@ -365,8 +365,9 @@
* response issued before the net ttvn increment (consistency check) */
tt_local_entry->common.flags |= TT_CLIENT_PENDING;
- bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
- "%s\n", tt_local_entry->common.addr, message);
+ bat_dbg(DBG_TT, bat_priv,
+ "Local tt entry (%pM) pending to be removed: %s\n",
+ tt_local_entry->common.addr, message);
}
void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -574,15 +575,15 @@
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
- "specify interfaces to enable it\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
net_dev->name);
goto out;
}
if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "primary interface not active\n",
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
@@ -602,18 +603,18 @@
tt_global_entry = container_of(tt_common_entry,
struct tt_global_entry,
common);
- seq_printf(seq, " * %pM (%3u) via %pM (%3u) "
- "[%c%c]\n",
- tt_global_entry->common.addr,
- tt_global_entry->ttvn,
- tt_global_entry->orig_node->orig,
- (uint8_t) atomic_read(
+ seq_printf(seq,
+ " * %pM (%3u) via %pM (%3u) [%c%c]\n",
+ tt_global_entry->common.addr,
+ tt_global_entry->ttvn,
+ tt_global_entry->orig_node->orig,
+ (uint8_t) atomic_read(
&tt_global_entry->orig_node->
last_ttvn),
- (tt_global_entry->common.flags &
- TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_global_entry->common.flags &
- TT_CLIENT_WIFI ? 'W' : '.'));
+ (tt_global_entry->common.flags &
+ TT_CLIENT_ROAM ? 'R' : '.'),
+ (tt_global_entry->common.flags &
+ TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
@@ -710,8 +711,7 @@
common);
if (tt_global_entry->orig_node == orig_node) {
bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM "
- "(via %pM): %s\n",
+ "Deleting global tt entry %pM (via %pM): %s\n",
tt_global_entry->common.addr,
tt_global_entry->orig_node->orig,
message);
@@ -751,8 +751,8 @@
TT_CLIENT_ROAM_TIMEOUT))
continue;
- bat_dbg(DBG_TT, bat_priv, "Deleting global "
- "tt entry (%pM): Roaming timeout\n",
+ bat_dbg(DBG_TT, bat_priv,
+ "Deleting global tt entry (%pM): Roaming timeout\n",
tt_global_entry->common.addr);
atomic_dec(&tt_global_entry->orig_node->tt_size);
hlist_del_rcu(node);
@@ -1134,8 +1134,9 @@
if (!neigh_node)
goto out;
- bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
- "[%c]\n", dst_orig_node->orig, neigh_node->addr,
+ bat_dbg(DBG_TT, bat_priv,
+ "Sending TT_REQUEST to %pM via %pM [%c]\n",
+ dst_orig_node->orig, neigh_node->addr,
(full_table ? 'F' : '.'));
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -1172,9 +1173,8 @@
struct tt_query_packet *tt_response;
bat_dbg(DBG_TT, bat_priv,
- "Received TT_REQUEST from %pM for "
- "ttvn: %u (%pM) [%c]\n", tt_request->src,
- tt_request->ttvn, tt_request->dst,
+ "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
+ tt_request->src, tt_request->ttvn, tt_request->dst,
(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
@@ -1299,9 +1299,8 @@
struct tt_query_packet *tt_response;
bat_dbg(DBG_TT, bat_priv,
- "Received TT_REQUEST from %pM for "
- "ttvn: %u (me) [%c]\n", tt_request->src,
- tt_request->ttvn,
+ "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
+ tt_request->src, tt_request->ttvn,
(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
@@ -1504,10 +1503,9 @@
struct tt_req_node *node, *safe;
struct orig_node *orig_node = NULL;
- bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
- "ttvn %d t_size: %d [%c]\n",
- tt_response->src, tt_response->ttvn,
- tt_response->tt_data,
+ bat_dbg(DBG_TT, bat_priv,
+ "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
+ tt_response->src, tt_response->ttvn, tt_response->tt_data,
(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
orig_node = orig_hash_find(bat_priv, tt_response->src);
@@ -1771,8 +1769,9 @@
if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
continue;
- bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
- "(%pM): pending\n", tt_common_entry->addr);
+ bat_dbg(DBG_TT, bat_priv,
+ "Deleting local tt entry (%pM): pending\n",
+ tt_common_entry->addr);
atomic_dec(&bat_priv->num_local_tt);
hlist_del_rcu(node);
@@ -1877,12 +1876,10 @@
if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
orig_node->tt_crc != tt_crc) {
request_table:
- bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
- "Need to retrieve the correct information "
- "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
- "%u num_changes: %u)\n", orig_node->orig, ttvn,
- orig_ttvn, tt_crc, orig_node->tt_crc,
- tt_num_changes);
+ bat_dbg(DBG_TT, bat_priv,
+ "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
+ orig_node->orig, ttvn, orig_ttvn, tt_crc,
+ orig_node->tt_crc, tt_num_changes);
send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
full_table);
return;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 0897dfa..676f6a6 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -66,7 +66,7 @@
kfree_skb(tmp_skb);
memmove(skb->data + uni_diff, skb->data, hdr_len);
- unicast_packet = (struct unicast_packet *) skb_pull(skb, uni_diff);
+ unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
unicast_packet->header.packet_type = BAT_UNICAST;
return skb;
@@ -238,7 +238,7 @@
goto dropped;
skb_reserve(frag_skb, ucf_hdr_len);
- unicast_packet = (struct unicast_packet *) skb->data;
+ unicast_packet = (struct unicast_packet *)skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 9ec85eb..3537d38 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -29,7 +29,6 @@
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
HIDP Module (Human Interface Device Protocol)
- SMP Module (Security Manager Protocol)
Say Y here to compile Bluetooth support into the kernel or say M to
compile it as module (bluetooth).
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 17800b1..9f9c8dc 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -143,10 +143,10 @@
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -157,7 +157,7 @@
err = bnep_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 3f2dd5c..1230faa 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -137,10 +137,10 @@
{
if (cmd == CMTPGETCONNLIST) {
struct cmtp_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -151,7 +151,7 @@
err = cmtp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 07bc69e..947172b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -35,7 +35,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
-#include <linux/notifier.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -51,7 +50,7 @@
struct hci_cp_le_create_conn cp;
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
conn->sec_level = BT_SECURITY_LOW;
@@ -80,10 +79,10 @@
struct inquiry_entry *ie;
struct hci_cp_create_conn cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->link_mode = HCI_LM_MASTER;
@@ -105,7 +104,8 @@
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
- conn->ssp_mode = ie->data.ssp_mode;
+ if (ie->data.ssp_mode > 0)
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -151,7 +151,7 @@
BT_DBG("%p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->attempt++;
@@ -169,7 +169,7 @@
BT_DBG("%p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->attempt++;
@@ -279,16 +279,13 @@
{
struct hci_conn *conn = container_of(work, struct hci_conn,
disc_work.work);
- struct hci_dev *hdev = conn->hdev;
__u8 reason;
- BT_DBG("conn %p state %d", conn, conn->state);
+ BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
if (atomic_read(&conn->refcnt))
return;
- hci_dev_lock(hdev);
-
switch (conn->state) {
case BT_CONNECT:
case BT_CONNECT2:
@@ -308,8 +305,6 @@
conn->state = BT_CLOSED;
break;
}
-
- hci_dev_unlock(hdev);
}
/* Enter sniff mode */
@@ -337,7 +332,7 @@
hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
}
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
struct hci_cp_sniff_mode cp;
cp.handle = cpu_to_le16(conn->handle);
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
@@ -372,7 +367,7 @@
BT_DBG("%s dst %s", hdev->name, batostr(dst));
- conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
+ conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
if (!conn)
return NULL;
@@ -386,7 +381,7 @@
conn->remote_auth = 0xff;
conn->key_type = 0xff;
- conn->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
switch (type) {
@@ -407,7 +402,7 @@
skb_queue_head_init(&conn->data_q);
- INIT_LIST_HEAD(&conn->chan_list);;
+ INIT_LIST_HEAD(&conn->chan_list);
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
@@ -555,7 +550,7 @@
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst);
if (!acl)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
hci_conn_hold(acl);
@@ -575,7 +570,7 @@
sco = hci_conn_add(hdev, type, dst);
if (!sco) {
hci_conn_put(acl);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
}
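The two hunks above switch the error paths from returning NULL to ERR_PTR(-ENOMEM), so callers of the surrounding connect routine are expected to test the result with IS_ERR()/PTR_ERR() rather than a plain NULL check. A minimal caller sketch under that assumption (the function's argument list and the variable names here are illustrative, not taken from this patch):

	struct hci_conn *conn;

	/* failure is now reported via ERR_PTR(), not NULL */
	conn = hci_connect(hdev, SCO_LINK, dst, sec_level, auth_type);
	if (IS_ERR(conn))
		return PTR_ERR(conn);	/* e.g. -ENOMEM when hci_conn_add() fails */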
@@ -586,12 +581,12 @@
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
- acl->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
- if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
+ if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
/* defer SCO setup until mode change completed */
- set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
+ set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
return sco;
}
@@ -607,8 +602,7 @@
{
BT_DBG("conn %p", conn);
- if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
- !(conn->link_mode & HCI_LM_ENCRYPT))
+ if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
return 0;
return 1;
@@ -633,17 +627,17 @@
conn->auth_type = auth_type;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
/* encrypt must be pending if auth is also pending */
- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
if (conn->key_type != 0xff)
- set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
+ set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
}
return 0;
@@ -654,7 +648,7 @@
{
BT_DBG("conn %p", conn);
- if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = cpu_to_le16(conn->handle);
cp.encrypt = 0x01;
@@ -674,8 +668,7 @@
/* For non 2.1 devices and low security level we don't need the link
key. */
- if (sec_level == BT_SECURITY_LOW &&
- (!conn->ssp_mode || !conn->hdev->ssp_mode))
+ if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
return 1;
/* For other security levels we need the link key. */
@@ -704,7 +697,7 @@
goto encrypt;
auth:
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return 0;
if (!hci_conn_auth(conn, sec_level, auth_type))
@@ -739,7 +732,7 @@
{
BT_DBG("conn %p", conn);
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
@@ -758,7 +751,7 @@
if (!role && conn->link_mode & HCI_LM_MASTER)
return 1;
- if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
struct hci_cp_switch_role cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.role = role;
@@ -782,10 +775,10 @@
if (conn->mode != HCI_CM_SNIFF)
goto timer;
- if (!conn->power_save && !force_active)
+ if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
goto timer;
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
struct hci_cp_exit_sniff_mode cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
@@ -801,11 +794,11 @@
void hci_conn_hash_flush(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
+ struct hci_conn *c, *n;
BT_DBG("hdev %s", hdev->name);
- list_for_each_entry_rcu(c, &h->list, list) {
+ list_for_each_entry_safe(c, n, &h->list, list) {
c->state = BT_CLOSED;
hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
@@ -950,7 +943,7 @@
BT_DBG("%s conn %p", hdev->name, conn);
- chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+ chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
if (!chan)
return NULL;
@@ -981,10 +974,10 @@
void hci_chan_list_flush(struct hci_conn *conn)
{
- struct hci_chan *chan;
+ struct hci_chan *chan, *n;
BT_DBG("conn %p", conn);
- list_for_each_entry_rcu(chan, &conn->chan_list, list)
+ list_for_each_entry_safe(chan, n, &conn->chan_list, list)
hci_chan_del(chan);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 5aeb624..59ec99e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -40,7 +40,6 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
-#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
@@ -55,8 +54,6 @@
#define AUTO_OFF_TIMEOUT 2000
-bool enable_hs;
-
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
@@ -69,24 +66,11 @@
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
-/* HCI notifiers list */
-static ATOMIC_NOTIFIER_HEAD(hci_notifier);
-
/* ---- HCI notifications ---- */
-int hci_register_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&hci_notifier, nb);
-}
-
-int hci_unregister_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&hci_notifier, nb);
-}
-
static void hci_notify(struct hci_dev *hdev, int event)
{
- atomic_notifier_call_chain(&hci_notifier, event, hdev);
+ hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */
@@ -98,8 +82,28 @@
/* If this is the init phase check if the completed command matches
* the last init command, and if not just return.
*/
- if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
+ if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
+ struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ struct sk_buff *skb;
+
+ /* Some CSR based controllers generate a spontaneous
+ * reset complete event during init and any pending
+ * command will never be completed. In such a case we
+ * need to resend whatever was the last sent
+ * command.
+ */
+
+ if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
+ return;
+
+ skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
+ if (skb) {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ }
+
return;
+ }
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -355,72 +359,209 @@
}
/* ---- Inquiry support ---- */
+
+bool hci_discovery_active(struct hci_dev *hdev)
+{
+ struct discovery_state *discov = &hdev->discovery;
+
+ switch (discov->state) {
+ case DISCOVERY_FINDING:
+ case DISCOVERY_RESOLVING:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+void hci_discovery_set_state(struct hci_dev *hdev, int state)
+{
+ BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
+
+ if (hdev->discovery.state == state)
+ return;
+
+ switch (state) {
+ case DISCOVERY_STOPPED:
+ if (hdev->discovery.state != DISCOVERY_STARTING)
+ mgmt_discovering(hdev, 0);
+ hdev->discovery.type = 0;
+ break;
+ case DISCOVERY_STARTING:
+ break;
+ case DISCOVERY_FINDING:
+ mgmt_discovering(hdev, 1);
+ break;
+ case DISCOVERY_RESOLVING:
+ break;
+ case DISCOVERY_STOPPING:
+ break;
+ }
+
+ hdev->discovery.state = state;
+}
+
static void inquiry_cache_flush(struct hci_dev *hdev)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *next = cache->list, *e;
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *p, *n;
- BT_DBG("cache %p", cache);
-
- cache->list = NULL;
- while ((e = next)) {
- next = e->next;
- kfree(e);
+ list_for_each_entry_safe(p, n, &cache->all, all) {
+ list_del(&p->all);
+ kfree(p);
}
+
+ INIT_LIST_HEAD(&cache->unknown);
+ INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
BT_DBG("cache %p, %s", cache, batostr(bdaddr));
- for (e = cache->list; e; e = e->next)
+ list_for_each_entry(e, &cache->all, all) {
if (!bacmp(&e->data.bdaddr, bdaddr))
- break;
- return e;
+ return e;
+ }
+
+ return NULL;
}
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+
+ list_for_each_entry(e, &cache->unknown, list) {
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
+
+ list_for_each_entry(e, &cache->resolve, list) {
+ if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
+ return e;
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct list_head *pos = &cache->resolve;
+ struct inquiry_entry *p;
+
+ list_del(&ie->list);
+
+ list_for_each_entry(p, &cache->resolve, list) {
+ if (p->name_state != NAME_PENDING &&
+ abs(p->data.rssi) >= abs(ie->data.rssi))
+ break;
+ pos = &p->list;
+ }
+
+ list_add(&ie->list, pos);
+}
+
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known, bool *ssp)
+{
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *ie;
BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
- ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
- if (!ie) {
- /* Entry not in the cache. Add new one. */
- ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
- if (!ie)
- return;
+ if (ssp)
+ *ssp = data->ssp_mode;
- ie->next = cache->list;
- cache->list = ie;
+ ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
+ if (ie) {
+ if (ie->data.ssp_mode && ssp)
+ *ssp = true;
+
+ if (ie->name_state == NAME_NEEDED &&
+ data->rssi != ie->data.rssi) {
+ ie->data.rssi = data->rssi;
+ hci_inquiry_cache_update_resolve(hdev, ie);
+ }
+
+ goto update;
+ }
+
+ /* Entry not in the cache. Add new one. */
+ ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+ if (!ie)
+ return false;
+
+ list_add(&ie->all, &cache->all);
+
+ if (name_known) {
+ ie->name_state = NAME_KNOWN;
+ } else {
+ ie->name_state = NAME_NOT_KNOWN;
+ list_add(&ie->list, &cache->unknown);
+ }
+
+update:
+ if (name_known && ie->name_state != NAME_KNOWN &&
+ ie->name_state != NAME_PENDING) {
+ ie->name_state = NAME_KNOWN;
+ list_del(&ie->list);
}
memcpy(&ie->data, data, sizeof(*data));
ie->timestamp = jiffies;
cache->timestamp = jiffies;
+
+ if (ie->name_state == NAME_NOT_KNOWN)
+ return false;
+
+ return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_info *info = (struct inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
- for (e = cache->list; e && copied < num; e = e->next, copied++) {
+ list_for_each_entry(e, &cache->all, all) {
struct inquiry_data *data = &e->data;
+
+ if (copied >= num)
+ break;
+
bacpy(&info->bdaddr, &data->bdaddr);
info->pscan_rep_mode = data->pscan_rep_mode;
info->pscan_period_mode = data->pscan_period_mode;
info->pscan_mode = data->pscan_mode;
memcpy(info->dev_class, data->dev_class, 3);
info->clock_offset = data->clock_offset;
+
info++;
+ copied++;
}
BT_DBG("cache %p, copied %d", cache, copied);
@@ -567,7 +708,7 @@
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->flags)) {
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
hci_dev_lock(hdev);
mgmt_powered(hdev, 1);
hci_dev_unlock(hdev);
@@ -603,6 +744,8 @@
{
BT_DBG("%s %p", hdev->name, hdev);
+ cancel_work_sync(&hdev->le_scan);
+
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
@@ -619,14 +762,14 @@
if (hdev->discov_timeout > 0) {
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
}
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
- cancel_delayed_work(&hdev->power_off);
-
- if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
cancel_delayed_work(&hdev->service_cache);
+ cancel_delayed_work_sync(&hdev->le_scan_disable);
+
hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
@@ -667,13 +810,18 @@
* and no tasks are scheduled. */
hdev->close(hdev);
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 0);
- hci_dev_unlock(hdev);
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
+ }
/* Clear flags */
hdev->flags = 0;
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+
hci_req_unlock(hdev);
hci_dev_put(hdev);
@@ -688,7 +836,12 @@
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
+
err = hci_dev_do_close(hdev);
+
hci_dev_put(hdev);
return err;
}
@@ -847,11 +1000,11 @@
read_lock(&hci_dev_list_lock);
list_for_each_entry(hdev, &hci_dev_list, list) {
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
cancel_delayed_work(&hdev->power_off);
- if (!test_bit(HCI_MGMT, &hdev->flags))
- set_bit(HCI_PAIRABLE, &hdev->flags);
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
@@ -883,11 +1036,11 @@
if (!hdev)
return -ENODEV;
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
cancel_delayed_work_sync(&hdev->power_off);
- if (!test_bit(HCI_MGMT, &hdev->flags))
- set_bit(HCI_PAIRABLE, &hdev->flags);
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
@@ -967,11 +1120,11 @@
if (hci_dev_open(hdev->id) < 0)
return;
- if (test_bit(HCI_AUTO_OFF, &hdev->flags))
+ if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
schedule_delayed_work(&hdev->power_off,
msecs_to_jiffies(AUTO_OFF_TIMEOUT));
- if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
+ if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
mgmt_index_added(hdev);
}
@@ -982,9 +1135,7 @@
BT_DBG("%s", hdev->name);
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
-
- hci_dev_close(hdev->id);
+ hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
@@ -1037,6 +1188,18 @@
return 0;
}
+int hci_smp_ltks_clear(struct hci_dev *hdev)
+{
+ struct smp_ltk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ list_del(&k->list);
+ kfree(k);
+ }
+
+ return 0;
+}
+
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct link_key *k;
@@ -1084,44 +1247,38 @@
return 0;
}
-struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
- struct link_key *k;
+ struct smp_ltk *k;
- list_for_each_entry(k, &hdev->link_keys, list) {
- struct key_master_id *id;
-
- if (k->type != HCI_LK_SMP_LTK)
+ list_for_each_entry(k, &hdev->long_term_keys, list) {
+ if (k->ediv != ediv ||
+ memcmp(rand, k->rand, sizeof(k->rand)))
continue;
- if (k->dlen != sizeof(*id))
- continue;
-
- id = (void *) &k->data;
- if (id->ediv == ediv &&
- (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
- return k;
+ return k;
}
return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
-struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 type)
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type)
{
- struct link_key *k;
+ struct smp_ltk *k;
- list_for_each_entry(k, &hdev->link_keys, list)
- if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
+ list_for_each_entry(k, &hdev->long_term_keys, list)
+ if (addr_type == k->bdaddr_type &&
+ bacmp(bdaddr, &k->bdaddr) == 0)
return k;
return NULL;
}
-EXPORT_SYMBOL(hci_find_link_key_type);
+EXPORT_SYMBOL(hci_find_ltk_by_addr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
- bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
+ bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
u8 old_key_type, persistent;
@@ -1175,40 +1332,39 @@
return 0;
}
-int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
- u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
+int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
+ ediv, u8 rand[8])
{
- struct link_key *key, *old_key;
- struct key_master_id *id;
- u8 old_key_type;
+ struct smp_ltk *key, *old_key;
- BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
+ if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
+ return 0;
- old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
- if (old_key) {
+ old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
+ if (old_key)
key = old_key;
- old_key_type = old_key->type;
- } else {
- key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
+ else {
+ key = kzalloc(sizeof(*key), GFP_ATOMIC);
if (!key)
return -ENOMEM;
- list_add(&key->list, &hdev->link_keys);
- old_key_type = 0xff;
+ list_add(&key->list, &hdev->long_term_keys);
}
- key->dlen = sizeof(*id);
-
bacpy(&key->bdaddr, bdaddr);
- memcpy(key->val, ltk, sizeof(key->val));
- key->type = HCI_LK_SMP_LTK;
- key->pin_len = key_size;
+ key->bdaddr_type = addr_type;
+ memcpy(key->val, tk, sizeof(key->val));
+ key->authenticated = authenticated;
+ key->ediv = ediv;
+ key->enc_size = enc_size;
+ key->type = type;
+ memcpy(key->rand, rand, sizeof(key->rand));
- id = (void *) &key->data;
- id->ediv = ediv;
- memcpy(id->rand, rand, sizeof(id->rand));
+ if (!new_key)
+ return 0;
- if (new_key)
- mgmt_new_link_key(hdev, key, old_key_type);
+ if (type & HCI_SMP_LTK)
+ mgmt_new_ltk(hdev, key, 1);
return 0;
}
@@ -1229,6 +1385,23 @@
return 0;
}
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct smp_ltk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr))
+ continue;
+
+ BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+ list_del(&k->list);
+ kfree(k);
+ }
+
+ return 0;
+}
+
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
@@ -1240,7 +1413,7 @@
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
+ bdaddr_t *bdaddr)
{
struct oob_data *data;
@@ -1280,7 +1453,7 @@
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
- u8 *randomizer)
+ u8 *randomizer)
{
struct oob_data *data;
@@ -1303,8 +1476,7 @@
return 0;
}
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *b;
@@ -1331,7 +1503,7 @@
return 0;
}
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list *entry;
@@ -1349,10 +1521,10 @@
list_add(&entry->list, &hdev->blacklist);
- return mgmt_device_blocked(hdev, bdaddr);
+ return mgmt_device_blocked(hdev, bdaddr, type);
}
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list *entry;
@@ -1366,13 +1538,13 @@
list_del(&entry->list);
kfree(entry);
- return mgmt_device_unblocked(hdev, bdaddr);
+ return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_work.work);
+ adv_work.work);
hci_dev_lock(hdev);
@@ -1415,11 +1587,7 @@
}
int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev)
-{
- struct adv_entry *entry;
-
- if (!is_connectable_adv(ev->evt_type))
+ struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
return -EINVAL;
/* Only new entries should be added to adv_entries. So, if
@@ -1427,7 +1595,7 @@
if (hci_find_adv_entry(hdev, &ev->bdaddr))
return 0;
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1442,16 +1610,116 @@
return 0;
}
+static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
+{
+ struct le_scan_params *param = (struct le_scan_params *) opt;
+ struct hci_cp_le_set_scan_param cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = param->type;
+ cp.interval = cpu_to_le16(param->interval);
+ cp.window = cpu_to_le16(param->window);
+
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
+}
+
+static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
+{
+ struct hci_cp_le_set_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = 1;
+
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
+ u16 window, int timeout)
+{
+ long timeo = msecs_to_jiffies(3000);
+ struct le_scan_params param;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EINPROGRESS;
+
+ param.type = type;
+ param.interval = interval;
+ param.window = window;
+
+ hci_req_lock(hdev);
+
+ err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
+ timeo);
+ if (!err)
+ err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
+
+ hci_req_unlock(hdev);
+
+ if (err < 0)
+ return err;
+
+ schedule_delayed_work(&hdev->le_scan_disable,
+ msecs_to_jiffies(timeout));
+
+ return 0;
+}
+
+static void le_scan_disable_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_disable.work);
+ struct hci_cp_le_set_scan_enable cp;
+
+ BT_DBG("%s", hdev->name);
+
+ memset(&cp, 0, sizeof(cp));
+
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static void le_scan_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
+ struct le_scan_params *param = &hdev->le_scan_params;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_do_le_scan(hdev, param->type, param->interval, param->window,
+ param->timeout);
+}
+
+int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
+ int timeout)
+{
+ struct le_scan_params *param = &hdev->le_scan_params;
+
+ BT_DBG("%s", hdev->name);
+
+ if (work_busy(&hdev->le_scan))
+ return -EINPROGRESS;
+
+ param->type = type;
+ param->interval = interval;
+ param->window = window;
+ param->timeout = timeout;
+
+ queue_work(system_long_wq, &hdev->le_scan);
+
+ return 0;
+}
+
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head = &hci_dev_list, *p;
int i, id, error;
- BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
- hdev->bus, hdev->owner);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- if (!hdev->open || !hdev->close || !hdev->destruct)
+ if (!hdev->open || !hdev->close)
return -EINVAL;
/* Do not allow HCI_AMP devices to register at index 0,
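The additions in the hunk above give LE scanning a small asynchronous front end: hci_le_scan() stores the parameters in hdev->le_scan_params and queues le_scan_work() on system_long_wq, which calls hci_do_le_scan() to program the scan parameters, enable scanning, and arm le_scan_disable to stop after the timeout. A hypothetical caller sketch (the parameter values and calling context are illustrative only; interval and window are in 0.625 ms units, the timeout is in milliseconds):

	int err;

	/* illustrative: queue a 10 second passive LE scan
	 * (interval/window 0x0010 = 10 ms in 0.625 ms units) */
	err = hci_le_scan(hdev, 0x00 /* passive */, 0x0010, 0x0010, 10000);
	if (err)
		return err;	/* -EINPROGRESS if a scan is already queued or running */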
@@ -1472,7 +1740,6 @@
hdev->id = id;
list_add_tail(&hdev->list, head);
- atomic_set(&hdev->refcnt, 1);
mutex_init(&hdev->lock);
hdev->flags = 0;
@@ -1503,7 +1770,7 @@
init_waitqueue_head(&hdev->req_wait_q);
mutex_init(&hdev->req_lock);
- inquiry_cache_init(hdev);
+ discovery_init(hdev);
hci_conn_hash_init(hdev);
@@ -1514,6 +1781,7 @@
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
@@ -1529,6 +1797,10 @@
atomic_set(&hdev->promisc, 0);
+ INIT_WORK(&hdev->le_scan, le_scan_work);
+
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+
write_unlock(&hci_dev_list_lock);
hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
@@ -1551,11 +1823,12 @@
}
}
- set_bit(HCI_AUTO_OFF, &hdev->flags);
- set_bit(HCI_SETUP, &hdev->flags);
+ set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ set_bit(HCI_SETUP, &hdev->dev_flags);
schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
+ hci_dev_hold(hdev);
return id;
@@ -1587,7 +1860,7 @@
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->flags)) {
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
@@ -1614,11 +1887,12 @@
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
+ hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
- __hci_dev_put(hdev);
+ hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1706,7 +1980,7 @@
while (count) {
scb = (void *) skb->cb;
- len = min(scb->expect, (__u16)count);
+ len = min_t(uint, scb->expect, count);
memcpy(skb_put(skb, len), data, len);
@@ -1862,11 +2136,15 @@
BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
- if (atomic_read(&hdev->promisc)) {
- /* Time stamp */
- __net_timestamp(skb);
+ /* Time stamp */
+ __net_timestamp(skb);
- hci_send_to_sock(hdev, skb, NULL);
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
+
+ if (atomic_read(&hdev->promisc)) {
+ /* Send copy to the sockets */
+ hci_send_to_sock(hdev, skb);
}
/* Get rid of skb owner, prior to sending to the driver. */
@@ -2235,26 +2513,31 @@
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_chan *chan;
- struct sk_buff *skb;
- int quote;
- unsigned int cnt;
+ /* Calculate count of blocks used by this packet */
+ return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+}
- BT_DBG("%s", hdev->name);
-
- if (!hci_conn_num(hdev, ACL_LINK))
- return;
-
+static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+{
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
- if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
+ if (!cnt && time_after(jiffies, hdev->acl_last_tx +
+ msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
hci_link_tx_to(hdev, ACL_LINK);
}
+}
- cnt = hdev->acl_cnt;
+static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->acl_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+
+ __check_timeout(hdev, cnt);
while (hdev->acl_cnt &&
(chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
@@ -2270,7 +2553,7 @@
skb = skb_dequeue(&chan->data_q);
hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
@@ -2285,6 +2568,70 @@
hci_prio_recalculate(hdev, ACL_LINK);
}
+static inline void hci_sched_acl_blk(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->block_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+
+ __check_timeout(hdev, cnt);
+
+ while (hdev->block_cnt > 0 &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+ int blocks;
+
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ blocks = __get_blocks(hdev, skb);
+ if (blocks > hdev->block_cnt)
+ return;
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
+
+ hci_send_frame(skb);
+ hdev->acl_last_tx = jiffies;
+
+ hdev->block_cnt -= blocks;
+ quote -= blocks;
+
+ chan->sent += blocks;
+ chan->conn->sent += blocks;
+ }
+ }
+
+ if (cnt != hdev->block_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_conn_num(hdev, ACL_LINK))
+ return;
+
+ switch (hdev->flow_ctl_mode) {
+ case HCI_FLOW_CTL_MODE_PACKET_BASED:
+ hci_sched_acl_pkt(hdev);
+ break;
+
+ case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+ hci_sched_acl_blk(hdev);
+ break;
+ }
+}
+
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
@@ -2482,9 +2829,12 @@
BT_DBG("%s", hdev->name);
while ((skb = skb_dequeue(&hdev->rx_q))) {
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
+
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
- hci_send_to_sock(hdev, skb, NULL);
+ hci_send_to_sock(hdev, skb);
}
if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -2568,6 +2918,8 @@
if (test_bit(HCI_INQUIRY, &hdev->flags))
return -EINPROGRESS;
+ inquiry_cache_flush(hdev);
+
memset(&cp, 0, sizeof(cp));
memcpy(&cp.lap, lap, sizeof(cp.lap));
cp.length = length;
@@ -2584,6 +2936,3 @@
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
-
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable High Speed");
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 001307f..badb785 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -35,7 +35,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
-#include <linux/notifier.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -45,8 +44,6 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-static bool enable_le;
-
/* Handle HCI Event packets */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -65,7 +62,7 @@
clear_bit(HCI_INQUIRY, &hdev->flags);
hci_dev_lock(hdev);
- mgmt_discovering(hdev, 0);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -195,7 +192,10 @@
hci_req_complete(hdev, HCI_OP_RESET, status);
- hdev->dev_flags = 0;
+ /* Reset all non-persistent flags */
+ hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
+
+ hdev->discovery.state = DISCOVERY_STOPPED;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -211,13 +211,14 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_set_local_name_complete(hdev, sent, status);
-
- if (status == 0)
+ else if (!status)
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
hci_dev_unlock(hdev);
+
+ hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -229,7 +230,8 @@
if (rp->status)
return;
- memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
+ if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -252,6 +254,9 @@
clear_bit(HCI_AUTH, &hdev->flags);
}
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_auth_enable_complete(hdev, status);
+
hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
@@ -349,14 +354,19 @@
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status)
- return;
-
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
return;
- memcpy(hdev->dev_class, sent, 3);
+ hci_dev_lock(hdev);
+
+ if (status == 0)
+ memcpy(hdev->dev_class, sent, 3);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_set_class_of_dev_complete(hdev, sent, status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -419,18 +429,6 @@
hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
-static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
- if (rp->status)
- return;
-
- hdev->ssp_mode = rp->mode;
-}
-
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -438,14 +436,18 @@
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status)
- return;
-
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
return;
- hdev->ssp_mode = *((__u8 *) sent);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
+ else if (!status) {
+ if (*((u8 *) sent))
+ set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ else
+ clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ }
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
@@ -540,20 +542,6 @@
hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
-static void hci_set_le_support(struct hci_dev *hdev)
-{
- struct hci_cp_write_le_host_supported cp;
-
- memset(&cp, 0, sizeof(cp));
-
- if (enable_le) {
- cp.le = 1;
- cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
- }
-
- hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
-}
-
static void hci_setup(struct hci_dev *hdev)
{
if (hdev->dev_type != HCI_BREDR)
@@ -565,8 +553,18 @@
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
- u8 mode = 0x01;
- hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ u8 mode = 0x01;
+ hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode);
+ } else {
+ struct hci_cp_write_eir cp;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(&cp, 0, sizeof(cp));
+
+ hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+ }
}
if (hdev->features[3] & LMP_RSSI_INQ)
@@ -579,12 +577,15 @@
struct hci_cp_read_local_ext_features cp;
cp.page = 0x01;
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
+ &cp);
}
- if (hdev->features[4] & LMP_LE)
- hci_set_le_support(hdev);
+ if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+ u8 enable = 1;
+ hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
+ &enable);
+ }
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -594,7 +595,7 @@
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
- return;
+ goto done;
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
@@ -608,6 +609,9 @@
if (test_bit(HCI_INIT, &hdev->flags))
hci_setup(hdev);
+
+done:
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}
static void hci_setup_link_policy(struct hci_dev *hdev)
@@ -624,8 +628,8 @@
link_policy |= HCI_LP_PARK;
link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
- sizeof(link_policy), &link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
+ &link_policy);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -701,6 +705,22 @@
hdev->features[6], hdev->features[7]);
}
+static void hci_set_le_support(struct hci_dev *hdev)
+{
+ struct hci_cp_write_le_host_supported cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ cp.le = 1;
+ cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+ }
+
+ if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
+ hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
+ &cp);
+}
+
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -709,7 +729,7 @@
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
- return;
+ goto done;
switch (rp->page) {
case 0:
@@ -720,6 +740,10 @@
break;
}
+ if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
+ hci_set_le_support(hdev);
+
+done:
hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
@@ -890,7 +914,7 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
if (rp->status != 0)
@@ -916,7 +940,7 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
@@ -951,9 +975,9 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
- rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
+ rp->status);
hci_dev_unlock(hdev);
}
@@ -967,9 +991,9 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
- rp->status);
+ ACL_LINK, 0, rp->status);
hci_dev_unlock(hdev);
}
@@ -982,9 +1006,9 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
- rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
+ 0, rp->status);
hci_dev_unlock(hdev);
}
@@ -998,9 +1022,9 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
- rp->status);
+ ACL_LINK, 0, rp->status);
hci_dev_unlock(hdev);
}
@@ -1023,6 +1047,15 @@
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
+
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -1033,28 +1066,47 @@
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status)
- return;
-
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!cp)
return;
switch (cp->enable) {
case LE_SCANNING_ENABLED:
+ hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
+
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
cancel_delayed_work_sync(&hdev->adv_work);
hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
+ if (status)
+ return;
+
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
+
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
+ mgmt_interleaved_discovery(hdev);
+ } else {
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+ }
+
break;
default:
@@ -1090,16 +1142,27 @@
static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
struct sk_buff *skb)
{
- struct hci_cp_read_local_ext_features cp;
+ struct hci_cp_write_le_host_supported *sent;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status)
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
+ if (!sent)
return;
- cp.page = 0x01;
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
+ if (!status) {
+ if (sent->le)
+ hdev->host_features[0] |= LMP_HOST_LE;
+ else
+ hdev->host_features[0] &= ~LMP_HOST_LE;
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_bit(HCI_INIT, &hdev->flags))
+ mgmt_le_enable_complete(hdev, sent->le, status);
+
+ hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -1110,7 +1173,7 @@
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_start_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
@@ -1119,7 +1182,7 @@
set_bit(HCI_INQUIRY, &hdev->flags);
hci_dev_lock(hdev);
- mgmt_discovering(hdev, 1);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
}
@@ -1153,7 +1216,7 @@
if (!conn) {
conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
if (conn) {
- conn->out = 1;
+ conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
} else
BT_ERR("No memory for new connection");
@@ -1263,7 +1326,7 @@
/* Only request authentication for SSP connections or non-SSP
* devices with sec_level HIGH or if MITM protection is requested */
- if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
+ if (!hci_conn_ssp_enabled(conn) &&
conn->pending_sec_level != BT_SECURITY_HIGH &&
!(conn->auth_type & 0x01))
return 0;
@@ -1271,6 +1334,73 @@
return 1;
}
+static inline int hci_resolve_name(struct hci_dev *hdev,
+ struct inquiry_entry *e)
+{
+ struct hci_cp_remote_name_req cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ cp.pscan_rep_mode = e->data.pscan_rep_mode;
+ cp.pscan_mode = e->data.pscan_mode;
+ cp.clock_offset = e->data.clock_offset;
+
+ return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
+}
+
+static bool hci_resolve_next_name(struct hci_dev *hdev)
+{
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ if (list_empty(&discov->resolve))
+ return false;
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (hci_resolve_name(hdev, e) == 0) {
+ e->name_state = NAME_PENDING;
+ return true;
+ }
+
+ return false;
+}
+
+static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
+ bdaddr_t *bdaddr, u8 *name, u8 name_len)
+{
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
+ name_len, conn->dev_class);
+
+ if (discov->state == DISCOVERY_STOPPED)
+ return;
+
+ if (discov->state == DISCOVERY_STOPPING)
+ goto discov_complete;
+
+ if (discov->state != DISCOVERY_RESOLVING)
+ return;
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
+ if (e) {
+ e->name_state = NAME_KNOWN;
+ list_del(&e->list);
+ if (name)
+ mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
+ e->data.rssi, name, name_len);
+ }
+
+ if (hci_resolve_next_name(hdev))
+ return;
+
+discov_complete:
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+}
+
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_remote_name_req *cp;
@@ -1290,13 +1420,17 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
+
if (!conn)
goto unlock;
if (!hci_outgoing_auth_needed(hdev, conn))
goto unlock;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
@@ -1413,9 +1547,9 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, status);
}
@@ -1440,15 +1574,37 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, status);
}
hci_dev_unlock(hdev);
}
+static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_disconnect *cp;
+ struct hci_conn *conn;
+
+ if (!status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+ if (conn)
+ mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, status);
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_le_create_conn *cp;
@@ -1478,7 +1634,7 @@
conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
if (conn) {
conn->dst_type = cp->peer_addr_type;
- conn->out = 1;
+ conn->out = true;
} else {
BT_ERR("No memory for new connection");
}
@@ -1496,6 +1652,8 @@
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
BT_DBG("%s status %d", hdev->name, status);
@@ -1506,8 +1664,28 @@
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
- mgmt_discovering(hdev, 0);
+
+ if (discov->state != DISCOVERY_FINDING)
+ goto unlock;
+
+ if (list_empty(&discov->resolve)) {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (e && hci_resolve_name(hdev, e) == 0) {
+ e->name_state = NAME_PENDING;
+ hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
+ } else {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -1525,6 +1703,8 @@
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
+ bool name_known, ssp;
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -1533,9 +1713,11 @@
data.clock_offset = info->clock_offset;
data.rssi = 0x00;
data.ssp_mode = 0x00;
- hci_inquiry_cache_update(hdev, &data);
+
+ name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, 0, NULL);
+ info->dev_class, 0, !name_known, ssp, NULL,
+ 0);
}
hci_dev_unlock(hdev);
@@ -1569,8 +1751,6 @@
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev, &ev->bdaddr, conn->type,
- conn->dst_type);
} else
conn->state = BT_CONNECTED;
@@ -1588,7 +1768,7 @@
struct hci_cp_read_remote_features cp;
cp.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
/* Set packet type for incoming connection */
@@ -1596,14 +1776,14 @@
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
+ &cp);
}
} else {
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK)
mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
- conn->dst_type, ev->status);
+ conn->dst_type, ev->status);
}
if (conn->type == ACL_LINK)
@@ -1668,8 +1848,8 @@
else
cp.role = 0x01; /* Remain slave */
- hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
+ &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
@@ -1683,7 +1863,7 @@
cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
} else {
/* Connection rejected */
@@ -1711,12 +1891,14 @@
if (ev->status == 0)
conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
+ (conn->type == ACL_LINK || conn->type == LE_LINK)) {
if (ev->status != 0)
- mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
+ mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
else
- mgmt_disconnected(hdev, &conn->dst, conn->type,
- conn->dst_type);
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type,
+ conn->dst_type);
}
if (ev->status == 0) {
@@ -1742,22 +1924,23 @@
goto unlock;
if (!ev->status) {
- if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
+ if (!hci_conn_ssp_enabled(conn) &&
+ test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
BT_INFO("re-auth of legacy device is not possible.");
} else {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
}
} else {
- mgmt_auth_failed(hdev, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ ev->status);
}
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
- clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+ clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
if (conn->state == BT_CONFIG) {
- if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
+ if (!ev->status && hci_conn_ssp_enabled(conn)) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = ev->handle;
cp.encrypt = 0x01;
@@ -1776,7 +1959,7 @@
hci_conn_put(conn);
}
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
if (!ev->status) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = ev->handle;
@@ -1784,7 +1967,7 @@
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
&cp);
} else {
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
hci_encrypt_cfm(conn, ev->status, 0x00);
}
}
@@ -1804,17 +1987,25 @@
hci_dev_lock(hdev);
- if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
- mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
-
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto check_auth;
+
+ if (ev->status == 0)
+ hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
+ strnlen(ev->name, HCI_MAX_NAME_LENGTH));
+ else
+ hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
+
+check_auth:
if (!conn)
goto unlock;
if (!hci_outgoing_auth_needed(hdev, conn))
goto unlock;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
@@ -1845,7 +2036,7 @@
conn->link_mode &= ~HCI_LM_ENCRYPT;
}
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
if (conn->state == BT_CONFIG) {
if (!ev->status)
@@ -1874,7 +2065,7 @@
if (!ev->status)
conn->link_mode |= HCI_LM_SECURE;
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
hci_key_change_cfm(conn, ev->status);
}
@@ -1916,7 +2107,10 @@
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- }
+ } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -2024,10 +2218,6 @@
hci_cc_host_buffer_size(hdev, skb);
break;
- case HCI_OP_READ_SSP_MODE:
- hci_cc_read_ssp_mode(hdev, skb);
- break;
-
case HCI_OP_WRITE_SSP_MODE:
hci_cc_write_ssp_mode(hdev, skb);
break;
@@ -2213,8 +2403,7 @@
break;
case HCI_OP_DISCONNECT:
- if (ev->status != 0)
- mgmt_disconnect_failed(hdev, NULL, ev->status);
+ hci_cs_disconnect(hdev, ev->status);
break;
case HCI_OP_LE_CREATE_CONN:
@@ -2258,7 +2447,7 @@
conn->link_mode |= HCI_LM_MASTER;
}
- clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
hci_role_switch_cfm(conn, ev->status, ev->role);
}
@@ -2332,6 +2521,56 @@
queue_work(hdev->workqueue, &hdev->tx_work);
}
+static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
+ int i;
+
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
+ BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+ return;
+ }
+
+ if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
+ ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+ BT_DBG("%s bad parameters", hdev->name);
+ return;
+ }
+
+ BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
+ ev->num_hndl);
+
+ for (i = 0; i < ev->num_hndl; i++) {
+ struct hci_comp_blocks_info *info = &ev->handles[i];
+ struct hci_conn *conn;
+ __u16 handle, block_count;
+
+ handle = __le16_to_cpu(info->handle);
+ block_count = __le16_to_cpu(info->blocks);
+
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn)
+ continue;
+
+ conn->sent -= block_count;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ hdev->block_cnt += block_count;
+ if (hdev->block_cnt > hdev->num_blocks)
+ hdev->block_cnt = hdev->num_blocks;
+ break;
+
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
+ }
+ }
+
+ queue_work(hdev->workqueue, &hdev->tx_work);
+}
+
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
@@ -2346,14 +2585,14 @@
conn->mode = ev->mode;
conn->interval = __le16_to_cpu(ev->interval);
- if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
if (conn->mode == HCI_CM_ACTIVE)
- conn->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
else
- conn->power_save = 0;
+ clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
}
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, ev->status);
}
@@ -2379,10 +2618,10 @@
hci_conn_put(conn);
}
- if (!test_bit(HCI_PAIRABLE, &hdev->flags))
+ if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
- else if (test_bit(HCI_MGMT, &hdev->flags)) {
+ else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
u8 secure;
if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -2406,7 +2645,7 @@
BT_DBG("%s", hdev->name);
- if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
+ if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
return;
hci_dev_lock(hdev);
@@ -2421,7 +2660,7 @@
BT_DBG("%s found key type %u for %s", hdev->name, key->type,
batostr(&ev->bdaddr));
- if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
key->type == HCI_LK_DEBUG_COMBINATION) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
@@ -2483,7 +2722,7 @@
hci_conn_put(conn);
}
- if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+ if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
ev->key_type, pin_len);
@@ -2551,6 +2790,7 @@
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
+ bool name_known, ssp;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
@@ -2572,10 +2812,12 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- hci_inquiry_cache_update(hdev, &data);
+
+ name_known = hci_inquiry_cache_update(hdev, &data,
+ false, &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, info->rssi,
- NULL);
+ info->dev_class, info->rssi,
+ !name_known, ssp, NULL, 0);
}
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -2589,10 +2831,11 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- hci_inquiry_cache_update(hdev, &data);
+ name_known = hci_inquiry_cache_update(hdev, &data,
+ false, &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, info->rssi,
- NULL);
+ info->dev_class, info->rssi,
+ !name_known, ssp, NULL, 0);
}
}
@@ -2617,9 +2860,10 @@
ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
- conn->ssp_mode = (ev->features[0] & 0x01);
+ if (ev->features[0] & LMP_HOST_SSP)
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
if (conn->state != BT_CONFIG)
@@ -2631,7 +2875,10 @@
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- }
+ } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -2724,6 +2971,8 @@
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
+ bool name_known, ssp;
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -2732,9 +2981,19 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x01;
- hci_inquiry_cache_update(hdev, &data);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ name_known = eir_has_data_type(info->data,
+ sizeof(info->data),
+ EIR_NAME_COMPLETE);
+ else
+ name_known = true;
+
+ name_known = hci_inquiry_cache_update(hdev, &data, name_known,
+ &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, info->rssi, info->data);
+ info->dev_class, info->rssi, !name_known,
+ ssp, info->data, sizeof(info->data));
}
hci_dev_unlock(hdev);
@@ -2774,19 +3033,22 @@
hci_conn_hold(conn);
- if (!test_bit(HCI_MGMT, &hdev->flags))
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
- if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
+ if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.capability = conn->io_capability;
+ /* Change the IO capability from KeyboardDisplay
+ * to DisplayYesNo as it is not supported by BT spec. */
+ cp.capability = (conn->io_capability == 0x04) ?
+ 0x01 : conn->io_capability;
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
- if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
+ if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
hci_find_remote_oob_data(hdev, &conn->dst))
cp.oob_data = 0x01;
else
@@ -2822,8 +3084,9 @@
goto unlock;
conn->remote_cap = ev->capability;
- conn->remote_oob = ev->oob_data;
conn->remote_auth = ev->authentication;
+ if (ev->oob_data)
+ set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
unlock:
hci_dev_unlock(hdev);
@@ -2840,7 +3103,7 @@
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->flags))
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -2869,7 +3132,7 @@
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
* confirm_hint set to 1). */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
BT_DBG("Confirming auto-accept as acceptor");
confirm_hint = 1;
goto confirm;
@@ -2890,8 +3153,8 @@
}
confirm:
- mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
- confirm_hint);
+ mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
+ confirm_hint);
unlock:
hci_dev_unlock(hdev);
@@ -2906,8 +3169,8 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_passkey_request(hdev, &ev->bdaddr);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
hci_dev_unlock(hdev);
}
@@ -2930,8 +3193,9 @@
* initiated the authentication. A traditional auth_complete
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
- mgmt_auth_failed(hdev, &conn->dst, ev->status);
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
+ mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ ev->status);
hci_conn_put(conn);
@@ -2950,13 +3214,13 @@
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
hci_dev_unlock(hdev);
}
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
struct oob_data *data;
@@ -2965,7 +3229,7 @@
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->flags))
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
@@ -3020,7 +3284,9 @@
goto unlock;
}
- mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
+ if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, 0, NULL, 0, NULL);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -3040,6 +3306,7 @@
{
u8 num_reports = skb->data[0];
void *ptr = &skb->data[1];
+ s8 rssi;
hci_dev_lock(hdev);
@@ -3048,6 +3315,10 @@
hci_add_adv_entry(hdev, ev);
+ rssi = ev->data[ev->length];
+ mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
+ NULL, rssi, 0, 1, ev->data, ev->length);
+
ptr += sizeof(*ev) + ev->length + 1;
}
@@ -3061,7 +3332,7 @@
struct hci_cp_le_ltk_reply cp;
struct hci_cp_le_ltk_neg_reply neg;
struct hci_conn *conn;
- struct link_key *ltk;
+ struct smp_ltk *ltk;
BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
@@ -3077,10 +3348,17 @@
memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
cp.handle = cpu_to_le16(conn->handle);
- conn->pin_length = ltk->pin_len;
+
+ if (ltk->authenticated)
+ conn->sec_level = BT_SECURITY_HIGH;
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+ if (ltk->type & HCI_SMP_STK) {
+ list_del(&ltk->list);
+ kfree(ltk);
+ }
+
hci_dev_unlock(hdev);
return;
@@ -3271,6 +3549,10 @@
hci_remote_oob_data_request_evt(hdev, skb);
break;
+ case HCI_EV_NUM_COMP_BLOCKS:
+ hci_num_comp_blocks_evt(hdev, skb);
+ break;
+
default:
BT_DBG("%s event 0x%x", hdev->name, event);
break;
@@ -3279,34 +3561,3 @@
kfree_skb(skb);
hdev->stat.evt_rx++;
}
-
-/* Generate internal stack event */
-void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
-{
- struct hci_event_hdr *hdr;
- struct hci_ev_stack_internal *ev;
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
- if (!skb)
- return;
-
- hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
- hdr->evt = HCI_EV_STACK_INTERNAL;
- hdr->plen = sizeof(*ev) + dlen;
-
- ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
- ev->type = type;
- memcpy(ev->data, data, dlen);
-
- bt_cb(skb)->incoming = 1;
- __net_timestamp(skb);
-
- bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
- skb->dev = (void *) hdev;
- hci_send_to_sock(hdev, skb, NULL);
- kfree_skb(skb);
-}
-
-module_param(enable_le, bool, 0644);
-MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 0dcc962..63afd234 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -48,8 +48,9 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_mon.h>
-static bool enable_mgmt;
+static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
@@ -85,22 +86,20 @@
};
/* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
- struct sock *skip_sk)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
struct sock *sk;
struct hlist_node *node;
+ struct sk_buff *skb_copy = NULL;
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
+
sk_for_each(sk, node, &hci_sk_list.head) {
struct hci_filter *flt;
struct sk_buff *nskb;
- if (sk == skip_sk)
- continue;
-
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
@@ -108,12 +107,9 @@
if (skb->sk == sk)
continue;
- if (bt_cb(skb)->channel != hci_pi(sk)->channel)
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
continue;
- if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
- goto clone;
-
/* Apply filter */
flt = &hci_pi(sk)->filter;
@@ -137,19 +133,301 @@
continue;
}
-clone:
- nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb_copy) {
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
+ if (!skb_copy)
+ continue;
+
+ /* Put type byte before the data */
+ memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
+ }
+
+ nskb = skb_clone(skb_copy, GFP_ATOMIC);
if (!nskb)
continue;
- /* Put type byte before the data */
- if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
- memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
-
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
+
read_unlock(&hci_sk_list.lock);
+
+ kfree_skb(skb_copy);
+}
+
+/* Send frame to control socket */
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ /* Skip the original socket */
+ if (sk == skip_sk)
+ continue;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
+/* Send frame to monitor socket */
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ struct sk_buff *skb_copy = NULL;
+ __le16 opcode;
+
+ if (!atomic_read(&monitor_promisc))
+ return;
+
+ BT_DBG("hdev %p len %d", hdev, skb->len);
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
+ break;
+ case HCI_EVENT_PKT:
+ opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
+ break;
+ case HCI_ACLDATA_PKT:
+ if (bt_cb(skb)->incoming)
+ opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
+ else
+ opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
+ break;
+ case HCI_SCODATA_PKT:
+ if (bt_cb(skb)->incoming)
+ opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
+ else
+ opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
+ break;
+ default:
+ return;
+ }
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ if (!skb_copy) {
+ struct hci_mon_hdr *hdr;
+
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
+ if (!skb_copy)
+ continue;
+
+ /* Put header before the data */
+ hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len);
+ }
+
+ nskb = skb_clone(skb_copy, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+
+ kfree_skb(skb_copy);
+}
+
+static void send_monitor_event(struct sk_buff *skb)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
+static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+{
+ struct hci_mon_hdr *hdr;
+ struct hci_mon_new_index *ni;
+ struct sk_buff *skb;
+ __le16 opcode;
+
+ switch (event) {
+ case HCI_DEV_REG:
+ skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+ ni->type = hdev->dev_type;
+ ni->bus = hdev->bus;
+ bacpy(&ni->bdaddr, &hdev->bdaddr);
+ memcpy(ni->name, hdev->name, 8);
+
+ opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
+ break;
+
+ case HCI_DEV_UNREG:
+ skb = bt_skb_alloc(0, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
+ break;
+
+ default:
+ return NULL;
+ }
+
+ __net_timestamp(skb);
+
+ hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
+static void send_monitor_replay(struct sock *sk)
+{
+ struct hci_dev *hdev;
+
+ read_lock(&hci_dev_list_lock);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ struct sk_buff *skb;
+
+ skb = create_monitor_event(hdev, HCI_DEV_REG);
+ if (!skb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, skb))
+ kfree_skb(skb);
+ }
+
+ read_unlock(&hci_dev_list_lock);
+}
+
+/* Generate internal stack event */
+static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
+{
+ struct hci_event_hdr *hdr;
+ struct hci_ev_stack_internal *ev;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
+ hdr->evt = HCI_EV_STACK_INTERNAL;
+ hdr->plen = sizeof(*ev) + dlen;
+
+ ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
+ ev->type = type;
+ memcpy(ev->data, data, dlen);
+
+ bt_cb(skb)->incoming = 1;
+ __net_timestamp(skb);
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ skb->dev = (void *) hdev;
+ hci_send_to_sock(hdev, skb);
+ kfree_skb(skb);
+}
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event)
+{
+ struct hci_ev_si_device ev;
+
+ BT_DBG("hdev %s event %d", hdev->name, event);
+
+ /* Send event to monitor */
+ if (atomic_read(&monitor_promisc)) {
+ struct sk_buff *skb;
+
+ skb = create_monitor_event(hdev, event);
+ if (skb) {
+ send_monitor_event(skb);
+ kfree_skb(skb);
+ }
+ }
+
+ /* Send event to sockets */
+ ev.event = event;
+ ev.dev_id = hdev->id;
+ hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+
+ if (event == HCI_DEV_UNREG) {
+ struct sock *sk;
+ struct hlist_node *node;
+
+ /* Detach sockets from device */
+ read_lock(&hci_sk_list.lock);
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ bh_lock_sock_nested(sk);
+ if (hci_pi(sk)->hdev == hdev) {
+ hci_pi(sk)->hdev = NULL;
+ sk->sk_err = EPIPE;
+ sk->sk_state = BT_OPEN;
+ sk->sk_state_change(sk);
+
+ hci_dev_put(hdev);
+ }
+ bh_unlock_sock(sk);
+ }
+ read_unlock(&hci_sk_list.lock);
+ }
}
static int hci_sock_release(struct socket *sock)
@@ -164,6 +442,9 @@
hdev = hci_pi(sk)->hdev;
+ if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
+ atomic_dec(&monitor_promisc);
+
bt_sock_unlink(&hci_sk_list, sk);
if (hdev) {
@@ -190,7 +471,7 @@
hci_dev_lock(hdev);
- err = hci_blacklist_add(hdev, &bdaddr);
+ err = hci_blacklist_add(hdev, &bdaddr, 0);
hci_dev_unlock(hdev);
@@ -207,7 +488,7 @@
hci_dev_lock(hdev);
- err = hci_blacklist_del(hdev, &bdaddr);
+ err = hci_blacklist_del(hdev, &bdaddr, 0);
hci_dev_unlock(hdev);
@@ -340,34 +621,69 @@
if (haddr.hci_family != AF_BLUETOOTH)
return -EINVAL;
- if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
- return -EINVAL;
-
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
- if (!enable_mgmt)
- return -EINVAL;
- set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
- }
-
lock_sock(sk);
- if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
+ if (sk->sk_state == BT_BOUND) {
err = -EALREADY;
goto done;
}
- if (haddr.hci_dev != HCI_DEV_NONE) {
- hdev = hci_dev_get(haddr.hci_dev);
- if (!hdev) {
- err = -ENODEV;
+ switch (haddr.hci_channel) {
+ case HCI_CHANNEL_RAW:
+ if (hci_pi(sk)->hdev) {
+ err = -EALREADY;
goto done;
}
- atomic_inc(&hdev->promisc);
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ hdev = hci_dev_get(haddr.hci_dev);
+ if (!hdev) {
+ err = -ENODEV;
+ goto done;
+ }
+
+ atomic_inc(&hdev->promisc);
+ }
+
+ hci_pi(sk)->hdev = hdev;
+ break;
+
+ case HCI_CHANNEL_CONTROL:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ break;
+
+ case HCI_CHANNEL_MONITOR:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_RAW)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ send_monitor_replay(sk);
+
+ atomic_inc(&monitor_promisc);
+ break;
+
+ default:
+ err = -EINVAL;
+ goto done;
}
+
hci_pi(sk)->channel = haddr.hci_channel;
- hci_pi(sk)->hdev = hdev;
sk->sk_state = BT_BOUND;
done:
@@ -461,7 +777,15 @@
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- hci_sock_cmsg(sk, msg, skb);
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ hci_sock_cmsg(sk, msg, skb);
+ break;
+ case HCI_CHANNEL_CONTROL:
+ case HCI_CHANNEL_MONITOR:
+ sock_recv_timestamp(msg, sk, skb);
+ break;
+ }
skb_free_datagram(sk, skb);
@@ -495,6 +819,9 @@
case HCI_CHANNEL_CONTROL:
err = mgmt_control(sk, msg, len);
goto done;
+ case HCI_CHANNEL_MONITOR:
+ err = -EOPNOTSUPP;
+ goto done;
default:
err = -EINVAL;
goto done;
@@ -574,6 +901,11 @@
lock_sock(sk);
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (optname) {
case HCI_DATA_DIR:
if (get_user(opt, (int __user *)optval)) {
@@ -636,6 +968,7 @@
break;
}
+done:
release_sock(sk);
return err;
}
@@ -644,11 +977,20 @@
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
- int len, opt;
+ int len, opt, err = 0;
+
+ BT_DBG("sk %p, opt %d", sk, optname);
if (get_user(len, optlen))
return -EFAULT;
+ lock_sock(sk);
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (optname) {
case HCI_DATA_DIR:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -657,7 +999,7 @@
opt = 0;
if (put_user(opt, optval))
- return -EFAULT;
+ err = -EFAULT;
break;
case HCI_TIME_STAMP:
@@ -667,7 +1009,7 @@
opt = 0;
if (put_user(opt, optval))
- return -EFAULT;
+ err = -EFAULT;
break;
case HCI_FILTER:
@@ -682,15 +1024,17 @@
len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
- return -EFAULT;
+ err = -EFAULT;
break;
default:
- return -ENOPROTOOPT;
+ err = -ENOPROTOOPT;
break;
}
- return 0;
+done:
+ release_sock(sk);
+ return err;
}
static const struct proto_ops hci_sock_ops = {
@@ -748,52 +1092,12 @@
return 0;
}
-static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct hci_dev *hdev = (struct hci_dev *) ptr;
- struct hci_ev_si_device ev;
-
- BT_DBG("hdev %s event %ld", hdev->name, event);
-
- /* Send event to sockets */
- ev.event = event;
- ev.dev_id = hdev->id;
- hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
-
- if (event == HCI_DEV_UNREG) {
- struct sock *sk;
- struct hlist_node *node;
-
- /* Detach sockets from device */
- read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
- bh_lock_sock_nested(sk);
- if (hci_pi(sk)->hdev == hdev) {
- hci_pi(sk)->hdev = NULL;
- sk->sk_err = EPIPE;
- sk->sk_state = BT_OPEN;
- sk->sk_state_change(sk);
-
- hci_dev_put(hdev);
- }
- bh_unlock_sock(sk);
- }
- read_unlock(&hci_sk_list.lock);
- }
-
- return NOTIFY_DONE;
-}
-
static const struct net_proto_family hci_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = hci_sock_create,
};
-static struct notifier_block hci_sock_nblock = {
- .notifier_call = hci_sock_dev_event
-};
-
int __init hci_sock_init(void)
{
int err;
@@ -806,8 +1110,6 @@
if (err < 0)
goto error;
- hci_register_notifier(&hci_sock_nblock);
-
BT_INFO("HCI socket layer initialized");
return 0;
@@ -823,10 +1125,5 @@
if (bt_sock_unregister(BTPROTO_HCI) < 0)
BT_ERR("HCI socket unregistration failed");
- hci_unregister_notifier(&hci_sock_nblock);
-
proto_unregister(&hci_sk_proto);
}
-
-module_param(enable_mgmt, bool, 0644);
-MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5210956..bc15429 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -33,19 +33,19 @@
static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
+ struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
+ struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", batostr(&conn->dst));
}
static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
+ struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
conn->features[0], conn->features[1],
@@ -79,8 +79,8 @@
static void bt_link_release(struct device *dev)
{
- void *data = dev_get_drvdata(dev);
- kfree(data);
+ struct hci_conn *conn = to_hci_conn(dev);
+ kfree(conn);
}
static struct device_type bt_link = {
@@ -120,8 +120,6 @@
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
- dev_set_drvdata(&conn->dev, conn);
-
if (device_add(&conn->dev) < 0) {
BT_ERR("Failed to register connection device");
return;
@@ -189,19 +187,19 @@
static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
int i;
@@ -214,20 +212,20 @@
static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%.2x%.2x%.2x\n",
hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
}
static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
hdev->features[0], hdev->features[1],
@@ -238,31 +236,31 @@
static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
unsigned int val;
int rv;
@@ -280,13 +278,13 @@
static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
int rv;
@@ -304,13 +302,13 @@
static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
int rv;
@@ -370,8 +368,9 @@
static void bt_host_release(struct device *dev)
{
- void *data = dev_get_drvdata(dev);
- kfree(data);
+ struct hci_dev *hdev = to_hci_dev(dev);
+ kfree(hdev);
+ module_put(THIS_MODULE);
}
static struct device_type bt_host = {
@@ -383,12 +382,12 @@
static int inquiry_cache_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
hci_dev_lock(hdev);
- for (e = cache->list; e; e = e->next) {
+ list_for_each_entry(e, &cache->all, all) {
struct inquiry_data *data = &e->data;
seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
batostr(&data->bdaddr),
@@ -523,7 +522,7 @@
dev->type = &bt_host;
dev->class = bt_class;
- dev_set_drvdata(dev, hdev);
+ __module_get(THIS_MODULE);
device_initialize(dev);
}
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 178ac7f..73a32d7 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -160,10 +160,10 @@
{
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -174,7 +174,7 @@
err = hidp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 32d338c..3e450f4 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -73,42 +73,28 @@
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
struct l2cap_chan *chan, int err);
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
-
/* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
- struct l2cap_chan *c, *r = NULL;
+ struct l2cap_chan *c;
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &conn->chan_l, list) {
- if (c->dcid == cid) {
- r = c;
- break;
- }
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->dcid == cid)
+ return c;
}
-
- rcu_read_unlock();
- return r;
+ return NULL;
}
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
- struct l2cap_chan *c, *r = NULL;
+ struct l2cap_chan *c;
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &conn->chan_l, list) {
- if (c->scid == cid) {
- r = c;
- break;
- }
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->scid == cid)
+ return c;
}
-
- rcu_read_unlock();
- return r;
+ return NULL;
}
/* Find channel with given SCID.
@@ -117,36 +103,32 @@
{
struct l2cap_chan *c;
+ mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
- if (c)
- lock_sock(c->sk);
+ mutex_unlock(&conn->chan_lock);
+
return c;
}
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
- struct l2cap_chan *c, *r = NULL;
+ struct l2cap_chan *c;
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &conn->chan_l, list) {
- if (c->ident == ident) {
- r = c;
- break;
- }
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->ident == ident)
+ return c;
}
-
- rcu_read_unlock();
- return r;
+ return NULL;
}
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
struct l2cap_chan *c;
+ mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_ident(conn, ident);
- if (c)
- lock_sock(c->sk);
+ mutex_unlock(&conn->chan_lock);
+
return c;
}
@@ -217,51 +199,51 @@
return 0;
}
-static char *state_to_string(int state)
+static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
- switch(state) {
- case BT_CONNECTED:
- return "BT_CONNECTED";
- case BT_OPEN:
- return "BT_OPEN";
- case BT_BOUND:
- return "BT_BOUND";
- case BT_LISTEN:
- return "BT_LISTEN";
- case BT_CONNECT:
- return "BT_CONNECT";
- case BT_CONNECT2:
- return "BT_CONNECT2";
- case BT_CONFIG:
- return "BT_CONFIG";
- case BT_DISCONN:
- return "BT_DISCONN";
- case BT_CLOSED:
- return "BT_CLOSED";
- }
-
- return "invalid state";
-}
-
-static void l2cap_state_change(struct l2cap_chan *chan, int state)
-{
- BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
+ BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
state_to_string(state));
chan->state = state;
chan->ops->state_change(chan->data, state);
}
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+ struct sock *sk = chan->sk;
+
+ lock_sock(sk);
+ __l2cap_state_change(chan, state);
+ release_sock(sk);
+}
+
+static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->sk;
+
+ sk->sk_err = err;
+}
+
+static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->sk;
+
+ lock_sock(sk);
+ __l2cap_chan_set_err(chan, err);
+ release_sock(sk);
+}
+
static void l2cap_chan_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
chan_timer.work);
- struct sock *sk = chan->sk;
+ struct l2cap_conn *conn = chan->conn;
int reason;
- BT_DBG("chan %p state %d", chan, chan->state);
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
- lock_sock(sk);
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
reason = ECONNREFUSED;
@@ -273,9 +255,11 @@
l2cap_chan_close(chan, reason);
- release_sock(sk);
+ l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ mutex_unlock(&conn->chan_lock);
+
l2cap_chan_put(chan);
}
@@ -287,6 +271,8 @@
if (!chan)
return NULL;
+ mutex_init(&chan->lock);
+
chan->sk = sk;
write_lock(&chan_list_lock);
@@ -313,7 +299,7 @@
l2cap_chan_put(chan);
}
-static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
chan->psm, chan->dcid);
@@ -322,7 +308,8 @@
chan->conn = conn;
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_CONN_ORIENTED:
if (conn->hcon->type == LE_LINK) {
/* LE connection */
chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -333,12 +320,16 @@
chan->scid = l2cap_alloc_cid(conn);
chan->omtu = L2CAP_DEFAULT_MTU;
}
- } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
+ break;
+
+ case L2CAP_CHAN_CONN_LESS:
/* Connectionless socket */
chan->scid = L2CAP_CID_CONN_LESS;
chan->dcid = L2CAP_CID_CONN_LESS;
chan->omtu = L2CAP_DEFAULT_MTU;
- } else {
+ break;
+
+ default:
/* Raw socket can send/recv signalling messages only */
chan->scid = L2CAP_CID_SIGNALING;
chan->dcid = L2CAP_CID_SIGNALING;
@@ -354,11 +345,16 @@
l2cap_chan_hold(chan);
- list_add_rcu(&chan->list, &conn->chan_l);
+ list_add(&chan->list, &conn->chan_l);
}
-/* Delete channel.
- * Must be called on the locked socket. */
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+{
+ mutex_lock(&conn->chan_lock);
+ __l2cap_chan_add(conn, chan);
+ mutex_unlock(&conn->chan_lock);
+}
+
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
struct sock *sk = chan->sk;
@@ -371,8 +367,7 @@
if (conn) {
/* Delete from channel list */
- list_del_rcu(&chan->list);
- synchronize_rcu();
+ list_del(&chan->list);
l2cap_chan_put(chan);
@@ -380,11 +375,13 @@
hci_conn_put(conn->hcon);
}
- l2cap_state_change(chan, BT_CLOSED);
+ lock_sock(sk);
+
+ __l2cap_state_change(chan, BT_CLOSED);
sock_set_flag(sk, SOCK_ZAPPED);
if (err)
- sk->sk_err = err;
+ __l2cap_chan_set_err(chan, err);
if (parent) {
bt_accept_unlink(sk);
@@ -392,6 +389,8 @@
} else
sk->sk_state_change(sk);
+ release_sock(sk);
+
if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
test_bit(CONF_INPUT_DONE, &chan->conf_state)))
return;
@@ -423,10 +422,12 @@
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ l2cap_chan_lock(chan);
__clear_chan_timer(chan);
- lock_sock(sk);
l2cap_chan_close(chan, ECONNRESET);
- release_sock(sk);
+ l2cap_chan_unlock(chan);
+
chan->ops->close(chan->data);
}
}
@@ -436,14 +437,17 @@
struct l2cap_conn *conn = chan->conn;
struct sock *sk = chan->sk;
- BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
+ BT_DBG("chan %p state %s sk %p", chan,
+ state_to_string(chan->state), sk);
switch (chan->state) {
case BT_LISTEN:
+ lock_sock(sk);
l2cap_chan_cleanup_listen(sk);
- l2cap_state_change(chan, BT_CLOSED);
+ __l2cap_state_change(chan, BT_CLOSED);
sock_set_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
break;
case BT_CONNECTED:
@@ -486,7 +490,9 @@
break;
default:
+ lock_sock(sk);
sock_set_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
break;
}
}
@@ -661,6 +667,21 @@
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
+static void l2cap_send_conn_req(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn_req req;
+
+ req.scid = cpu_to_le16(chan->scid);
+ req.psm = chan->psm;
+
+ chan->ident = l2cap_get_ident(conn);
+
+ set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -670,17 +691,8 @@
return;
if (l2cap_chan_check_security(chan) &&
- __l2cap_no_conn_pending(chan)) {
- struct l2cap_conn_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
-
- chan->ident = l2cap_get_ident(conn);
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
-
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
- sizeof(req), &req);
- }
+ __l2cap_no_conn_pending(chan))
+ l2cap_send_conn_req(chan);
} else {
struct l2cap_info_req req;
req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -688,8 +700,7 @@
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- schedule_delayed_work(&conn->info_timer,
- msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(req), &req);
@@ -714,14 +725,12 @@
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
- struct sock *sk;
+ struct sock *sk = chan->sk;
struct l2cap_disconn_req req;
if (!conn)
return;
- sk = chan->sk;
-
if (chan->mode == L2CAP_MODE_ERTM) {
__clear_retrans_timer(chan);
__clear_monitor_timer(chan);
@@ -733,56 +742,47 @@
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
- l2cap_state_change(chan, BT_DISCONN);
- sk->sk_err = err;
+ lock_sock(sk);
+ __l2cap_state_change(chan, BT_DISCONN);
+ __l2cap_chan_set_err(chan, err);
+ release_sock(sk);
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan *chan, *tmp;
BT_DBG("conn %p", conn);
- rcu_read_lock();
+ mutex_lock(&conn->chan_lock);
- list_for_each_entry_rcu(chan, &conn->chan_l, list) {
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
struct sock *sk = chan->sk;
- bh_lock_sock(sk);
+ l2cap_chan_lock(chan);
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
if (chan->state == BT_CONNECT) {
- struct l2cap_conn_req req;
-
if (!l2cap_chan_check_security(chan) ||
!__l2cap_no_conn_pending(chan)) {
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
&& test_bit(CONF_STATE2_DEVICE,
&chan->conf_state)) {
- /* l2cap_chan_close() calls list_del(chan)
- * so release the lock */
l2cap_chan_close(chan, ECONNRESET);
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
-
- chan->ident = l2cap_get_ident(conn);
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
-
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
- sizeof(req), &req);
+ l2cap_send_conn_req(chan);
} else if (chan->state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
@@ -791,6 +791,7 @@
rsp.dcid = cpu_to_le16(chan->scid);
if (l2cap_chan_check_security(chan)) {
+ lock_sock(sk);
if (bt_sk(sk)->defer_setup) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -799,10 +800,11 @@
parent->sk_data_ready(parent, 0);
} else {
- l2cap_state_change(chan, BT_CONFIG);
+ __l2cap_state_change(chan, BT_CONFIG);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
}
+ release_sock(sk);
} else {
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -813,7 +815,7 @@
if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
rsp.result != L2CAP_CR_SUCCESS) {
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
@@ -823,10 +825,10 @@
chan->num_conf_req++;
}
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
}
- rcu_read_unlock();
+ mutex_unlock(&conn->chan_lock);
}
/* Find socket with cid and source bdaddr.
@@ -902,28 +904,34 @@
__set_chan_timer(chan, sk->sk_sndtimeo);
- l2cap_state_change(chan, BT_CONNECTED);
+ __l2cap_state_change(chan, BT_CONNECTED);
parent->sk_data_ready(parent, 0);
clean:
release_sock(parent);
}
-static void l2cap_chan_ready(struct sock *sk)
+static void l2cap_chan_ready(struct l2cap_chan *chan)
{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- struct sock *parent = bt_sk(sk)->parent;
+ struct sock *sk = chan->sk;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
chan->conf_state = 0;
__clear_chan_timer(chan);
- l2cap_state_change(chan, BT_CONNECTED);
+ __l2cap_state_change(chan, BT_CONNECTED);
sk->sk_state_change(sk);
if (parent)
parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -938,29 +946,31 @@
if (conn->hcon->out && conn->hcon->type == LE_LINK)
smp_conn_security(conn, conn->hcon->pending_sec_level);
- rcu_read_lock();
+ mutex_lock(&conn->chan_lock);
- list_for_each_entry_rcu(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
+ list_for_each_entry(chan, &conn->chan_l, list) {
- bh_lock_sock(sk);
+ l2cap_chan_lock(chan);
if (conn->hcon->type == LE_LINK) {
if (smp_conn_security(conn, chan->sec_level))
- l2cap_chan_ready(sk);
+ l2cap_chan_ready(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ struct sock *sk = chan->sk;
__clear_chan_timer(chan);
- l2cap_state_change(chan, BT_CONNECTED);
+ lock_sock(sk);
+ __l2cap_state_change(chan, BT_CONNECTED);
sk->sk_state_change(sk);
+ release_sock(sk);
} else if (chan->state == BT_CONNECT)
l2cap_do_start(chan);
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
}
- rcu_read_unlock();
+ mutex_unlock(&conn->chan_lock);
}
/* Notify sockets that we cannot guaranty reliability anymore */
@@ -970,16 +980,14 @@
BT_DBG("conn %p", conn);
- rcu_read_lock();
+ mutex_lock(&conn->chan_lock);
- list_for_each_entry_rcu(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
-
+ list_for_each_entry(chan, &conn->chan_l, list) {
if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
- sk->sk_err = err;
+ __l2cap_chan_set_err(chan, err);
}
- rcu_read_unlock();
+ mutex_unlock(&conn->chan_lock);
}
static void l2cap_info_timeout(struct work_struct *work)
@@ -997,7 +1005,6 @@
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan, *l;
- struct sock *sk;
if (!conn)
return;
@@ -1006,21 +1013,27 @@
kfree_skb(conn->rx_skb);
+ mutex_lock(&conn->chan_lock);
+
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
- sk = chan->sk;
- lock_sock(sk);
+ l2cap_chan_lock(chan);
+
l2cap_chan_del(chan, err);
- release_sock(sk);
+
+ l2cap_chan_unlock(chan);
+
chan->ops->close(chan->data);
}
+ mutex_unlock(&conn->chan_lock);
+
hci_chan_del(conn->hchan);
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
cancel_delayed_work_sync(&conn->info_timer);
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
cancel_delayed_work_sync(&conn->security_timer);
smp_chan_destroy(conn);
}
@@ -1072,6 +1085,7 @@
conn->feat_mask = 0;
spin_lock_init(&conn->lock);
+ mutex_init(&conn->chan_lock);
INIT_LIST_HEAD(&conn->chan_l);
@@ -1139,7 +1153,7 @@
hci_dev_lock(hdev);
- lock_sock(sk);
+ l2cap_chan_lock(chan);
/* PSM must be odd and lsb of upper byte must be 0 */
if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
@@ -1166,17 +1180,21 @@
goto done;
}
+ lock_sock(sk);
+
switch (sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
err = 0;
+ release_sock(sk);
goto done;
case BT_CONNECTED:
/* Already connected */
err = -EISCONN;
+ release_sock(sk);
goto done;
case BT_OPEN:
@@ -1186,11 +1204,15 @@
default:
err = -EBADFD;
+ release_sock(sk);
goto done;
}
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, dst);
+
+ release_sock(sk);
+
chan->psm = psm;
chan->dcid = cid;
@@ -1218,7 +1240,9 @@
/* Update source addr of the socket */
bacpy(src, conn->src);
+ l2cap_chan_unlock(chan);
l2cap_chan_add(conn, chan);
+ l2cap_chan_lock(chan);
l2cap_state_change(chan, BT_CONNECT);
__set_chan_timer(chan, sk->sk_sndtimeo);
@@ -1235,6 +1259,7 @@
err = 0;
done:
+ l2cap_chan_unlock(chan);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1276,14 +1301,14 @@
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
monitor_timer.work);
- struct sock *sk = chan->sk;
BT_DBG("chan %p", chan);
- lock_sock(sk);
+ l2cap_chan_lock(chan);
+
if (chan->retry_count >= chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- release_sock(sk);
+ l2cap_chan_unlock(chan);
return;
}
@@ -1291,25 +1316,26 @@
__set_monitor_timer(chan);
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- release_sock(sk);
+ l2cap_chan_unlock(chan);
}
static void l2cap_retrans_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
retrans_timer.work);
- struct sock *sk = chan->sk;
BT_DBG("chan %p", chan);
- lock_sock(sk);
+ l2cap_chan_lock(chan);
+
chan->retry_count = 1;
__set_monitor_timer(chan);
set_bit(CONN_WAIT_F, &chan->conn_state);
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- release_sock(sk);
+
+ l2cap_chan_unlock(chan);
}
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1450,17 +1476,19 @@
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
- if (bt_cb(skb)->retries == 1)
+ if (bt_cb(skb)->retries == 1) {
chan->unacked_frames++;
+ if (!nsent++)
+ __clear_ack_timer(chan);
+ }
+
chan->frames_sent++;
if (skb_queue_is_last(&chan->tx_q, skb))
chan->tx_send_head = NULL;
else
chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
-
- nsent++;
}
return nsent;
@@ -1478,7 +1506,7 @@
return ret;
}
-static void l2cap_send_ack(struct l2cap_chan *chan)
+static void __l2cap_send_ack(struct l2cap_chan *chan)
{
u32 control = 0;
@@ -1498,6 +1526,12 @@
l2cap_send_sframe(chan, control);
}
+static void l2cap_send_ack(struct l2cap_chan *chan)
+{
+ __clear_ack_timer(chan);
+ __l2cap_send_ack(chan);
+}
+
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
struct srej_list *tail;
@@ -1512,9 +1546,11 @@
l2cap_send_sframe(chan, control);
}
-static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
+static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
+ struct msghdr *msg, int len,
+ int count, struct sk_buff *skb)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
+ struct l2cap_conn *conn = chan->conn;
struct sk_buff **frag;
int err, sent = 0;
@@ -1529,7 +1565,10 @@
while (len) {
count = min_t(unsigned int, conn->mtu, len);
- *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
+ *frag = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT,
+ &err);
+
if (!*frag)
return err;
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
@@ -1550,17 +1589,18 @@
struct msghdr *msg, size_t len,
u32 priority)
{
- struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
+ BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+
if (!skb)
return ERR_PTR(err);
@@ -1572,7 +1612,7 @@
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
put_unaligned_le16(chan->psm, skb_put(skb, 2));
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1584,17 +1624,18 @@
struct msghdr *msg, size_t len,
u32 priority)
{
- struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("sk %p len %d", sk, (int)len);
+ BT_DBG("chan %p len %d", chan, (int)len);
count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+
if (!skb)
return ERR_PTR(err);
@@ -1605,7 +1646,7 @@
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1617,13 +1658,12 @@
struct msghdr *msg, size_t len,
u32 control, u16 sdulen)
{
- struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen;
struct l2cap_hdr *lh;
- BT_DBG("sk %p len %d", sk, (int)len);
+ BT_DBG("chan %p len %d", chan, (int)len);
if (!conn)
return ERR_PTR(-ENOTCONN);
@@ -1640,8 +1680,10 @@
hlen += L2CAP_FCS_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+
if (!skb)
return ERR_PTR(err);
@@ -1655,7 +1697,7 @@
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1801,9 +1843,9 @@
BT_DBG("conn %p", conn);
- rcu_read_lock();
+ mutex_lock(&conn->chan_lock);
- list_for_each_entry_rcu(chan, &conn->chan_l, list) {
+ list_for_each_entry(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
@@ -1819,7 +1861,7 @@
kfree_skb(nskb);
}
- rcu_read_unlock();
+ mutex_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
@@ -1987,9 +2029,13 @@
BT_DBG("chan %p", chan);
- lock_sock(chan->sk);
- l2cap_send_ack(chan);
- release_sock(chan->sk);
+ l2cap_chan_lock(chan);
+
+ __l2cap_send_ack(chan);
+
+ l2cap_chan_unlock(chan);
+
+ l2cap_chan_put(chan);
}
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
@@ -2607,6 +2653,7 @@
parent = pchan->sk;
+ mutex_lock(&conn->chan_lock);
lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
@@ -2647,7 +2694,7 @@
bt_accept_enqueue(parent, sk);
- l2cap_chan_add(conn, chan);
+ __l2cap_chan_add(conn, chan);
dcid = chan->scid;
@@ -2658,28 +2705,29 @@
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_chan_check_security(chan)) {
if (bt_sk(sk)->defer_setup) {
- l2cap_state_change(chan, BT_CONNECT2);
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
} else {
- l2cap_state_change(chan, BT_CONFIG);
+ __l2cap_state_change(chan, BT_CONFIG);
result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
}
} else {
- l2cap_state_change(chan, BT_CONNECT2);
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
}
} else {
- l2cap_state_change(chan, BT_CONNECT2);
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
}
response:
release_sock(parent);
+ mutex_unlock(&conn->chan_lock);
sendresp:
rsp.scid = cpu_to_le16(scid);
@@ -2695,8 +2743,7 @@
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- schedule_delayed_work(&conn->info_timer,
- msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(info), &info);
@@ -2719,27 +2766,36 @@
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
struct l2cap_chan *chan;
- struct sock *sk;
u8 req[128];
+ int err;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
- BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ dcid, scid, result, status);
+
+ mutex_lock(&conn->chan_lock);
if (scid) {
- chan = l2cap_get_chan_by_scid(conn, scid);
- if (!chan)
- return -EFAULT;
+ chan = __l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+ err = -EFAULT;
+ goto unlock;
+ }
} else {
- chan = l2cap_get_chan_by_ident(conn, cmd->ident);
- if (!chan)
- return -EFAULT;
+ chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+ if (!chan) {
+ err = -EFAULT;
+ goto unlock;
+ }
}
- sk = chan->sk;
+ err = 0;
+
+ l2cap_chan_lock(chan);
switch (result) {
case L2CAP_CR_SUCCESS:
@@ -2765,8 +2821,12 @@
break;
}
- release_sock(sk);
- return 0;
+ l2cap_chan_unlock(chan);
+
+unlock:
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
}
static inline void set_default_fcs(struct l2cap_chan *chan)
@@ -2786,7 +2846,6 @@
u16 dcid, flags;
u8 rsp[64];
struct l2cap_chan *chan;
- struct sock *sk;
int len;
dcid = __le16_to_cpu(req->dcid);
@@ -2798,7 +2857,7 @@
if (!chan)
return -ENOENT;
- sk = chan->sk;
+ l2cap_chan_lock(chan);
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
@@ -2860,7 +2919,7 @@
if (chan->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(chan);
- l2cap_chan_ready(sk);
+ l2cap_chan_ready(chan);
goto unlock;
}
@@ -2887,7 +2946,7 @@
}
unlock:
- release_sock(sk);
+ l2cap_chan_unlock(chan);
return 0;
}
@@ -2896,7 +2955,6 @@
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
- struct sock *sk;
int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
@@ -2910,7 +2968,7 @@
if (!chan)
return 0;
- sk = chan->sk;
+ l2cap_chan_lock(chan);
switch (result) {
case L2CAP_CONF_SUCCESS:
@@ -2969,9 +3027,9 @@
}
default:
- sk->sk_err = ECONNRESET;
- __set_chan_timer(chan,
- msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
+ l2cap_chan_set_err(chan, ECONNRESET);
+
+ __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
l2cap_send_disconn_req(conn, chan, ECONNRESET);
goto done;
}
@@ -2991,11 +3049,11 @@
if (chan->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(chan);
- l2cap_chan_ready(sk);
+ l2cap_chan_ready(chan);
}
done:
- release_sock(sk);
+ l2cap_chan_unlock(chan);
return 0;
}
@@ -3012,9 +3070,15 @@
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
- chan = l2cap_get_chan_by_scid(conn, dcid);
- if (!chan)
+ mutex_lock(&conn->chan_lock);
+
+ chan = __l2cap_get_chan_by_scid(conn, dcid);
+ if (!chan) {
+ mutex_unlock(&conn->chan_lock);
return 0;
+ }
+
+ l2cap_chan_lock(chan);
sk = chan->sk;
@@ -3022,12 +3086,18 @@
rsp.scid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
+ lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
-
- l2cap_chan_del(chan, ECONNRESET);
release_sock(sk);
+ l2cap_chan_del(chan, ECONNRESET);
+
+ l2cap_chan_unlock(chan);
+
chan->ops->close(chan->data);
+
+ mutex_unlock(&conn->chan_lock);
+
return 0;
}
@@ -3036,23 +3106,30 @@
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
u16 dcid, scid;
struct l2cap_chan *chan;
- struct sock *sk;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
- chan = l2cap_get_chan_by_scid(conn, scid);
- if (!chan)
- return 0;
+ mutex_lock(&conn->chan_lock);
- sk = chan->sk;
+ chan = __l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
+ l2cap_chan_lock(chan);
l2cap_chan_del(chan, 0);
- release_sock(sk);
+
+ l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+
+ mutex_unlock(&conn->chan_lock);
+
return 0;
}
@@ -3132,7 +3209,8 @@
return 0;
}
- if (type == L2CAP_IT_FEAT_MASK) {
+ switch (type) {
+ case L2CAP_IT_FEAT_MASK:
conn->feat_mask = get_unaligned_le32(rsp->data);
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
@@ -3149,11 +3227,15 @@
l2cap_conn_start(conn);
}
- } else if (type == L2CAP_IT_FIXED_CHAN) {
+ break;
+
+ case L2CAP_IT_FIXED_CHAN:
+ conn->fixed_chan_mask = rsp->data[0];
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
+ break;
}
return 0;
@@ -3713,19 +3795,11 @@
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
- u32 control;
-
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- control = __set_reqseq(chan, chan->buffer_seq);
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- l2cap_send_sframe(chan, control);
-
- set_bit(CONN_RNR_SENT, &chan->conn_state);
-
- __clear_ack_timer(chan);
+ __set_ack_timer(chan);
}
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
@@ -3865,8 +3939,11 @@
goto drop;
}
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
+ l2cap_send_ack(chan);
goto drop;
+ }
if (tx_seq == chan->expected_tx_seq)
goto expected;
@@ -3927,15 +4004,15 @@
__skb_queue_head_init(&chan->srej_q);
l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
- set_bit(CONN_SEND_PBIT, &chan->conn_state);
+ /* Set P-bit only if there are some I-frames to ack. */
+ if (__clear_ack_timer(chan))
+ set_bit(CONN_SEND_PBIT, &chan->conn_state);
err = l2cap_send_srejframe(chan, tx_seq);
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, -err);
return err;
}
-
- __clear_ack_timer(chan);
}
return 0;
@@ -4135,9 +4212,8 @@
return 0;
}
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
+static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
u32 control;
u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
@@ -4205,7 +4281,6 @@
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
struct l2cap_chan *chan;
- struct sock *sk = NULL;
u32 control;
u16 tx_seq;
int len;
@@ -4213,10 +4288,12 @@
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
BT_DBG("unknown cid 0x%4.4x", cid);
- goto drop;
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return 0;
}
- sk = chan->sk;
+ l2cap_chan_lock(chan);
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4238,7 +4315,7 @@
break;
case L2CAP_MODE_ERTM:
- l2cap_ertm_data_rcv(sk, skb);
+ l2cap_ertm_data_rcv(chan, skb);
goto done;
@@ -4287,26 +4364,20 @@
kfree_skb(skb);
done:
- if (sk)
- release_sock(sk);
+ l2cap_chan_unlock(chan);
return 0;
}
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
- struct sock *sk = NULL;
struct l2cap_chan *chan;
chan = l2cap_global_chan_by_psm(0, psm, conn->src);
if (!chan)
goto drop;
- sk = chan->sk;
-
- lock_sock(sk);
-
- BT_DBG("sk %p, len %d", sk, skb->len);
+ BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
@@ -4315,31 +4386,23 @@
goto drop;
if (!chan->ops->recv(chan->data, skb))
- goto done;
+ return 0;
drop:
kfree_skb(skb);
-done:
- if (sk)
- release_sock(sk);
return 0;
}
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
- struct sock *sk = NULL;
struct l2cap_chan *chan;
chan = l2cap_global_chan_by_scid(0, cid, conn->src);
if (!chan)
goto drop;
- sk = chan->sk;
-
- lock_sock(sk);
-
- BT_DBG("sk %p, len %d", sk, skb->len);
+ BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
@@ -4348,14 +4411,11 @@
goto drop;
if (!chan->ops->recv(chan->data, skb))
- goto done;
+ return 0;
drop:
kfree_skb(skb);
-done:
- if (sk)
- release_sock(sk);
return 0;
}
@@ -4479,8 +4539,7 @@
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
__clear_chan_timer(chan);
- __set_chan_timer(chan,
- msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
+ __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
} else {
@@ -4504,57 +4563,49 @@
cancel_delayed_work(&conn->security_timer);
}
- rcu_read_lock();
+ mutex_lock(&conn->chan_lock);
- list_for_each_entry_rcu(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
-
- bh_lock_sock(sk);
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ l2cap_chan_lock(chan);
BT_DBG("chan->scid %d", chan->scid);
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
chan->sec_level = hcon->sec_level;
- l2cap_chan_ready(sk);
+ l2cap_chan_ready(chan);
}
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
if (!status && (chan->state == BT_CONNECTED ||
chan->state == BT_CONFIG)) {
l2cap_check_encryption(chan, encrypt);
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
if (chan->state == BT_CONNECT) {
if (!status) {
- struct l2cap_conn_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
-
- chan->ident = l2cap_get_ident(conn);
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
-
- l2cap_send_cmd(conn, chan->ident,
- L2CAP_CONN_REQ, sizeof(req), &req);
+ l2cap_send_conn_req(chan);
} else {
__clear_chan_timer(chan);
- __set_chan_timer(chan,
- msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
} else if (chan->state == BT_CONNECT2) {
+ struct sock *sk = chan->sk;
struct l2cap_conn_rsp rsp;
__u16 res, stat;
+ lock_sock(sk);
+
if (!status) {
if (bt_sk(sk)->defer_setup) {
struct sock *parent = bt_sk(sk)->parent;
@@ -4563,18 +4614,19 @@
if (parent)
parent->sk_data_ready(parent, 0);
} else {
- l2cap_state_change(chan, BT_CONFIG);
+ __l2cap_state_change(chan, BT_CONFIG);
res = L2CAP_CR_SUCCESS;
stat = L2CAP_CS_NO_INFO;
}
} else {
- l2cap_state_change(chan, BT_DISCONN);
- __set_chan_timer(chan,
- msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
+ __l2cap_state_change(chan, BT_DISCONN);
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
res = L2CAP_CR_SEC_BLOCK;
stat = L2CAP_CS_NO_INFO;
}
+ release_sock(sk);
+
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(res);
@@ -4583,10 +4635,10 @@
sizeof(rsp), &rsp);
}
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
}
- rcu_read_unlock();
+ mutex_unlock(&conn->chan_lock);
return 0;
}
@@ -4647,6 +4699,7 @@
if (chan && chan->sk) {
struct sock *sk = chan->sk;
+ lock_sock(sk);
if (chan->imtu < len - L2CAP_HDR_SIZE) {
BT_ERR("Frame exceeding recv MTU (len %d, "
@@ -4717,7 +4770,7 @@
c->state, __le16_to_cpu(c->psm),
c->scid, c->dcid, c->imtu, c->omtu,
c->sec_level, c->mode);
-}
+ }
read_unlock(&chan_list_lock);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 401d942..c4fe583 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -125,13 +125,15 @@
err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
if (err)
- goto done;
+ return err;
+
+ lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
- if (sock_owned_by_user(sk))
- release_sock(sk);
+
+ release_sock(sk);
+
return err;
}
@@ -783,7 +785,7 @@
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->sk_state);
+ BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
/* Kill poor orphan */
@@ -795,7 +797,8 @@
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_chan *chan;
+ struct l2cap_conn *conn;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -803,13 +806,24 @@
if (!sk)
return 0;
+ chan = l2cap_pi(sk)->chan;
+ conn = chan->conn;
+
+ if (conn)
+ mutex_lock(&conn->chan_lock);
+
+ l2cap_chan_lock(chan);
lock_sock(sk);
+
if (!sk->sk_shutdown) {
if (chan->mode == L2CAP_MODE_ERTM)
err = __l2cap_wait_ack(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
+
+ release_sock(sk);
l2cap_chan_close(chan, 0);
+ lock_sock(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -820,6 +834,11 @@
err = -sk->sk_err;
release_sock(sk);
+ l2cap_chan_unlock(chan);
+
+ if (conn)
+ mutex_unlock(&conn->chan_lock);
+
return err;
}
@@ -862,8 +881,12 @@
struct sock *sk = data;
struct l2cap_pinfo *pi = l2cap_pi(sk);
- if (pi->rx_busy_skb)
- return -ENOMEM;
+ lock_sock(sk);
+
+ if (pi->rx_busy_skb) {
+ err = -ENOMEM;
+ goto done;
+ }
err = sock_queue_rcv_skb(sk, skb);
@@ -882,6 +905,9 @@
err = 0;
}
+done:
+ release_sock(sk);
+
return err;
}
@@ -899,12 +925,22 @@
sk->sk_state = state;
}
+static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb,
+ int *err)
+{
+ struct sock *sk = chan->sk;
+
+ return bt_skb_send_alloc(sk, len, nb, err);
+}
+
static struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
.recv = l2cap_sock_recv_cb,
.close = l2cap_sock_close_cb,
.state_change = l2cap_sock_state_change_cb,
+ .alloc_skb = l2cap_sock_alloc_skb_cb,
};
static void l2cap_sock_destruct(struct sock *sk)
@@ -1004,7 +1040,7 @@
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 86a6bed..5066288 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -24,6 +24,8 @@
/* Bluetooth kernel library. */
+#define pr_fmt(fmt) "Bluetooth: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -151,7 +153,7 @@
}
EXPORT_SYMBOL(bt_to_errno);
-int bt_printk(const char *level, const char *format, ...)
+int bt_info(const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -162,10 +164,29 @@
vaf.fmt = format;
vaf.va = &args;
- r = printk("%sBluetooth: %pV\n", level, &vaf);
+ r = pr_info("%pV", &vaf);
va_end(args);
return r;
}
-EXPORT_SYMBOL(bt_printk);
+EXPORT_SYMBOL(bt_info);
+
+int bt_err(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = pr_err("%pV", &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index bc8e59d..7fcff88 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1,6 +1,8 @@
/*
BlueZ - Bluetooth protocol stack for Linux
+
Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -32,12 +34,92 @@
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
-#define MGMT_VERSION 0
-#define MGMT_REVISION 1
+bool enable_hs;
+bool enable_le;
-#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+#define MGMT_VERSION 1
+#define MGMT_REVISION 0
-#define SERVICE_CACHE_TIMEOUT (5 * 1000)
+static const u16 mgmt_commands[] = {
+ MGMT_OP_READ_INDEX_LIST,
+ MGMT_OP_READ_INFO,
+ MGMT_OP_SET_POWERED,
+ MGMT_OP_SET_DISCOVERABLE,
+ MGMT_OP_SET_CONNECTABLE,
+ MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_OP_SET_PAIRABLE,
+ MGMT_OP_SET_LINK_SECURITY,
+ MGMT_OP_SET_SSP,
+ MGMT_OP_SET_HS,
+ MGMT_OP_SET_LE,
+ MGMT_OP_SET_DEV_CLASS,
+ MGMT_OP_SET_LOCAL_NAME,
+ MGMT_OP_ADD_UUID,
+ MGMT_OP_REMOVE_UUID,
+ MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_OP_DISCONNECT,
+ MGMT_OP_GET_CONNECTIONS,
+ MGMT_OP_PIN_CODE_REPLY,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_OP_PAIR_DEVICE,
+ MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_OP_UNPAIR_DEVICE,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ MGMT_OP_START_DISCOVERY,
+ MGMT_OP_STOP_DISCOVERY,
+ MGMT_OP_CONFIRM_NAME,
+ MGMT_OP_BLOCK_DEVICE,
+ MGMT_OP_UNBLOCK_DEVICE,
+};
+
+static const u16 mgmt_events[] = {
+ MGMT_EV_CONTROLLER_ERROR,
+ MGMT_EV_INDEX_ADDED,
+ MGMT_EV_INDEX_REMOVED,
+ MGMT_EV_NEW_SETTINGS,
+ MGMT_EV_CLASS_OF_DEV_CHANGED,
+ MGMT_EV_LOCAL_NAME_CHANGED,
+ MGMT_EV_NEW_LINK_KEY,
+ MGMT_EV_NEW_LONG_TERM_KEY,
+ MGMT_EV_DEVICE_CONNECTED,
+ MGMT_EV_DEVICE_DISCONNECTED,
+ MGMT_EV_CONNECT_FAILED,
+ MGMT_EV_PIN_CODE_REQUEST,
+ MGMT_EV_USER_CONFIRM_REQUEST,
+ MGMT_EV_USER_PASSKEY_REQUEST,
+ MGMT_EV_AUTH_FAILED,
+ MGMT_EV_DEVICE_FOUND,
+ MGMT_EV_DISCOVERING,
+ MGMT_EV_DEVICE_BLOCKED,
+ MGMT_EV_DEVICE_UNBLOCKED,
+ MGMT_EV_DEVICE_UNPAIRED,
+};
+
+/*
+ * These LE scan and inquiry parameters were chosen according to the LE
+ * General Discovery Procedure specification.
+ */
+#define LE_SCAN_TYPE 0x01
+#define LE_SCAN_WIN 0x12
+#define LE_SCAN_INT 0x12
+#define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
+#define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
+
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
+
+#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
+
+#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
struct pending_cmd {
struct list_head list;
@@ -151,8 +233,8 @@
return err;
}
-static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
- size_t rp_len)
+static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ void *rp, size_t rp_len)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
@@ -173,6 +255,7 @@
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
put_unaligned_le16(cmd, &ev->opcode);
+ ev->status = status;
if (rp)
memcpy(ev->data, rp, rp_len);
@@ -181,10 +264,11 @@
if (err < 0)
kfree_skb(skb);
- return err;;
+ return err;
}
-static int read_version(struct sock *sk)
+static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
struct mgmt_rp_read_version rp;
@@ -193,11 +277,46 @@
rp.version = MGMT_VERSION;
put_unaligned_le16(MGMT_REVISION, &rp.revision);
- return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
- sizeof(rp));
+ return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
+ sizeof(rp));
}
-static int read_index_list(struct sock *sk)
+static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct mgmt_rp_read_commands *rp;
+ u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ u16 num_events = ARRAY_SIZE(mgmt_events);
+ u16 *opcode;
+ size_t rp_size;
+ int i, err;
+
+ BT_DBG("sock %p", sk);
+
+ rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ put_unaligned_le16(num_commands, &rp->num_commands);
+ put_unaligned_le16(num_events, &rp->num_events);
+
+ for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
+ put_unaligned_le16(mgmt_commands[i], opcode);
+
+ for (i = 0; i < num_events; i++, opcode++)
+ put_unaligned_le16(mgmt_events[i], opcode);
+
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
+ rp_size);
+ kfree(rp);
+
+ return err;
+}
+
+static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
@@ -226,10 +345,7 @@
i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
- cancel_delayed_work(&d->power_off);
-
- if (test_bit(HCI_SETUP, &d->flags))
+ if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
put_unaligned_le16(d->id, &rp->index[i++]);
@@ -238,8 +354,8 @@
read_unlock(&hci_dev_list_lock);
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
- rp_len);
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
+ rp_len);
kfree(rp);
@@ -264,8 +380,13 @@
settings |= MGMT_SETTING_LINK_SECURITY;
}
- if (hdev->features[4] & LMP_LE)
- settings |= MGMT_SETTING_LE;
+ if (enable_hs)
+ settings |= MGMT_SETTING_HS;
+
+ if (enable_le) {
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
+ }
return settings;
}
@@ -274,47 +395,36 @@
{
u32 settings = 0;
- if (test_bit(HCI_UP, &hdev->flags))
+ if (hdev_is_powered(hdev))
settings |= MGMT_SETTING_POWERED;
- else
- return settings;
- if (test_bit(HCI_PSCAN, &hdev->flags))
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
settings |= MGMT_SETTING_CONNECTABLE;
- if (test_bit(HCI_ISCAN, &hdev->flags))
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
settings |= MGMT_SETTING_DISCOVERABLE;
- if (test_bit(HCI_PAIRABLE, &hdev->flags))
+ if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
settings |= MGMT_SETTING_PAIRABLE;
if (!(hdev->features[4] & LMP_NO_BREDR))
settings |= MGMT_SETTING_BREDR;
- if (hdev->host_features[0] & LMP_HOST_LE)
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
settings |= MGMT_SETTING_LE;
- if (test_bit(HCI_AUTH, &hdev->flags))
+ if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
settings |= MGMT_SETTING_LINK_SECURITY;
- if (hdev->ssp_mode > 0)
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
settings |= MGMT_SETTING_SSP;
+ if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_HS;
+
return settings;
}
-#define EIR_FLAGS 0x01 /* flags */
-#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
-#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
-#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
-#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
-#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
-#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
-#define EIR_NAME_SHORT 0x08 /* shortened local name */
-#define EIR_NAME_COMPLETE 0x09 /* complete local name */
-#define EIR_TX_POWER 0x0A /* transmit power level */
-#define EIR_DEVICE_ID 0x10 /* device ID */
-
#define PNP_INFO_SVCLASS_ID 0x1200
static u8 bluetooth_base_uuid[] = {
@@ -425,13 +535,16 @@
{
struct hci_cp_write_eir cp;
+ if (!hdev_is_powered(hdev))
+ return 0;
+
if (!(hdev->features[6] & LMP_EXT_INQ))
return 0;
- if (hdev->ssp_mode == 0)
+ if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
return 0;
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
return 0;
memset(&cp, 0, sizeof(cp));
@@ -460,10 +573,14 @@
static int update_class(struct hci_dev *hdev)
{
u8 cod[3];
+ int err;
BT_DBG("%s", hdev->name);
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
return 0;
cod[0] = hdev->minor_class;
@@ -473,15 +590,19 @@
if (memcmp(cod, hdev->dev_class, 3) == 0)
return 0;
- return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+ if (err == 0)
+ set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
+
+ return err;
}
static void service_cache_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
- service_cache.work);
+ service_cache.work);
- if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
return;
hci_dev_lock(hdev);
@@ -492,36 +613,30 @@
hci_dev_unlock(hdev);
}
-static void mgmt_init_hdev(struct hci_dev *hdev)
+static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
- if (!test_and_set_bit(HCI_MGMT, &hdev->flags))
- INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+ if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
- if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags))
- schedule_delayed_work(&hdev->service_cache,
- msecs_to_jiffies(SERVICE_CACHE_TIMEOUT));
+ INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+
+ /* Non-mgmt controlled devices get this bit set
+ * implicitly so that pairing works for them; however,
+ * for mgmt we require user-space to explicitly
+ * enable it.
+ */
+ clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
-static int read_controller_info(struct sock *sk, u16 index)
+static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
{
struct mgmt_rp_read_info rp;
- struct hci_dev *hdev;
- BT_DBG("sock %p hci%u", sk, index);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_READ_INFO,
- MGMT_STATUS_INVALID_PARAMS);
-
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
- cancel_delayed_work_sync(&hdev->power_off);
+ BT_DBG("sock %p %s", sk, hdev->name);
hci_dev_lock(hdev);
- if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
- mgmt_init_hdev(hdev);
-
memset(&rp, 0, sizeof(rp));
bacpy(&rp.bdaddr, &hdev->bdaddr);
@@ -536,11 +651,12 @@
memcpy(rp.dev_class, hdev->dev_class, 3);
memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+ memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
- return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+ return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
+ sizeof(rp));
}
static void mgmt_pending_free(struct pending_cmd *cmd)
@@ -551,8 +667,8 @@
}
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- struct hci_dev *hdev,
- void *data, u16 len)
+ struct hci_dev *hdev, void *data,
+ u16 len)
{
struct pending_cmd *cmd;
@@ -581,8 +697,8 @@
}
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
- void (*cb)(struct pending_cmd *cmd, void *data),
- void *data)
+ void (*cb)(struct pending_cmd *cmd, void *data),
+ void *data)
{
struct list_head *p, *n;
@@ -620,40 +736,39 @@
{
__le32 settings = cpu_to_le32(get_current_settings(hdev));
- return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings));
+ return cmd_complete(sk, hdev->id, opcode, 0, &settings,
+ sizeof(settings));
}
-static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
+ struct mgmt_mode *cp = data;
struct pending_cmd *cmd;
- int err, up;
+ int err;
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_POWERED,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_POWERED,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
- up = test_bit(HCI_UP, &hdev->flags);
- if ((cp->val && up) || (!cp->val && !up)) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ cancel_delayed_work(&hdev->power_off);
+
+ if (cp->val) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
+ mgmt_powered(hdev, 1);
+ goto failed;
+ }
+ }
+
+ if (!!cp->val == hdev_is_powered(hdev)) {
err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
goto failed;
}
if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -672,49 +787,115 @@
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
-static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
+ struct sock *skip_sk)
{
- struct mgmt_cp_set_discoverable *cp;
- struct hci_dev *hdev;
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(event);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+ hdr->len = cpu_to_le16(data_len);
+
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ /* Time stamp */
+ __net_timestamp(skb);
+
+ hci_send_to_control(skb, skip_sk);
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int new_settings(struct hci_dev *hdev, struct sock *skip)
+{
+ __le32 ev;
+
+ ev = cpu_to_le32(get_current_settings(hdev));
+
+ return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
+}
+
+static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_discoverable *cp = data;
struct pending_cmd *cmd;
+ u16 timeout;
u8 scan;
int err;
- cp = (void *) data;
+ BT_DBG("request for %s", hdev->name);
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ timeout = get_unaligned_le16(&cp->timeout);
+ if (!cp->val && timeout > 0)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev) && timeout > 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
- if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
- test_bit(HCI_PSCAN, &hdev->flags)) {
+ if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_REJECTED);
+ goto failed;
+ }
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
+ change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ }
+
+ if (cp->val && timeout > 0) {
+ hdev->discov_timeout = timeout;
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ msecs_to_jiffies(hdev->discov_timeout * 1000));
+ }
+
err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
goto failed;
}
@@ -737,53 +918,56 @@
mgmt_pending_remove(cmd);
if (cp->val)
- hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+ hdev->discov_timeout = timeout;
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
+ struct mgmt_mode *cp = data;
struct pending_cmd *cmd;
u8 scan;
int err;
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+
+ if (cp->val) {
+ set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ } else {
+ clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
goto failed;
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
- if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+ if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
goto failed;
}
@@ -794,116 +978,282 @@
goto failed;
}
- if (cp->val)
+ if (cp->val) {
scan = SCAN_PAGE;
- else
+ } else {
scan = 0;
+ if (test_bit(HCI_ISCAN, &hdev->flags) &&
+ hdev->discov_timeout > 0)
+ cancel_delayed_work(&hdev->discov_off);
+ }
+
err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
- u16 data_len, struct sock *skip_sk)
+static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
-
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
-
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- if (hdev)
- hdr->index = cpu_to_le16(hdev->id);
- else
- hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
- hdr->len = cpu_to_le16(data_len);
-
- if (data)
- memcpy(skb_put(skb, data_len), data, data_len);
-
- hci_send_to_sock(NULL, skb, skip_sk);
- kfree_skb(skb);
-
- return 0;
-}
-
-static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
- __le32 ev;
+ struct mgmt_mode *cp = data;
int err;
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
if (cp->val)
- set_bit(HCI_PAIRABLE, &hdev->flags);
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
else
- clear_bit(HCI_PAIRABLE, &hdev->flags);
+ clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
if (err < 0)
goto failed;
- ev = cpu_to_le32(get_current_settings(hdev));
-
- err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk);
+ err = new_settings(hdev, sk);
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct mgmt_cp_add_uuid *cp;
- struct hci_dev *hdev;
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 val;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags)) {
+ change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ val = !!cp->val;
+
+ if (test_bit(HCI_AUTH, &hdev->flags) == val) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 val;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_NOT_SUPPORTED);
+ goto failed;
+ }
+
+ val = !!cp->val;
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!enable_hs)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val)
+ set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ else
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+
+ return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+}
+
+static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct hci_cp_write_le_host_supported hci_cp;
+ struct pending_cmd *cmd;
+ int err;
+ u8 val, enabled;
+
+ BT_DBG("request for %s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!enable_le || !(hdev->features[4] & LMP_LE)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_NOT_SUPPORTED);
+ goto unlock;
+ }
+
+ val = !!cp->val;
+ enabled = !!(hdev->host_features[0] & LMP_HOST_LE);
+
+ if (!hdev_is_powered(hdev) || val == enabled) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ memset(&hci_cp, 0, sizeof(hci_cp));
+
+ if (val) {
+ hci_cp.le = val;
+ hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
+ &hci_cp);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto unlock;
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_cp_add_uuid *cp = data;
+ struct pending_cmd *cmd;
struct bt_uuid *uuid;
int err;
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ADD_UUID,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ADD_UUID,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
+ if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
if (!uuid) {
err = -ENOMEM;
@@ -923,41 +1273,65 @@
if (err < 0)
goto failed;
- err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
+ if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
+ hdev->dev_class, 3);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static bool enable_service_cache(struct hci_dev *hdev)
{
+ if (!hdev_is_powered(hdev))
+ return false;
+
+ if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
+ return true;
+ }
+
+ return false;
+}
+
+static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_remove_uuid *cp = data;
+ struct pending_cmd *cmd;
struct list_head *p, *n;
- struct mgmt_cp_remove_uuid *cp;
- struct hci_dev *hdev;
u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int err, found;
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
+ if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
err = hci_uuids_clear(hdev);
- goto unlock;
+
+ if (enable_service_cache(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ 0, hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ goto update_class;
}
found = 0;
@@ -973,11 +1347,12 @@
}
if (found == 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_INVALID_PARAMS);
+ err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
+update_class:
err = update_class(hdev);
if (err < 0)
goto unlock;
@@ -986,41 +1361,50 @@
if (err < 0)
goto unlock;
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
+ if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
unlock:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_set_dev_class *cp;
+ struct mgmt_cp_set_dev_class *cp = data;
+ struct pending_cmd *cmd;
int err;
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
+ if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
hdev->major_class = cp->major;
hdev->minor_class = cp->minor;
- if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) {
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
hci_dev_unlock(hdev);
cancel_delayed_work_sync(&hdev->service_cache);
hci_dev_lock(hdev);
@@ -1028,30 +1412,33 @@
}
err = update_class(hdev);
+ if (err < 0)
+ goto unlock;
- if (err == 0)
- err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
+ if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+unlock:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
+static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_load_link_keys *cp;
+ struct mgmt_cp_load_link_keys *cp = data;
u16 key_count, expected_len;
int i;
- cp = (void *) data;
-
- if (len < sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
-
key_count = get_unaligned_le16(&cp->key_count);
expected_len = sizeof(*cp) + key_count *
@@ -1059,92 +1446,103 @@
if (expected_len != len) {
BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
len, expected_len);
- return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
-
- BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
+ BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
key_count);
hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
- set_bit(HCI_LINK_KEYS, &hdev->flags);
+ set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
if (cp->debug_keys)
- set_bit(HCI_DEBUG_KEYS, &hdev->flags);
+ set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
else
- clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
+ clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
for (i = 0; i < key_count; i++) {
struct mgmt_link_key_info *key = &cp->keys[i];
- hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
- key->pin_len);
+ hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
+ key->type, key->pin_len);
}
- cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
+ cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
return 0;
}
-static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, struct sock *skip_sk)
{
- struct hci_dev *hdev;
- struct mgmt_cp_remove_keys *cp;
- struct mgmt_rp_remove_keys rp;
+ struct mgmt_ev_device_unpaired ev;
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = addr_type;
+
+ return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
+ skip_sk);
+}
+
+static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_unpair_device *cp = data;
+ struct mgmt_rp_unpair_device rp;
struct hci_cp_disconnect dc;
struct pending_cmd *cmd;
struct hci_conn *conn;
int err;
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
memset(&rp, 0, sizeof(rp));
- bacpy(&rp.bdaddr, &cp->bdaddr);
- rp.status = MGMT_STATUS_FAILED;
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- err = hci_remove_link_key(hdev, &cp->bdaddr);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ if (cp->addr.type == MGMT_ADDR_BREDR)
+ err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
+ else
+ err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
+
if (err < 0) {
- rp.status = MGMT_STATUS_NOT_PAIRED;
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
goto unlock;
}
- if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) {
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
- sizeof(rp));
- goto unlock;
+ if (cp->disconnect) {
+ if (cp->addr.type == MGMT_ADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
+ &cp->addr.bdaddr);
+ } else {
+ conn = NULL;
}
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (!conn) {
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
- sizeof(rp));
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
+ &rp, sizeof(rp));
+ device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp));
+ cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
+ sizeof(*cp));
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -1157,19 +1555,14 @@
mgmt_pending_remove(cmd);
unlock:
- if (err < 0)
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
- sizeof(rp));
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_disconnect *cp;
+ struct mgmt_cp_disconnect *cp = data;
struct hci_cp_disconnect dc;
struct pending_cmd *cmd;
struct hci_conn *conn;
@@ -1177,38 +1570,28 @@
BT_DBG("");
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_DISCONNECT,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_DISCONNECT,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_POWERED);
+ err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_BUSY);
goto failed;
}
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (!conn)
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+ if (cp->addr.type == MGMT_ADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_CONNECTED);
+ err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
@@ -1227,8 +1610,6 @@
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
@@ -1251,41 +1632,42 @@
}
}
-static int get_connections(struct sock *sk, u16 index)
+static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
struct mgmt_rp_get_connections *rp;
- struct hci_dev *hdev;
struct hci_conn *c;
- struct list_head *p;
size_t rp_len;
- u16 count;
- int i, err;
+ int err;
+ u16 i;
BT_DBG("");
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
- count = 0;
- list_for_each(p, &hdev->conn_hash.list) {
- count++;
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
}
- rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
+ i = 0;
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
+ i++;
+ }
+
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
err = -ENOMEM;
goto unlock;
}
- put_unaligned_le16(count, &rp->conn_count);
-
i = 0;
list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
+ continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
if (rp->addr[i].type == MGMT_ADDR_INVALID)
@@ -1293,85 +1675,77 @@
i++;
}
+ put_unaligned_le16(i, &rp->conn_count);
+
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
- err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
+ rp_len);
+
+ kfree(rp);
unlock:
- kfree(rp);
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
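
Note: the Get Connections reply assembled above is a little-endian conn_count followed by that many packed address entries. A rough sketch of how a client might walk it; the 7-byte mgmt_addr_info layout (6-byte bdaddr plus 1-byte type) is an assumption mirroring the kernel struct.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mgmt_addr_info_u {		/* assumed mirror of struct mgmt_addr_info */
	uint8_t bdaddr[6];
	uint8_t type;
} __attribute__((packed));

static void dump_connections(const uint8_t *rp, size_t rp_len)
{
	uint16_t count;
	size_t i;

	if (rp_len < 2)
		return;

	memcpy(&count, rp, 2);		/* conn_count, little-endian host assumed */
	rp += 2;
	rp_len -= 2;

	for (i = 0; i < count && rp_len >= sizeof(struct mgmt_addr_info_u); i++) {
		const struct mgmt_addr_info_u *addr = (const void *) rp;

		/* bdaddr is little-endian on the wire, so print MSB first */
		printf("%02x:%02x:%02x:%02x:%02x:%02x type %u\n",
		       addr->bdaddr[5], addr->bdaddr[4], addr->bdaddr[3],
		       addr->bdaddr[2], addr->bdaddr[1], addr->bdaddr[0],
		       (unsigned) addr->type);

		rp += sizeof(*addr);
		rp_len -= sizeof(*addr);
	}
}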
-static int send_pin_code_neg_reply(struct sock *sk, u16 index,
- struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
+static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_pin_code_neg_reply *cp)
{
struct pending_cmd *cmd;
int err;
cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
- sizeof(*cp));
+ sizeof(*cp));
if (!cmd)
return -ENOMEM;
- err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
- &cp->bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+ sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
if (err < 0)
mgmt_pending_remove(cmd);
return err;
}
-static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
struct hci_conn *conn;
- struct mgmt_cp_pin_code_reply *cp;
- struct mgmt_cp_pin_code_neg_reply ncp;
+ struct mgmt_cp_pin_code_reply *cp = data;
struct hci_cp_pin_code_reply reply;
struct pending_cmd *cmd;
int err;
BT_DBG("");
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_NOT_CONNECTED);
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
- bacpy(&ncp.bdaddr, &cp->bdaddr);
+ struct mgmt_cp_pin_code_neg_reply ncp;
+
+ memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
BT_ERR("PIN code is not 16 bytes long");
- err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
+ err = send_pin_code_neg_reply(sk, hdev, &ncp);
if (err >= 0)
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
@@ -1382,7 +1756,7 @@
goto failed;
}
- bacpy(&reply.bdaddr, &cp->bdaddr);
+ bacpy(&reply.bdaddr, &cp->addr.bdaddr);
reply.pin_len = cp->pin_len;
memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
@@ -1392,67 +1766,39 @@
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_pin_code_neg_reply *cp;
+ struct mgmt_cp_pin_code_neg_reply *cp = data;
int err;
BT_DBG("");
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- err = send_pin_code_neg_reply(sk, index, hdev, cp);
+ err = send_pin_code_neg_reply(sk, hdev, cp);
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_set_io_capability *cp;
+ struct mgmt_cp_set_io_capability *cp = data;
BT_DBG("");
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
hdev->io_capability = cp->io_capability;
@@ -1461,9 +1807,9 @@
hdev->io_capability);
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
- return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
+ return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
+ 0);
}
static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
@@ -1491,9 +1837,9 @@
bacpy(&rp.addr.bdaddr, &conn->dst);
rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
- rp.status = status;
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
+ &rp, sizeof(rp));
/* So we don't get further callbacks for this connection */
conn->connect_cfm_cb = NULL;
@@ -1515,13 +1861,13 @@
if (!cmd)
BT_DBG("Unable to find a pending command");
else
- pairing_complete(cmd, status);
+ pairing_complete(cmd, mgmt_status(status));
}
-static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_pair_device *cp;
+ struct mgmt_cp_pair_device *cp = data;
struct mgmt_rp_pair_device rp;
struct pending_cmd *cmd;
u8 sec_level, auth_type;
@@ -1530,19 +1876,14 @@
BT_DBG("");
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
+ }
+
sec_level = BT_SECURITY_MEDIUM;
if (cp->io_cap == 0x03)
auth_type = HCI_AT_DEDICATED_BONDING;
@@ -1551,27 +1892,26 @@
if (cp->addr.type == MGMT_ADDR_BREDR)
conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ auth_type);
else
conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ auth_type);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp.addr.type = cp->addr.type;
if (IS_ERR(conn)) {
- rp.status = -PTR_ERR(conn);
- err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
- &rp, sizeof(rp));
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_CONNECT_FAILED, &rp,
+ sizeof(rp));
goto unlock;
}
if (conn->connect_cfm_cb) {
hci_conn_put(conn);
- rp.status = EBUSY;
- err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
- &rp, sizeof(rp));
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto unlock;
}
@@ -1599,58 +1939,88 @@
unlock:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
- u16 mgmt_op, u16 hci_op, __le32 passkey)
+static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
+ struct mgmt_addr_info *addr = data;
struct pending_cmd *cmd;
- struct hci_dev *hdev;
struct hci_conn *conn;
int err;
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, mgmt_op,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("");
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
+ if (!cmd) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ conn = cmd->user_data;
+
+ if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ pairing_complete(cmd, MGMT_STATUS_CANCELLED);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
+ addr, sizeof(*addr));
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
+ u16 hci_op, __le32 passkey)
+{
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_POWERED);
goto done;
}
- /*
- * Check for an existing ACL link, if present pair via
- * HCI commands.
- *
- * If no ACL link is present, check for an LE link and if
- * present, pair via the SMP engine.
- *
- * If neither ACL nor LE links are present, fail with error.
- */
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
- if (!conn) {
+ if (type == MGMT_ADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+ else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
- if (!conn) {
- err = cmd_status(sk, index, mgmt_op,
- MGMT_STATUS_NOT_CONNECTED);
- goto done;
- }
+ if (!conn) {
+ err = cmd_status(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_CONNECTED);
+ goto done;
+ }
+
+ if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
if (!err)
- err = cmd_status(sk, index, mgmt_op,
- MGMT_STATUS_SUCCESS);
+ err = cmd_status(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_SUCCESS);
else
- err = cmd_status(sk, index, mgmt_op,
- MGMT_STATUS_FAILED);
+ err = cmd_status(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_FAILED);
goto done;
}
@@ -1676,144 +2046,137 @@
done:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
+static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+ struct mgmt_cp_user_confirm_reply *cp = data;
BT_DBG("");
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
+ return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
- return user_pairing_resp(sk, index, &cp->bdaddr,
- MGMT_OP_USER_CONFIRM_REPLY,
- HCI_OP_USER_CONFIRM_REPLY, 0);
+ return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ HCI_OP_USER_CONFIRM_REPLY, 0);
}
-static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
- u16 len)
+static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
struct mgmt_cp_user_confirm_neg_reply *cp = data;
BT_DBG("");
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
- return user_pairing_resp(sk, index, &cp->bdaddr,
- MGMT_OP_USER_CONFIRM_NEG_REPLY,
- HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+ return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
-static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len)
+static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct mgmt_cp_user_passkey_reply *cp = (void *) data;
+ struct mgmt_cp_user_passkey_reply *cp = data;
BT_DBG("");
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY,
- EINVAL);
-
- return user_pairing_resp(sk, index, &cp->bdaddr,
- MGMT_OP_USER_PASSKEY_REPLY,
- HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+ return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
-static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data,
- u16 len)
+static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data;
+ struct mgmt_cp_user_passkey_neg_reply *cp = data;
BT_DBG("");
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY,
- EINVAL);
-
- return user_pairing_resp(sk, index, &cp->bdaddr,
- MGMT_OP_USER_PASSKEY_NEG_REPLY,
- HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+ return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
-static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int update_name(struct hci_dev *hdev, const char *name)
{
- struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
- struct hci_cp_write_local_name hci_cp;
- struct hci_dev *hdev;
+ struct hci_cp_write_local_name cp;
+
+ memcpy(cp.name, name, sizeof(cp.name));
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
+}
+
+static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_local_name *cp = data;
struct pending_cmd *cmd;
int err;
BT_DBG("");
- if (len != sizeof(*mgmt_cp))
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
- MGMT_STATUS_INVALID_PARAMS);
-
hci_dev_lock(hdev);
+ memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
+
+ if (!hdev_is_powered(hdev)) {
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
+ if (err < 0)
+ goto failed;
+
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
+ sk);
+
+ goto failed;
+ }
+
cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
- err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
- &hci_cp);
+ err = update_name(hdev, cp->name);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int read_local_oob_data(struct sock *sk, u16 index)
+static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
{
- struct hci_dev *hdev;
struct pending_cmd *cmd;
int err;
- BT_DBG("hci%u", index);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_NOT_SUPPORTED);
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
}
if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_BUSY);
goto unlock;
}
@@ -1829,104 +2192,112 @@
unlock:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
+ struct mgmt_cp_add_remote_oob_data *cp = data;
+ u8 status;
int err;
- BT_DBG("hci%u ", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s ", hdev->name);
hci_dev_lock(hdev);
- err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
- cp->randomizer);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_NOT_POWERED, &cp->addr,
+ sizeof(cp->addr));
+ goto unlock;
+ }
+
+ err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
+ cp->randomizer);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_FAILED);
+ status = MGMT_STATUS_FAILED;
else
- err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
- 0);
+ status = 0;
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
+ &cp->addr, sizeof(cp->addr));
+
+unlock:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int remove_remote_oob_data(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
+static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
+ struct mgmt_cp_remove_remote_oob_data *cp = data;
+ u8 status;
int err;
- BT_DBG("hci%u ", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ MGMT_STATUS_NOT_POWERED, &cp->addr,
+ sizeof(cp->addr));
+ goto unlock;
+ }
+
+ err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
+ status = MGMT_STATUS_INVALID_PARAMS;
else
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- NULL, 0);
+ status = 0;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+int mgmt_interleaved_discovery(struct hci_dev *hdev)
+{
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
+ if (err < 0)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
-static int start_discovery(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
+static int start_discovery(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct mgmt_cp_start_discovery *cp = (void *) data;
+ struct mgmt_cp_start_discovery *cp = data;
struct pending_cmd *cmd;
- struct hci_dev *hdev;
int err;
- BT_DBG("hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_POWERED);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ if (hdev->discovery.state != DISCOVERY_STOPPED) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1936,137 +2307,217 @@
goto failed;
}
- err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
+ hdev->discovery.type = cp->type;
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ if (lmp_bredr_capable(hdev))
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
+ else
+ err = -ENOTSUPP;
+ break;
+
+ case DISCOV_TYPE_LE:
+ if (lmp_host_le_capable(hdev))
+ err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
+ LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
+ else
+ err = -ENOTSUPP;
+ break;
+
+ case DISCOV_TYPE_INTERLEAVED:
+ if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
+ err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
+ LE_SCAN_WIN,
+ LE_SCAN_TIMEOUT_BREDR_LE);
+ else
+ err = -ENOTSUPP;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
if (err < 0)
mgmt_pending_remove(cmd);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int stop_discovery(struct sock *sk, u16 index)
+static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
+ struct mgmt_cp_stop_discovery *mgmt_cp = data;
struct pending_cmd *cmd;
+ struct hci_cp_remote_name_req_cancel cp;
+ struct inquiry_entry *e;
int err;
- BT_DBG("hci%u", index);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
+ if (!hci_discovery_active(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_REJECTED, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ goto unlock;
+ }
+
+ if (hdev->discovery.type != mgmt_cp->type) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ goto unlock;
+ }
+
cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (hdev->discovery.state == DISCOVERY_FINDING) {
+ err = hci_cancel_inquiry(hdev);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ goto unlock;
+ }
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
+ if (!e) {
+ mgmt_pending_remove(cmd);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type, sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+ &cp);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_confirm_name *cp = data;
+ struct inquiry_entry *e;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hci_discovery_active(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_FAILED);
goto failed;
}
- err = hci_cancel_inquiry(hdev);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
+ if (!e) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto failed;
+ }
+
+ if (cp->name_known) {
+ e->name_state = NAME_KNOWN;
+ list_del(&e->list);
+ } else {
+ e->name_state = NAME_NEEDED;
+ hci_inquiry_cache_update_resolve(hdev, e);
+ }
+
+ err = 0;
failed:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
-static int block_device(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_block_device *cp = (void *) data;
+ struct mgmt_cp_block_device *cp = data;
+ u8 status;
int err;
- BT_DBG("hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- err = hci_blacklist_add(hdev, &cp->bdaddr);
+ err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- MGMT_STATUS_FAILED);
+ status = MGMT_STATUS_FAILED;
else
- err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
- NULL, 0);
+ status = 0;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
-static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_unblock_device *cp = (void *) data;
+ struct mgmt_cp_unblock_device *cp = data;
+ u8 status;
int err;
- BT_DBG("hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- err = hci_blacklist_del(hdev, &cp->bdaddr);
-
+ err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
+ status = MGMT_STATUS_INVALID_PARAMS;
else
- err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- NULL, 0);
+ status = 0;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
-static int set_fast_connectable(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
+static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_mode *cp = (void *) data;
+ struct mgmt_mode *cp = data;
struct hci_cp_write_page_scan_activity acp;
u8 type;
int err;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ if (!hdev_is_powered(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_NOT_POWERED);
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_REJECTED);
hci_dev_lock(hdev);
@@ -2080,35 +2531,128 @@
acp.window = 0x0012; /* default 11.25 msec page scan window */
- err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
- sizeof(acp), &acp);
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
+ &acp);
if (err < 0) {
- err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
goto done;
}
err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
if (err < 0) {
- err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
goto done;
}
- err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- NULL, 0);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
+ NULL, 0);
done:
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
return err;
}
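
Note: page scan timing values such as the 0x0012 window above are expressed in baseband slots of 0.625 ms, so 0x0012 is 18 slots, i.e. the 11.25 ms the comment mentions. A trivial helper making that conversion explicit, for illustration only:

static unsigned int slots_to_usec(unsigned int slots)
{
	return slots * 625;	/* one baseband slot is 0.625 ms (625 us) */
}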
+static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ void *cp_data, u16 len)
+{
+ struct mgmt_cp_load_long_term_keys *cp = cp_data;
+ u16 key_count, expected_len;
+ int i;
+
+ key_count = get_unaligned_le16(&cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_ltk_info);
+ if (expected_len != len) {
+ BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ len, expected_len);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ EINVAL);
+ }
+
+ BT_DBG("%s key_count %u", hdev->name, key_count);
+
+ hci_dev_lock(hdev);
+
+ hci_smp_ltks_clear(hdev);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+ u8 type;
+
+ if (key->master)
+ type = HCI_SMP_LTK;
+ else
+ type = HCI_SMP_LTK_SLAVE;
+
+ hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
+ type, 0, key->authenticated, key->val,
+ key->enc_size, key->ediv, key->rand);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+struct mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ bool var_len;
+ size_t data_len;
+} mgmt_handlers[] = {
+ { NULL }, /* 0x0000 (no command) */
+ { read_version, false, MGMT_READ_VERSION_SIZE },
+ { read_commands, false, MGMT_READ_COMMANDS_SIZE },
+ { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
+ { read_controller_info, false, MGMT_READ_INFO_SIZE },
+ { set_powered, false, MGMT_SETTING_SIZE },
+ { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
+ { set_connectable, false, MGMT_SETTING_SIZE },
+ { set_fast_connectable, false, MGMT_SETTING_SIZE },
+ { set_pairable, false, MGMT_SETTING_SIZE },
+ { set_link_security, false, MGMT_SETTING_SIZE },
+ { set_ssp, false, MGMT_SETTING_SIZE },
+ { set_hs, false, MGMT_SETTING_SIZE },
+ { set_le, false, MGMT_SETTING_SIZE },
+ { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
+ { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
+ { add_uuid, false, MGMT_ADD_UUID_SIZE },
+ { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
+ { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
+ { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
+ { disconnect, false, MGMT_DISCONNECT_SIZE },
+ { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
+ { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
+ { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
+ { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
+ { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
+ { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
+ { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
+ { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
+ { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
+ { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
+ { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
+ { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
+ { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
+ { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
+ { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
+ { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
+ { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
+ { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
+ { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+};
+
+
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
- unsigned char *buf;
+ void *buf;
+ u8 *cp;
struct mgmt_hdr *hdr;
u16 opcode, index, len;
+ struct hci_dev *hdev = NULL;
+ struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2125,7 +2669,7 @@
goto done;
}
- hdr = (struct mgmt_hdr *) buf;
+ hdr = buf;
opcode = get_unaligned_le16(&hdr->opcode);
index = get_unaligned_le16(&hdr->index);
len = get_unaligned_le16(&hdr->len);
@@ -2135,117 +2679,54 @@
goto done;
}
- switch (opcode) {
- case MGMT_OP_READ_VERSION:
- err = read_version(sk);
- break;
- case MGMT_OP_READ_INDEX_LIST:
- err = read_index_list(sk);
- break;
- case MGMT_OP_READ_INFO:
- err = read_controller_info(sk, index);
- break;
- case MGMT_OP_SET_POWERED:
- err = set_powered(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_DISCOVERABLE:
- err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_CONNECTABLE:
- err = set_connectable(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_FAST_CONNECTABLE:
- err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_SET_PAIRABLE:
- err = set_pairable(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_ADD_UUID:
- err = add_uuid(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_REMOVE_UUID:
- err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_DEV_CLASS:
- err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_LOAD_LINK_KEYS:
- err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_REMOVE_KEYS:
- err = remove_keys(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_DISCONNECT:
- err = disconnect(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_GET_CONNECTIONS:
- err = get_connections(sk, index);
- break;
- case MGMT_OP_PIN_CODE_REPLY:
- err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_PIN_CODE_NEG_REPLY:
- err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_IO_CAPABILITY:
- err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_PAIR_DEVICE:
- err = pair_device(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_USER_CONFIRM_REPLY:
- err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_USER_CONFIRM_NEG_REPLY:
- err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_USER_PASSKEY_REPLY:
- err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_USER_PASSKEY_NEG_REPLY:
- err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_SET_LOCAL_NAME:
- err = set_local_name(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_READ_LOCAL_OOB_DATA:
- err = read_local_oob_data(sk, index);
- break;
- case MGMT_OP_ADD_REMOTE_OOB_DATA:
- err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
- err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_START_DISCOVERY:
- err = start_discovery(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_STOP_DISCOVERY:
- err = stop_discovery(sk, index);
- break;
- case MGMT_OP_BLOCK_DEVICE:
- err = block_device(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_UNBLOCK_DEVICE:
- err = unblock_device(sk, index, buf + sizeof(*hdr), len);
- break;
- default:
- BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_UNKNOWN_COMMAND);
- break;
+ if (index != MGMT_INDEX_NONE) {
+ hdev = hci_dev_get(index);
+ if (!hdev) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
}
+ if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
+ mgmt_handlers[opcode].func == NULL) {
+ BT_DBG("Unknown op %u", opcode);
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_UNKNOWN_COMMAND);
+ goto done;
+ }
+
+ if ((hdev && opcode < MGMT_OP_READ_INFO) ||
+ (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+
+ handler = &mgmt_handlers[opcode];
+
+ if ((handler->var_len && len < handler->data_len) ||
+ (!handler->var_len && len != handler->data_len)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto done;
+ }
+
+ if (hdev)
+ mgmt_init_hdev(sk, hdev);
+
+ cp = buf + sizeof(*hdr);
+
+ err = handler->func(sk, hdev, cp, len);
if (err < 0)
goto done;
err = msglen;
done:
+ if (hdev)
+ hci_dev_put(hdev);
+
kfree(buf);
return err;
}
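
Note: mgmt_control() now expects a 6-byte little-endian header (opcode, index, len) followed by the parameters, validates the index and length against the handler table, and only then dispatches. A rough user-space sketch of producing such a frame for Read Version follows; the socket constants and struct layouts are assumptions taken from the Linux Bluetooth headers of this period and should be checked against hci_sock.h and mgmt.h.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH		31	/* assumed; matches linux/socket.h */
#endif
#define BTPROTO_HCI		1	/* assumed */
#define HCI_CHANNEL_CONTROL	3	/* assumed; see hci_sock.h of this kernel */
#define HCI_DEV_NONE		0xffff
#define MGMT_OP_READ_VERSION	0x0001
#define MGMT_INDEX_NONE		0xffff

struct sockaddr_hci_u {			/* assumed mirror of struct sockaddr_hci */
	sa_family_t hci_family;
	unsigned short hci_dev;
	unsigned short hci_channel;
};

struct mgmt_hdr_u {			/* assumed mirror of struct mgmt_hdr */
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

static int send_read_version(void)
{
	struct sockaddr_hci_u addr;
	struct mgmt_hdr_u hdr;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_CONTROL;
	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	/* Read Version takes no parameters and no controller index. */
	hdr.opcode = MGMT_OP_READ_VERSION;	/* little-endian host assumed */
	hdr.index = MGMT_INDEX_NONE;
	hdr.len = 0;

	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
		close(fd);
		return -1;
	}

	return fd;	/* caller reads the command-complete event reply */
}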
@@ -2265,7 +2746,7 @@
int mgmt_index_removed(struct hci_dev *hdev)
{
- u8 status = ENODEV;
+ u8 status = MGMT_STATUS_INVALID_INDEX;
mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
@@ -2273,9 +2754,9 @@
}
struct cmd_lookup {
- u8 val;
struct sock *sk;
struct hci_dev *hdev;
+ u8 mgmt_status;
};
static void settings_rsp(struct pending_cmd *cmd, void *data)
@@ -2296,63 +2777,91 @@
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
- struct cmd_lookup match = { powered, NULL, hdev };
- __le32 ev;
- int ret;
+ struct cmd_lookup match = { NULL, hdev };
+ int err;
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return 0;
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
- if (!powered) {
- u8 status = ENETDOWN;
+ if (powered) {
+ u8 scan = 0;
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ scan |= SCAN_PAGE;
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ scan |= SCAN_INQUIRY;
+
+ if (scan)
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+
+ update_class(hdev);
+ update_name(hdev, hdev->dev_name);
+ update_eir(hdev);
+ } else {
+ u8 status = MGMT_STATUS_NOT_POWERED;
mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
}
- ev = cpu_to_le32(get_current_settings(hdev));
-
- ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
- match.sk);
+ err = new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
- return ret;
+ return err;
}
int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
- struct cmd_lookup match = { discoverable, NULL, hdev };
- __le32 ev;
- int ret;
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match);
+ if (discoverable) {
+ if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ changed = true;
+ }
- ev = cpu_to_le32(get_current_settings(hdev));
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
+ &match);
- ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
- match.sk);
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
if (match.sk)
sock_put(match.sk);
- return ret;
+ return err;
}
int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
- __le32 ev;
- struct cmd_lookup match = { connectable, NULL, hdev };
- int ret;
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
+
+ if (connectable) {
+ if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+ }
mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
- &match);
+ &match);
- ev = cpu_to_le32(get_current_settings(hdev));
-
- ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk);
+ if (changed)
+ err = new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
- return ret;
+ return err;
}
int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
@@ -2361,24 +2870,24 @@
if (scan & SCAN_PAGE)
mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
- cmd_status_rsp, &mgmt_err);
+ cmd_status_rsp, &mgmt_err);
if (scan & SCAN_INQUIRY)
mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
- cmd_status_rsp, &mgmt_err);
+ cmd_status_rsp, &mgmt_err);
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
- u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
{
struct mgmt_ev_new_link_key ev;
memset(&ev, 0, sizeof(ev));
ev.store_hint = persistent;
- bacpy(&ev.key.bdaddr, &key->bdaddr);
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = MGMT_ADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2386,15 +2895,54 @@
return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type)
+int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
- struct mgmt_addr_info ev;
+ struct mgmt_ev_new_long_term_key ev;
- bacpy(&ev.bdaddr, bdaddr);
- ev.type = link_to_mgmt(link_type, addr_type);
+ memset(&ev, 0, sizeof(ev));
- return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
+ ev.store_hint = persistent;
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = key->bdaddr_type;
+ ev.key.authenticated = key->authenticated;
+ ev.key.enc_size = key->enc_size;
+ ev.key.ediv = key->ediv;
+
+ if (key->type == HCI_SMP_LTK)
+ ev.key.master = 1;
+
+ memcpy(ev.key.rand, key->rand, sizeof(key->rand));
+ memcpy(ev.key.val, key->val, sizeof(key->val));
+
+ return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
+ NULL);
+}
+
+int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class)
+{
+ char buf[512];
+ struct mgmt_ev_device_connected *ev = (void *) buf;
+ u16 eir_len = 0;
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_mgmt(link_type, addr_type);
+
+ ev->flags = __cpu_to_le32(flags);
+
+ if (name_len > 0)
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
+ name, name_len);
+
+ if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
+ eir_len = eir_append_data(&ev->eir[eir_len], eir_len,
+ EIR_CLASS_OF_DEV, dev_class, 3);
+
+ put_unaligned_le16(eir_len, &ev->eir_len);
+
+ return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
+ sizeof(*ev) + eir_len, NULL);
}
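
Note: the eir_append_data() calls above emit standard EIR structures: one length octet covering the type and payload, one type octet (EIR_NAME_COMPLETE 0x09, EIR_CLASS_OF_DEV 0x0d), then the payload. A stand-alone sketch of that encoding, not the kernel's exact helper and with no bounds checking:

#include <stdint.h>
#include <string.h>

static uint16_t eir_append_data_sketch(uint8_t *eir, uint16_t eir_len,
				       uint8_t type, const uint8_t *data,
				       uint8_t data_len)
{
	eir[eir_len++] = data_len + 1;	/* length octet covers type + data */
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);

	return eir_len + data_len;	/* new running length of the EIR blob */
}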
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2403,10 +2951,11 @@
struct sock **sk = data;
struct mgmt_rp_disconnect rp;
- bacpy(&rp.bdaddr, &cp->bdaddr);
- rp.status = 0;
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
+ sizeof(rp));
*sk = cmd->sk;
sock_hold(*sk);
@@ -2414,25 +2963,25 @@
mgmt_pending_remove(cmd);
}
-static void remove_keys_rsp(struct pending_cmd *cmd, void *data)
+static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
- u8 *status = data;
- struct mgmt_cp_remove_keys *cp = cmd->param;
- struct mgmt_rp_remove_keys rp;
+ struct hci_dev *hdev = data;
+ struct mgmt_cp_unpair_device *cp = cmd->param;
+ struct mgmt_rp_unpair_device rp;
memset(&rp, 0, sizeof(rp));
- bacpy(&rp.bdaddr, &cp->bdaddr);
- if (status != NULL)
- rp.status = *status;
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp,
- sizeof(rp));
+ device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
+
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
mgmt_pending_remove(cmd);
}
-int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type)
+int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type)
{
struct mgmt_addr_info ev;
struct sock *sk = NULL;
@@ -2443,45 +2992,44 @@
bacpy(&ev.bdaddr, bdaddr);
ev.type = link_to_mgmt(link_type, addr_type);
- err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
+ sk);
if (sk)
- sock_put(sk);
+ sock_put(sk);
- mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL);
+ mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+ hdev);
return err;
}
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
+ struct mgmt_rp_disconnect rp;
struct pending_cmd *cmd;
- u8 mgmt_err = mgmt_status(status);
int err;
cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
return -ENOENT;
- if (bdaddr) {
- struct mgmt_rp_disconnect rp;
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = link_to_mgmt(link_type, addr_type);
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
-
- err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
- &rp, sizeof(rp));
- } else
- err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
- mgmt_err);
+ err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
+ mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+ hdev);
return err;
}
int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+ u8 addr_type, u8 status)
{
struct mgmt_ev_connect_failed ev;
@@ -2496,15 +3044,16 @@
{
struct mgmt_ev_pin_code_request ev;
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = MGMT_ADDR_BREDR;
ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ NULL);
}
int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
@@ -2514,11 +3063,11 @@
if (!cmd)
return -ENOENT;
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = mgmt_status(status);
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = MGMT_ADDR_BREDR;
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
- sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2526,7 +3075,7 @@
}
int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
@@ -2536,11 +3085,11 @@
if (!cmd)
return -ENOENT;
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = mgmt_status(status);
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = MGMT_ADDR_BREDR;
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
- sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2548,34 +3097,39 @@
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
- __le32 value, u8 confirm_hint)
+ u8 link_type, u8 addr_type, __le32 value,
+ u8 confirm_hint)
{
struct mgmt_ev_user_confirm_request ev;
BT_DBG("%s", hdev->name);
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ NULL);
}
-int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type)
{
struct mgmt_ev_user_passkey_request ev;
BT_DBG("%s", hdev->name);
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ NULL);
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status, u8 opcode)
+ u8 link_type, u8 addr_type, u8 status,
+ u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
@@ -2585,9 +3139,10 @@
if (!cmd)
return -ENOENT;
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = mgmt_status(status);
- err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = link_to_mgmt(link_type, addr_type);
+ err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
+ &rp, sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2595,72 +3150,215 @@
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+ u8 link_type, u8 addr_type, u8 status)
{
- return user_pairing_resp_complete(hdev, bdaddr, status,
- MGMT_OP_USER_CONFIRM_REPLY);
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_CONFIRM_REPLY);
}
-int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
- return user_pairing_resp_complete(hdev, bdaddr, status,
- MGMT_OP_USER_CONFIRM_NEG_REPLY);
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+ u8 link_type, u8 addr_type, u8 status)
{
- return user_pairing_resp_complete(hdev, bdaddr, status,
- MGMT_OP_USER_PASSKEY_REPLY);
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_PASSKEY_REPLY);
}
-int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 status)
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
- return user_pairing_resp_complete(hdev, bdaddr, status,
- MGMT_OP_USER_PASSKEY_NEG_REPLY);
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_auth_failed ev;
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
+int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+ mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
+ cmd_status_rsp, &mgmt_err);
+ return 0;
+ }
+
+ if (test_bit(HCI_AUTH, &hdev->flags)) {
+ if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ changed = true;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
+ &match);
+
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
+}
+
+static int clear_eir(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
+ if (!(hdev->features[6] & LMP_EXT_INQ))
+ return 0;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+
+ memset(&cp, 0, sizeof(cp));
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
+int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags))
+ err = new_settings(hdev, NULL);
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
+ &mgmt_err);
+
+ return err;
+ }
+
+ if (enable) {
+ if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ changed = true;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
+
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ update_eir(hdev);
+ else
+ clear_eir(hdev);
+
+ return err;
+}
+
+static void class_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct cmd_lookup *match = data;
+
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
+ match->hdev->dev_class, 3);
+
+ list_del(&cmd->list);
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
+ }
+
+ mgmt_pending_free(cmd);
+}
+
+int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
+ int err = 0;
+
+ clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
+
+ mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
+
+ if (!status)
+ err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
+ 3, NULL);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
+}
+
int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
struct pending_cmd *cmd;
struct mgmt_cp_set_local_name ev;
- int err;
+ bool changed = false;
+ int err = 0;
+
+ if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
+ memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
+ changed = true;
+ }
memset(&ev, 0, sizeof(ev));
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
+ memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
if (!cmd)
goto send_event;
+ /* Always assume that either the short or the complete name has
+ * changed if there was a pending mgmt command */
+ changed = true;
+
if (status) {
err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
- mgmt_status(status));
+ mgmt_status(status));
goto failed;
}
- update_eir(hdev);
-
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
- sizeof(ev));
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
+ sizeof(ev));
if (err < 0)
goto failed;
send_event:
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ if (changed)
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
+ sizeof(ev), cmd ? cmd->sk : NULL);
+
+ update_eir(hdev);
failed:
if (cmd)
@@ -2669,7 +3367,7 @@
}
int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
- u8 *randomizer, u8 status)
+ u8 *randomizer, u8 status)
{
struct pending_cmd *cmd;
int err;
@@ -2681,9 +3379,8 @@
return -ENOENT;
if (status) {
- err = cmd_status(cmd->sk, hdev->id,
- MGMT_OP_READ_LOCAL_OOB_DATA,
- mgmt_status(status));
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ mgmt_status(status));
} else {
struct mgmt_rp_read_local_oob_data rp;
@@ -2691,8 +3388,8 @@
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
err = cmd_complete(cmd->sk, hdev->id,
- MGMT_OP_READ_LOCAL_OOB_DATA,
- &rp, sizeof(rp));
+ MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
+ sizeof(rp));
}
mgmt_pending_remove(cmd);
@@ -2700,48 +3397,120 @@
return err;
}
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir)
+int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
- struct mgmt_ev_device_found ev;
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
- memset(&ev, 0, sizeof(ev));
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
- ev.rssi = rssi;
+ if (enable && test_and_clear_bit(HCI_LE_ENABLED,
+ &hdev->dev_flags))
+ err = new_settings(hdev, NULL);
- if (eir)
- memcpy(ev.eir, eir, sizeof(ev.eir));
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
+ cmd_status_rsp, &mgmt_err);
- if (dev_class)
- memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
+ return err;
+ }
- return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
+ if (enable) {
+ if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ changed = true;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
+
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
}
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                     u8 addr_type, u8 *dev_class, s8 rssi,
+                     u8 cfm_name, u8 ssp, u8 *eir, u16 eir_len)
{
- struct mgmt_ev_remote_name ev;
+ char buf[512];
+ struct mgmt_ev_device_found *ev = (void *) buf;
+ size_t ev_size;
- memset(&ev, 0, sizeof(ev));
+ /* Leave 5 bytes for a potential CoD field */
+ if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
+ return -EINVAL;
- bacpy(&ev.bdaddr, bdaddr);
- memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
+ memset(buf, 0, sizeof(buf));
- return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->rssi = rssi;
+ if (cfm_name)
+ ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
+ if (!ssp)
+ ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;
+
+ if (eir_len > 0)
+ memcpy(ev->eir, eir, eir_len);
+
+ if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
+ eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
+ dev_class, 3);
+
+ put_unaligned_le16(eir_len, &ev->eir_len);
+
+ ev_size = sizeof(*ev) + eir_len;
+
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+}
+
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len)
+{
+ struct mgmt_ev_device_found *ev;
+ char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
+ u16 eir_len;
+
+ ev = (struct mgmt_ev_device_found *) buf;
+
+ memset(buf, 0, sizeof(buf));
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->rssi = rssi;
+
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
+ name_len);
+
+ put_unaligned_le16(eir_len, &ev->eir_len);
+
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
+ sizeof(*ev) + eir_len, NULL);
}
int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
+ u8 type;
int err;
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
if (!cmd)
return -ENOENT;
- err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+ type = hdev->discovery.type;
+
+ err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+ &type, sizeof(type));
mgmt_pending_remove(cmd);
return err;
@@ -2756,7 +3525,8 @@
if (!cmd)
return -ENOENT;
- err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+ err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+ &hdev->discovery.type, sizeof(hdev->discovery.type));
mgmt_pending_remove(cmd);
return err;
@@ -2764,44 +3534,61 @@
int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
+ struct mgmt_ev_discovering ev;
struct pending_cmd *cmd;
+ BT_DBG("%s discovering %u", hdev->name, discovering);
+
if (discovering)
cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
else
cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
if (cmd != NULL) {
- cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
+ u8 type = hdev->discovery.type;
+
+ cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
+ sizeof(type));
mgmt_pending_remove(cmd);
}
- return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
- sizeof(discovering), NULL);
+ memset(&ev, 0, sizeof(ev));
+ ev.type = hdev->discovery.type;
+ ev.discovering = discovering;
+
+ return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_blocked ev;
cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = type;
return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ cmd ? cmd->sk : NULL);
}
-int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_unblocked ev;
cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = type;
return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ cmd ? cmd->sk : NULL);
}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
+
+module_param(enable_le, bool, 0644);
+MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index a2d4f51..c179734 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -196,7 +196,7 @@
static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
{
struct rfcomm_dev *dev, *entry;
- struct list_head *head = &rfcomm_dev_list, *p;
+ struct list_head *head = &rfcomm_dev_list;
int err = 0;
BT_DBG("id %d channel %d", req->dev_id, req->channel);
@@ -215,7 +215,7 @@
break;
dev->id++;
- head = p;
+ head = &entry->list;
}
} else {
dev->id = req->dev_id;
@@ -229,7 +229,7 @@
if (entry->id > dev->id - 1)
break;
- head = p;
+ head = &entry->list;
}
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 32c47de..deb1198 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -29,7 +29,7 @@
#include <linux/scatterlist.h>
#include <crypto/b128ops.h>
-#define SMP_TIMEOUT 30000 /* 30 seconds */
+#define SMP_TIMEOUT msecs_to_jiffies(30000)
static inline void swap128(u8 src[16], u8 dst[16])
{
@@ -186,8 +186,7 @@
hci_send_acl(conn->hchan, skb, 0);
cancel_delayed_work_sync(&conn->security_timer);
- schedule_delayed_work(&conn->security_timer,
- msecs_to_jiffies(SMP_TIMEOUT));
+ schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
}
static __u8 authreq_to_seclevel(__u8 authreq)
@@ -217,7 +216,7 @@
{
u8 dist_keys = 0;
- if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
+ if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
dist_keys = SMP_DIST_ENC_KEY;
authreq |= SMP_AUTH_BONDING;
} else {
@@ -250,21 +249,27 @@
(max_key_size < SMP_MIN_ENC_KEY_SIZE))
return SMP_ENC_KEY_SIZE;
- smp->smp_key_size = max_key_size;
+ smp->enc_key_size = max_key_size;
return 0;
}
static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
{
+ struct hci_conn *hcon = conn->hcon;
+
if (send)
smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
&reason);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
- mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason);
- cancel_delayed_work_sync(&conn->security_timer);
- smp_chan_destroy(conn);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
+ mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
+ hcon->dst_type, reason);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+ cancel_delayed_work_sync(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
}
#define JUST_WORKS 0x00
@@ -305,7 +310,7 @@
remote_io > SMP_IO_KEYBOARD_DISPLAY)
method = JUST_WORKS;
else
- method = gen_method[local_io][remote_io];
+ method = gen_method[remote_io][local_io];
/* If not bonding, don't ask user to confirm a Zero TK */
if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -346,9 +351,11 @@
hci_dev_lock(hcon->hdev);
if (method == REQ_PASSKEY)
- ret = mgmt_user_passkey_request(hcon->hdev, conn->dst);
+ ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
+ hcon->type, hcon->dst_type);
else
ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+ hcon->type, hcon->dst_type,
cpu_to_le32(passkey), 0);
hci_dev_unlock(hcon->hdev);
@@ -377,12 +384,11 @@
if (conn->hcon->out)
ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst,
- res);
+ conn->src, conn->hcon->dst_type, conn->dst, res);
else
ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
- conn->hcon->dst_type, conn->dst, 0, conn->src,
- res);
+ conn->hcon->dst_type, conn->dst, 0, conn->src,
+ res);
if (ret) {
reason = SMP_UNSPECIFIED;
goto error;
@@ -417,12 +423,10 @@
if (hcon->out)
ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
- conn->src, hcon->dst_type, conn->dst,
- res);
+ conn->src, hcon->dst_type, conn->dst, res);
else
ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
- hcon->dst_type, conn->dst, 0, conn->src,
- res);
+ hcon->dst_type, conn->dst, 0, conn->src, res);
if (ret) {
reason = SMP_UNSPECIFIED;
goto error;
@@ -446,16 +450,16 @@
smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
swap128(key, stk);
- memset(stk + smp->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
+ memset(stk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) {
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) {
reason = SMP_UNSPECIFIED;
goto error;
}
hci_le_start_enc(hcon, ediv, rand, stk);
- hcon->enc_key_size = smp->smp_key_size;
+ hcon->enc_key_size = smp->enc_key_size;
} else {
u8 stk[16], r[16], rand[8];
__le16 ediv;
@@ -469,11 +473,12 @@
smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
swap128(key, stk);
- memset(stk + smp->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
+ memset(stk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
- hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size,
- ediv, rand, stk);
+ hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
+ HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
+ ediv, rand);
}
return;
@@ -506,7 +511,7 @@
{
struct smp_chan *smp = conn->smp_chan;
- clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+ BUG_ON(!smp);
if (smp->tfm)
crypto_free_blkcipher(smp->tfm);
@@ -571,7 +576,7 @@
if (conn->hcon->link_mode & HCI_LM_MASTER)
return SMP_CMD_NOTSUPP;
- if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
+ if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
smp = smp_chan_create(conn);
smp = conn->smp_chan;
@@ -584,6 +589,8 @@
if (req->auth_req & SMP_AUTH_BONDING)
auth = req->auth_req;
+ conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+
build_pairing_cmd(conn, req, &rsp, auth);
key_size = min(req->max_key_size, rsp.max_key_size);
@@ -698,23 +705,18 @@
static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
{
- struct link_key *key;
- struct key_master_id *master;
+ struct smp_ltk *key;
struct hci_conn *hcon = conn->hcon;
- key = hci_find_link_key_type(hcon->hdev, conn->dst,
- HCI_LK_SMP_LTK);
+ key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
if (!key)
return 0;
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND,
- &hcon->pend))
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
return 1;
- master = (void *) key->data;
- hci_le_start_enc(hcon, master->ediv, master->rand,
- key->val);
- hcon->enc_key_size = key->pin_len;
+ hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
+ hcon->enc_key_size = key->enc_size;
return 1;
@@ -733,7 +735,7 @@
if (smp_ltk_encrypt(conn))
return 0;
- if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
return 0;
smp = smp_chan_create(conn);
@@ -772,7 +774,7 @@
if (smp_ltk_encrypt(conn))
goto done;
- if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
return 0;
smp = smp_chan_create(conn);
@@ -817,13 +819,19 @@
{
struct smp_cmd_master_ident *rp = (void *) skb->data;
struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+ u8 authenticated;
skb_pull(skb, sizeof(*rp));
- hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
- rp->ediv, rp->rand, smp->tk);
-
+ hci_dev_lock(hdev);
+ authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
+ hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+ HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
+ rp->ediv, rp->rand);
smp_distribute_keys(conn, 1);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -908,7 +916,7 @@
BT_DBG("conn %p force %d", conn, force);
- if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
+ if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
return 0;
rsp = (void *) &smp->prsp[1];
@@ -933,6 +941,8 @@
if (*keydist & SMP_DIST_ENC_KEY) {
struct smp_cmd_encrypt_info enc;
struct smp_cmd_master_ident ident;
+ struct hci_conn *hcon = conn->hcon;
+ u8 authenticated;
__le16 ediv;
get_random_bytes(enc.ltk, sizeof(enc.ltk));
@@ -941,8 +951,10 @@
smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
- hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
- ediv, ident.rand, enc.ltk);
+ authenticated = hcon->sec_level == BT_SECURITY_HIGH;
+ hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+ HCI_SMP_LTK_SLAVE, 1, authenticated,
+ enc.ltk, smp->enc_key_size, ediv, ident.rand);
ident.ediv = cpu_to_le16(ediv);
@@ -982,7 +994,7 @@
}
if (conn->hcon->out || force) {
- clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
cancel_delayed_work_sync(&conn->security_timer);
smp_chan_destroy(conn);
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8412247..dec4f38 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -62,6 +62,15 @@
#define brnf_filter_pppoe_tagged 0
#endif
+#define IS_IP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+
+#define IS_IPV6(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+
+#define IS_ARP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
@@ -639,8 +648,7 @@
return NF_DROP;
br = p->br;
- if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb)) {
+ if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
return NF_ACCEPT;
@@ -651,8 +659,7 @@
if (!brnf_call_iptables && !br->nf_call_iptables)
return NF_ACCEPT;
- if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
- !IS_PPPOE_IP(skb))
+ if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
@@ -701,7 +708,7 @@
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *in;
- if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
+ if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
@@ -718,6 +725,7 @@
return 0;
}
+
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
@@ -744,11 +752,9 @@
if (!parent)
return NF_DROP;
- if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
- IS_PPPOE_IP(skb))
+ if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
pf = PF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb))
+ else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
@@ -795,7 +801,7 @@
if (!brnf_call_arptables && !br->nf_call_arptables)
return NF_ACCEPT;
- if (skb->protocol != htons(ETH_P_ARP)) {
+ if (!IS_ARP(skb)) {
if (!IS_VLAN_ARP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
@@ -853,11 +859,9 @@
if (!realoutdev)
return NF_DROP;
- if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
- IS_PPPOE_IP(skb))
+ if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
pf = PF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb))
+ else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6751ed4..8c836d9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -31,7 +31,7 @@
void br_log_state(const struct net_bridge_port *p)
{
- br_info(p->br, "port %u(%s) entering %s state\n",
+ br_info(p->br, "port %u(%s) entered %s state\n",
(unsigned) p->port_no, p->dev->name,
br_port_state_names[p->state]);
}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 19308e3..f494496 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -98,14 +98,13 @@
struct net_bridge *br = p->br;
int wasroot;
- br_log_state(p);
-
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
p->state = BR_STATE_DISABLED;
p->topology_change_ack = 0;
p->config_pending = 0;
+ br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
del_timer(&p->message_age_timer);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8aa4ad0..5fe2ff3 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1335,7 +1335,12 @@
const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)m - base);
- if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+	/* ebtables expects 32 byte long names but xt_match names are 29 bytes
+	 * long. Copy 29 bytes and fill the remaining bytes with zeroes. */
+ strncpy(name, m->u.match->name, sizeof(name));
+ if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
@@ -1344,7 +1349,10 @@
const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)w - base);
- if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+ strncpy(name, w->u.watcher->name, sizeof(name));
+ if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
@@ -1355,6 +1363,7 @@
int ret;
char __user *hlp;
const struct ebt_entry_target *t;
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
if (e->bitmask == 0)
return 0;
@@ -1368,7 +1377,8 @@
ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
if (ret != 0)
return ret;
- if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+ strncpy(name, t->u.target->name, sizeof(name));
+ if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 65d6ef3..2914659 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -41,8 +41,10 @@
struct caif_payload_info *info;
int ret;
- if (!cfsrvl_ready(service, &ret))
+ if (!cfsrvl_ready(service, &ret)) {
+ cfpkt_destroy(pkt);
return ret;
+ }
/* Add info for MUX-layer to route the packet out */
info = cfpkt_info(pkt);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0f5ff27..a63f4a5 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -86,12 +86,17 @@
struct caif_payload_info *info;
struct cfsrvl *service = container_obj(layr);
int ret;
- if (!cfsrvl_ready(service, &ret))
+
+ if (!cfsrvl_ready(service, &ret)) {
+ cfpkt_destroy(pkt);
return ret;
+ }
/* STE Modem cannot handle more than 1500 bytes datagrams */
- if (cfpkt_getlen(pkt) > DGM_MTU)
+ if (cfpkt_getlen(pkt) > DGM_MTU) {
+ cfpkt_destroy(pkt);
return -EMSGSIZE;
+ }
cfpkt_add_head(pkt, &zero, 3);
packet_type = 0x08; /* B9 set - UNCLASSIFIED */
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 6dc75d4..2b563ad 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -184,6 +184,11 @@
rfml->serv.dev_info.id);
}
spin_unlock(&rfml->sync);
+
+ if (unlikely(err == -EAGAIN))
+ /* It is not possible to recover after drop of a fragment */
+ err = -EIO;
+
return err;
}
@@ -218,7 +223,7 @@
caif_assert(layr->dn->transmit != NULL);
if (!cfsrvl_ready(&rfml->serv, &err))
- return err;
+ goto out;
err = -EPROTO;
if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
@@ -251,8 +256,11 @@
err = cfrfml_transmit_segment(rfml, frontpkt);
- if (err != 0)
+ if (err != 0) {
+ frontpkt = NULL;
goto out;
+ }
+
frontpkt = rearpkt;
rearpkt = NULL;
@@ -286,19 +294,8 @@
if (rearpkt)
cfpkt_destroy(rearpkt);
- if (frontpkt && frontpkt != pkt) {
-
+ if (frontpkt)
cfpkt_destroy(frontpkt);
- /*
- * Socket layer will free the original packet,
- * but this packet may already be sent and
- * freed. So we have to return 0 in this case
- * to avoid socket layer to re-free this packet.
- * The return of shutdown indication will
- * cause connection to be invalidated anyhow.
- */
- err = 0;
- }
}
return err;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index b99f5b2..4aa33d4 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -174,15 +174,11 @@
bool cfsrvl_ready(struct cfsrvl *service, int *err)
{
- if (service->open && service->modem_flow_on && service->phy_flow_on)
- return true;
if (!service->open) {
*err = -ENOTCONN;
return false;
}
- caif_assert(!(service->modem_flow_on && service->phy_flow_on));
- *err = -EAGAIN;
- return false;
+ return true;
}
u8 cfsrvl_getphyid(struct cflayer *layer)
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 53e49f3..86d2dad 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -84,8 +84,11 @@
caif_assert(layr != NULL);
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (!cfsrvl_ready(service, &ret))
+
+ if (!cfsrvl_ready(service, &ret)) {
+ cfpkt_destroy(pkt);
return ret;
+ }
cfpkt_add_head(pkt, &zero, 1);
/* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index e3f37db..a8e2a2d7 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -50,8 +50,12 @@
struct caif_payload_info *info;
u32 videoheader = 0;
int ret;
- if (!cfsrvl_ready(service, &ret))
+
+ if (!cfsrvl_ready(service, &ret)) {
+ cfpkt_destroy(pkt);
return ret;
+ }
+
cfpkt_add_head(pkt, &videoheader, 4);
/* Add info for MUX-layer to route the packet out */
info = cfpkt_info(pkt);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index e866234..20618dd 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -28,6 +28,7 @@
/* 5 sec. connect timeout */
#define CONNECT_TIMEOUT (5 * HZ)
#define CAIF_NET_DEFAULT_QUEUE_LEN 500
+#define UNDEF_CONNID 0xffffffff
/*This list is protected by the rtnl lock. */
static LIST_HEAD(chnl_net_list);
@@ -408,7 +409,7 @@
priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
priv->conn_req.priority = CAIF_PRIO_LOW;
/* Insert illegal value */
- priv->conn_req.sockaddr.u.dgm.connection_id = 0;
+ priv->conn_req.sockaddr.u.dgm.connection_id = UNDEF_CONNID;
priv->flowenabled = false;
init_waitqueue_head(&priv->netmgmt_wq);
@@ -471,9 +472,11 @@
else
list_add(&caifdev->list_field, &chnl_net_list);
- /* Take ifindex as connection-id if null */
- if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0)
+ /* Use ifindex as connection id, and use loopback channel default. */
+ if (caifdev->conn_req.sockaddr.u.dgm.connection_id == UNDEF_CONNID) {
caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex;
+ caifdev->conn_req.protocol = CAIFPROTO_DATAGRAM_LOOP;
+ }
return ret;
}
diff --git a/net/compat.c b/net/compat.c
index 6def90e..64b4515 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -79,7 +79,7 @@
/* I've named the args so it is easy to tell whose space the pointers are in. */
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
- struct sockaddr *kern_address, int mode)
+ struct sockaddr_storage *kern_address, int mode)
{
int tot_len;
diff --git a/net/core/iovec.c b/net/core/iovec.c
index c40f27e..7e7aeb0 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,7 +35,7 @@
* in any case.
*/
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
int size, ct, err;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e588a34..fdf49fd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -65,6 +65,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "IPv4: " fmt
+
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -1085,13 +1087,11 @@
return;
out_permanent:
- printk(KERN_ERR "Attempt to override permanent protocol %d.\n",
- protocol);
+ pr_err("Attempt to override permanent protocol %d\n", protocol);
goto out;
out_illegal:
- printk(KERN_ERR
- "Ignoring attempt to register invalid socket type %d.\n",
+ pr_err("Ignoring attempt to register invalid socket type %d\n",
p->type);
goto out;
}
@@ -1100,8 +1100,7 @@
void inet_unregister_protosw(struct inet_protosw *p)
{
if (INET_PROTOSW_PERMANENT & p->flags) {
- printk(KERN_ERR
- "Attempt to unregister permanent protocol %d.\n",
+ pr_err("Attempt to unregister permanent protocol %d\n",
p->protocol);
} else {
spin_lock_bh(&inetsw_lock);
@@ -1150,8 +1149,8 @@
return 0;
if (sysctl_ip_dynaddr > 1) {
- printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n",
- __func__, &old_saddr, &new_saddr);
+ pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
+ __func__, &old_saddr, &new_saddr);
}
inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
@@ -1680,14 +1679,14 @@
*/
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
- printk(KERN_CRIT "inet_init: Cannot add ICMP protocol\n");
+ pr_crit("%s: Cannot add ICMP protocol\n", __func__);
if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
- printk(KERN_CRIT "inet_init: Cannot add UDP protocol\n");
+ pr_crit("%s: Cannot add UDP protocol\n", __func__);
if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
- printk(KERN_CRIT "inet_init: Cannot add TCP protocol\n");
+ pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
- printk(KERN_CRIT "inet_init: Cannot add IGMP protocol\n");
+ pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif
/* Register the socket-side information for inet_create. */
@@ -1734,14 +1733,14 @@
*/
#if defined(CONFIG_IP_MROUTE)
if (ip_mr_init())
- printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n");
+ pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif
/*
* Initialise per-cpu ipv4 mibs
*/
if (init_ipv4_mibs())
- printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n");
+ pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
ipv4_proc_init();
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 36d1440..fd508b5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "IPsec: " fmt
+
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -445,9 +447,10 @@
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_ahash_digestsize(ahash)) {
- printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
- x->aalg->alg_name, crypto_ahash_digestsize(ahash),
- aalg_desc->uinfo.auth.icv_fullbits/8);
+ pr_info("%s: %s digestsize %u != %hu\n",
+ __func__, x->aalg->alg_name,
+ crypto_ahash_digestsize(ahash),
+ aalg_desc->uinfo.auth.icv_fullbits / 8);
goto error;
}
@@ -510,11 +513,11 @@
static int __init ah4_init(void)
{
if (xfrm_register_type(&ah_type, AF_INET) < 0) {
- printk(KERN_INFO "ip ah init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
- printk(KERN_INFO "ip ah init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ah_type, AF_INET);
return -EAGAIN;
}
@@ -524,9 +527,9 @@
static void __exit ah4_fini(void)
{
if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
- printk(KERN_INFO "ip ah close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
- printk(KERN_INFO "ip ah close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ah4_init);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index a5b4134..89a47b3 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "IPsec: " fmt
+
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
@@ -706,11 +708,11 @@
static int __init esp4_init(void)
{
if (xfrm_register_type(&esp_type, AF_INET) < 0) {
- printk(KERN_INFO "ip esp init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
- printk(KERN_INFO "ip esp init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&esp_type, AF_INET);
return -EAGAIN;
}
@@ -720,9 +722,9 @@
static void __exit esp4_fini(void)
{
if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
- printk(KERN_INFO "ip esp close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
- printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp4_init);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 92fc5f6..76e72ba 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -695,7 +695,7 @@
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, prefix, mask);
if (prim == NULL) {
- printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n");
+ pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
}
@@ -749,11 +749,11 @@
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (prim == NULL) {
- printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n");
+ pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
if (iprim && iprim != prim) {
- printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n");
+ pr_warn("%s: bug: iprim != prim\n", __func__);
return;
}
} else if (!ipv4_is_zeronet(any) &&
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 80106d8..a8c5c1d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -154,7 +154,7 @@
void free_fib_info(struct fib_info *fi)
{
if (fi->fib_dead == 0) {
- pr_warning("Freeing alive fib_info %p\n", fi);
+ pr_warn("Freeing alive fib_info %p\n", fi);
return;
}
change_nexthops(fi) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b555a5..da9b9cb 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1170,9 +1170,8 @@
}
if (tp && tp->pos + tp->bits > 32)
- pr_warning("fib_trie"
- " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
- tp, tp->pos, tp->bits, key, plen);
+ pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
+ tp, tp->pos, tp->bits, key, plen);
/* Rebalance the trie */
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index 8cb1ebb..42a4910 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
@@ -118,10 +120,10 @@
static int __init gre_init(void)
{
- pr_info("GRE over IPv4 demultiplexor driver");
+ pr_info("GRE over IPv4 demultiplexor driver\n");
if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
- pr_err("gre: can't add protocol\n");
+ pr_err("can't add protocol\n");
return -EAGAIN;
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ab188ae..9664d35 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -62,6 +62,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
@@ -670,7 +672,7 @@
break;
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
- LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n",
+ LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
&iph->daddr);
} else {
info = ip_rt_frag_needed(net, iph,
@@ -681,7 +683,7 @@
}
break;
case ICMP_SR_FAILED:
- LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n",
+ LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"),
&iph->daddr);
break;
default:
@@ -713,13 +715,10 @@
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
if (net_ratelimit())
- printk(KERN_WARNING "%pI4 sent an invalid ICMP "
- "type %u, code %u "
- "error to a broadcast: %pI4 on %s\n",
- &ip_hdr(skb)->saddr,
- icmph->type, icmph->code,
- &iph->daddr,
- skb->dev->name);
+ pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
+ &ip_hdr(skb)->saddr,
+ icmph->type, icmph->code,
+ &iph->daddr, skb->dev->name);
goto out;
}
@@ -946,8 +945,8 @@
break;
}
if (!ifa && net_ratelimit()) {
- printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
- mp, dev->name, &ip_hdr(skb)->saddr);
+ pr_info("Wrong address mask %pI4 from %s/%pI4\n",
+ mp, dev->name, &ip_hdr(skb)->saddr);
}
}
}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bf4a9c4..d4d61b6 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
+#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
@@ -66,6 +67,11 @@
static struct kmem_cache *peer_cachep __read_mostly;
+static LIST_HEAD(gc_list);
+static const int gc_delay = 60 * HZ;
+static struct delayed_work gc_work;
+static DEFINE_SPINLOCK(gc_lock);
+
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
@@ -102,6 +108,50 @@
int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+static void inetpeer_gc_worker(struct work_struct *work)
+{
+ struct inet_peer *p, *n;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&gc_lock);
+ list_replace_init(&gc_list, &list);
+ spin_unlock_bh(&gc_lock);
+
+ if (list_empty(&list))
+ return;
+
+ list_for_each_entry_safe(p, n, &list, gc_list) {
+
+		if (need_resched())
+ cond_resched();
+
+ if (p->avl_left != peer_avl_empty) {
+ list_add_tail(&p->avl_left->gc_list, &list);
+ p->avl_left = peer_avl_empty;
+ }
+
+ if (p->avl_right != peer_avl_empty) {
+ list_add_tail(&p->avl_right->gc_list, &list);
+ p->avl_right = peer_avl_empty;
+ }
+
+ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
+ if (!atomic_read(&p->refcnt)) {
+ list_del(&p->gc_list);
+ kmem_cache_free(peer_cachep, p);
+ }
+ }
+
+ if (list_empty(&list))
+ return;
+
+ spin_lock_bh(&gc_lock);
+ list_splice(&list, &gc_list);
+ spin_unlock_bh(&gc_lock);
+
+ schedule_delayed_work(&gc_work, gc_delay);
+}
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
@@ -126,6 +176,7 @@
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
+ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}
static int addr_compare(const struct inetpeer_addr *a,
@@ -447,9 +498,8 @@
p->rate_last = 0;
p->pmtu_expires = 0;
p->pmtu_orig = 0;
- p->redirect_genid = 0;
memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-
+ INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
link_to_pool(p, base);
@@ -509,3 +559,30 @@
return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
+void inetpeer_invalidate_tree(int family)
+{
+ struct inet_peer *old, *new, *prev;
+ struct inet_peer_base *base = family_to_base(family);
+
+ write_seqlock_bh(&base->lock);
+
+ old = base->root;
+ if (old == peer_avl_empty_rcu)
+ goto out;
+
+ new = peer_avl_empty_rcu;
+
+ prev = cmpxchg(&base->root, old, new);
+ if (prev == old) {
+ base->total = 0;
+ spin_lock(&gc_lock);
+ list_add_tail(&prev->gc_list, &gc_list);
+ spin_unlock(&gc_lock);
+ schedule_delayed_work(&gc_work, gc_delay);
+ }
+
+out:
+ write_sequnlock_bh(&base->lock);
+}
+EXPORT_SYMBOL(inetpeer_invalidate_tree);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1f23a57..3727e23 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -20,6 +20,8 @@
* Patrick McHardy : LRU queue of frag heads for evictor.
*/
+#define pr_fmt(fmt) "IPv4: " fmt
+
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -299,7 +301,7 @@
return container_of(q, struct ipq, q);
out_nomem:
- LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
+ LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
return NULL;
}
@@ -637,14 +639,13 @@
return 0;
out_nomem:
- LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
- "queue %p\n", qp);
+ LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
+ qp);
err = -ENOMEM;
goto out_fail;
out_oversize:
if (net_ratelimit())
- printk(KERN_INFO "Oversized IP packet from %pI4.\n",
- &qp->saddr);
+ pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
return err;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6ef66af..b57532d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -1716,7 +1718,7 @@
{
int err;
- printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
+ pr_info("GRE over IPv4 tunneling driver\n");
err = register_pernet_device(&ipgre_net_ops);
if (err < 0)
@@ -1724,7 +1726,7 @@
err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
- printk(KERN_INFO "ipgre init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
}
@@ -1753,7 +1755,7 @@
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
- printk(KERN_INFO "ipgre close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
unregister_pernet_device(&ipgre_net_ops);
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 073a9b0..f3f1108 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -113,6 +113,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "IPv4: " fmt
+
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -148,7 +150,7 @@
/*
* Process Router Attention IP option (RFC 2113)
*/
-int ip_call_ra_chain(struct sk_buff *skb)
+bool ip_call_ra_chain(struct sk_buff *skb)
{
struct ip_ra_chain *ra;
u8 protocol = ip_hdr(skb)->protocol;
@@ -167,7 +169,7 @@
net_eq(sock_net(sk), dev_net(dev))) {
if (ip_is_fragment(ip_hdr(skb))) {
if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
- return 1;
+ return true;
}
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -180,9 +182,9 @@
if (last) {
raw_rcv(last, skb);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static int ip_local_deliver_finish(struct sk_buff *skb)
@@ -265,7 +267,7 @@
ip_local_deliver_finish);
}
-static inline int ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb)
{
struct ip_options *opt;
const struct iphdr *iph;
@@ -299,8 +301,8 @@
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
if (IN_DEV_LOG_MARTIANS(in_dev) &&
net_ratelimit())
- printk(KERN_INFO "source route option %pI4 -> %pI4\n",
- &iph->saddr, &iph->daddr);
+ pr_info("source route option %pI4 -> %pI4\n",
+ &iph->saddr, &iph->daddr);
goto drop;
}
}
@@ -309,9 +311,9 @@
goto drop;
}
- return 0;
+ return false;
drop:
- return -1;
+ return true;
}
static int ip_rcv_finish(struct sk_buff *skb)
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 42dd1a9..a0d0d9d 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) "IPv4: " fmt
+
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -577,7 +579,7 @@
ip_rt_get_source(&optptr[srrptr-1], skb, rt);
optptr[2] = srrptr+4;
} else if (net_ratelimit())
- printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
+ pr_crit("%s(): Argh! Destination lost!\n", __func__);
if (opt->ts_needaddr) {
optptr = raw + opt->ts;
ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c857f6f..63b64c4 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -156,11 +156,11 @@
static int __init ipcomp4_init(void)
{
if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) {
- printk(KERN_INFO "ipcomp init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
- printk(KERN_INFO "ipcomp init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ipcomp_type, AF_INET);
return -EAGAIN;
}
@@ -170,9 +170,9 @@
static void __exit ipcomp4_fini(void)
{
if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0)
- printk(KERN_INFO "ip ipcomp close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0)
- printk(KERN_INFO "ip ipcomp close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ipcomp4_init);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 6e412a6..92ac7e7 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -214,7 +214,7 @@
if (!(dev->flags & IFF_LOOPBACK))
continue;
if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
- printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name);
+ pr_err("IP-Config: Failed to open %s\n", dev->name);
}
for_each_netdev(&init_net, dev) {
@@ -223,7 +223,8 @@
if (dev->mtu >= 364)
able |= IC_BOOTP;
else
- printk(KERN_WARNING "DHCP/BOOTP: Ignoring device %s, MTU %d too small", dev->name, dev->mtu);
+ pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small",
+ dev->name, dev->mtu);
if (!(dev->flags & IFF_NOARP))
able |= IC_RARP;
able &= ic_proto_enabled;
@@ -231,7 +232,8 @@
continue;
oflags = dev->flags;
if (dev_change_flags(dev, oflags | IFF_UP) < 0) {
- printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name);
+ pr_err("IP-Config: Failed to open %s\n",
+ dev->name);
continue;
}
if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
@@ -273,9 +275,10 @@
if (!ic_first_dev) {
if (user_dev_name[0])
- printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name);
+ pr_err("IP-Config: Device `%s' not found\n",
+ user_dev_name);
else
- printk(KERN_ERR "IP-Config: No network devices available.\n");
+ pr_err("IP-Config: No network devices available\n");
return -ENODEV;
}
return 0;
@@ -359,17 +362,20 @@
strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name);
set_sockaddr(sin, ic_myaddr, 0);
if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
- printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err);
+ pr_err("IP-Config: Unable to set interface address (%d)\n",
+ err);
return -1;
}
set_sockaddr(sin, ic_netmask, 0);
if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
- printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err);
+ pr_err("IP-Config: Unable to set interface netmask (%d)\n",
+ err);
return -1;
}
set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
- printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err);
+ pr_err("IP-Config: Unable to set interface broadcast address (%d)\n",
+ err);
return -1;
}
/* Handle the case where we need non-standard MTU on the boot link (a network
@@ -380,8 +386,8 @@
strcpy(ir.ifr_name, ic_dev->name);
ir.ifr_mtu = ic_dev_mtu;
if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
- printk(KERN_ERR "IP-Config: Unable to set interface mtu to %d (%d).\n",
- ic_dev_mtu, err);
+ pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n",
+ ic_dev_mtu, err);
}
return 0;
}
@@ -396,7 +402,7 @@
memset(&rm, 0, sizeof(rm));
if ((ic_gateway ^ ic_myaddr) & ic_netmask) {
- printk(KERN_ERR "IP-Config: Gateway not on directly connected network.\n");
+ pr_err("IP-Config: Gateway not on directly connected network\n");
return -1;
}
set_sockaddr((struct sockaddr_in *) &rm.rt_dst, 0, 0);
@@ -404,7 +410,8 @@
set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0);
rm.rt_flags = RTF_UP | RTF_GATEWAY;
if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) {
- printk(KERN_ERR "IP-Config: Cannot add default route (%d).\n", err);
+ pr_err("IP-Config: Cannot add default route (%d)\n",
+ err);
return -1;
}
}
@@ -437,8 +444,8 @@
else if (IN_CLASSC(ntohl(ic_myaddr)))
ic_netmask = htonl(IN_CLASSC_NET);
else {
- printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n",
- &ic_myaddr);
+ pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
+ &ic_myaddr);
return -1;
}
printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask);
@@ -688,8 +695,8 @@
e += len;
}
if (*vendor_class_identifier) {
- printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n",
- vendor_class_identifier);
+ pr_info("DHCP: sending class identifier \"%s\"\n",
+ vendor_class_identifier);
*e++ = 60; /* Class-identifier */
len = strlen(vendor_class_identifier);
*e++ = len;
@@ -949,8 +956,7 @@
/* Fragments are not supported */
if (ip_is_fragment(h)) {
if (net_ratelimit())
- printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented "
- "reply.\n");
+ pr_err("DHCP/BOOTP: Ignoring fragmented reply\n");
goto drop;
}
@@ -999,8 +1005,7 @@
if (b->op != BOOTP_REPLY ||
b->xid != d->xid) {
if (net_ratelimit())
- printk(KERN_ERR "DHCP/BOOTP: Reply not for us, "
- "op[%x] xid[%x]\n",
+ pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
b->op, b->xid);
goto drop_unlock;
}
@@ -1008,7 +1013,7 @@
/* Is it a reply for the device we are configuring? */
if (b->xid != ic_dev_xid) {
if (net_ratelimit())
- printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
+ pr_err("DHCP/BOOTP: Ignoring delayed packet\n");
goto drop_unlock;
}
@@ -1146,17 +1151,17 @@
* are missing, and without DHCP/BOOTP/RARP we are unable to get it.
*/
if (!ic_proto_enabled) {
- printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n");
+ pr_err("IP-Config: Incomplete network configuration information\n");
return -1;
}
#ifdef IPCONFIG_BOOTP
if ((ic_proto_enabled ^ ic_proto_have_if) & IC_BOOTP)
- printk(KERN_ERR "DHCP/BOOTP: No suitable device found.\n");
+ pr_err("DHCP/BOOTP: No suitable device found\n");
#endif
#ifdef IPCONFIG_RARP
if ((ic_proto_enabled ^ ic_proto_have_if) & IC_RARP)
- printk(KERN_ERR "RARP: No suitable device found.\n");
+ pr_err("RARP: No suitable device found\n");
#endif
if (!ic_proto_have_if)
@@ -1183,11 +1188,11 @@
* [Actually we could now, but the nothing else running note still
* applies.. - AC]
*/
- printk(KERN_NOTICE "Sending %s%s%s requests .",
- do_bootp
- ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "",
- (do_bootp && do_rarp) ? " and " : "",
- do_rarp ? "RARP" : "");
+ pr_notice("Sending %s%s%s requests .",
+ do_bootp
+ ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "",
+ (do_bootp && do_rarp) ? " and " : "",
+ do_rarp ? "RARP" : "");
start_jiffies = jiffies;
d = ic_first_dev;
@@ -1216,13 +1221,13 @@
(ic_proto_enabled & IC_USE_DHCP) &&
ic_dhcp_msgtype != DHCPACK) {
ic_got_reply = 0;
- printk(KERN_CONT ",");
+ pr_cont(",");
continue;
}
#endif /* IPCONFIG_DHCP */
if (ic_got_reply) {
- printk(KERN_CONT " OK\n");
+ pr_cont(" OK\n");
break;
}
@@ -1230,7 +1235,7 @@
continue;
if (! --retries) {
- printk(KERN_CONT " timed out!\n");
+ pr_cont(" timed out!\n");
break;
}
@@ -1240,7 +1245,7 @@
if (timeout > CONF_TIMEOUT_MAX)
timeout = CONF_TIMEOUT_MAX;
- printk(KERN_CONT ".");
+ pr_cont(".");
}
#ifdef IPCONFIG_BOOTP
@@ -1260,8 +1265,8 @@
printk("IP-Config: Got %s answer from %pI4, ",
((ic_got_reply & IC_RARP) ? "RARP"
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
- &ic_servaddr);
- printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
+ &ic_servaddr);
+ pr_cont("my address is %pI4\n", &ic_myaddr);
return 0;
}
@@ -1437,24 +1442,22 @@
*/
#ifdef CONFIG_ROOT_NFS
if (ROOT_DEV == Root_NFS) {
- printk(KERN_ERR
- "IP-Config: Retrying forever (NFS root)...\n");
+ pr_err("IP-Config: Retrying forever (NFS root)...\n");
goto try_try_again;
}
#endif
if (--retries) {
- printk(KERN_ERR
- "IP-Config: Reopening network devices...\n");
+ pr_err("IP-Config: Reopening network devices...\n");
goto try_try_again;
}
/* Oh, well. At least we tried. */
- printk(KERN_ERR "IP-Config: Auto-configuration of network failed.\n");
+ pr_err("IP-Config: Auto-configuration of network failed\n");
return -1;
}
#else /* !DYNAMIC */
- printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n");
+ pr_err("IP-Config: Incomplete network configuration information\n");
ic_close_devs();
return -1;
#endif /* IPCONFIG_DYNAMIC */
@@ -1492,19 +1495,16 @@
/*
* Clue in the operator.
*/
- printk("IP-Config: Complete:\n");
- printk(" device=%s", ic_dev->name);
- printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
- printk(KERN_CONT ", mask=%pI4", &ic_netmask);
- printk(KERN_CONT ", gw=%pI4", &ic_gateway);
- printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s",
- utsname()->nodename, ic_domain, utsname()->domainname);
- printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
- printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
- printk(KERN_CONT ", rootpath=%s", root_server_path);
+ pr_info("IP-Config: Complete:\n");
+ pr_info(" device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n",
+ ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway);
+ pr_info(" host=%s, domain=%s, nis-domain=%s\n",
+ utsname()->nodename, ic_domain, utsname()->domainname);
+ pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s",
+ &ic_servaddr, &root_server_addr, root_server_path);
if (ic_dev_mtu)
- printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
- printk(KERN_CONT "\n");
+ pr_cont(", mtu=%d", ic_dev_mtu);
+ pr_cont("\n");
#endif /* !SILENT */
return 0;
@@ -1637,8 +1637,8 @@
if (strlcpy(vendor_class_identifier, addrs,
sizeof(vendor_class_identifier))
>= sizeof(vendor_class_identifier))
- printk(KERN_WARNING "DHCP: vendorclass too long, truncated to \"%s\"",
- vendor_class_identifier);
+ pr_warn("DHCP: vendorclass too long, truncated to \"%s\"",
+ vendor_class_identifier);
return 1;
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index f84ebff..ae1413e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -892,7 +892,7 @@
err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
if (err < 0) {
unregister_pernet_device(&ipip_net_ops);
- printk(KERN_INFO "ipip init: can't register tunnel\n");
+ pr_info("%s: can't register tunnel\n", __func__);
}
return err;
}
@@ -900,7 +900,7 @@
static void __exit ipip_fini(void)
{
if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
- printk(KERN_INFO "ipip close: can't deregister tunnel\n");
+ pr_info("%s: can't deregister tunnel\n", __func__);
unregister_pernet_device(&ipip_net_ops);
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7bc2db6..0518a4f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -951,7 +951,7 @@
rcu_read_unlock();
if (ret < 0) {
if (net_ratelimit())
- printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
+ pr_warn("mroute: pending queue full, dropping entries\n");
kfree_skb(skb);
}
@@ -2538,7 +2538,7 @@
goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
- printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
+ pr_err("%s: can't add PIM protocol\n", __func__);
err = -EAGAIN;
goto add_proto_fail;
}
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 74dfc9e..fcc543c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -123,15 +123,6 @@
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_TARGET_LOG
- tristate "LOG target support"
- default m if NETFILTER_ADVANCED=n
- help
- This option adds a `LOG' target, which allows you to create rules in
- any iptables table which records the packet header to the syslog.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_TARGET_ULOG
tristate "ULOG target support"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 213a462..240b684 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -54,7 +54,6 @@
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
-obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
deleted file mode 100644
index d76d6c9..0000000
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * This is a module which is used for logging packets.
- */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ip.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/tcp.h>
-#include <net/route.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ipt_LOG.h>
-#include <net/netfilter/nf_log.h>
-#include <net/netfilter/xt_log.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: IPv4 packet logging to syslog");
-
-/* One level of recursion won't kill us */
-static void dump_packet(struct sbuff *m,
- const struct nf_loginfo *info,
- const struct sk_buff *skb,
- unsigned int iphoff)
-{
- struct iphdr _iph;
- const struct iphdr *ih;
- unsigned int logflags;
-
- if (info->type == NF_LOG_TYPE_LOG)
- logflags = info->u.log.logflags;
- else
- logflags = NF_LOG_MASK;
-
- ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
- if (ih == NULL) {
- sb_add(m, "TRUNCATED");
- return;
- }
-
- /* Important fields:
- * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
- /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
- sb_add(m, "SRC=%pI4 DST=%pI4 ",
- &ih->saddr, &ih->daddr);
-
- /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
- sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
- ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
- ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
-
- /* Max length: 6 "CE DF MF " */
- if (ntohs(ih->frag_off) & IP_CE)
- sb_add(m, "CE ");
- if (ntohs(ih->frag_off) & IP_DF)
- sb_add(m, "DF ");
- if (ntohs(ih->frag_off) & IP_MF)
- sb_add(m, "MF ");
-
- /* Max length: 11 "FRAG:65535 " */
- if (ntohs(ih->frag_off) & IP_OFFSET)
- sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
-
- if ((logflags & IPT_LOG_IPOPT) &&
- ih->ihl * 4 > sizeof(struct iphdr)) {
- const unsigned char *op;
- unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
- unsigned int i, optsize;
-
- optsize = ih->ihl * 4 - sizeof(struct iphdr);
- op = skb_header_pointer(skb, iphoff+sizeof(_iph),
- optsize, _opt);
- if (op == NULL) {
- sb_add(m, "TRUNCATED");
- return;
- }
-
- /* Max length: 127 "OPT (" 15*4*2chars ") " */
- sb_add(m, "OPT (");
- for (i = 0; i < optsize; i++)
- sb_add(m, "%02X", op[i]);
- sb_add(m, ") ");
- }
-
- switch (ih->protocol) {
- case IPPROTO_TCP: {
- struct tcphdr _tcph;
- const struct tcphdr *th;
-
- /* Max length: 10 "PROTO=TCP " */
- sb_add(m, "PROTO=TCP ");
-
- if (ntohs(ih->frag_off) & IP_OFFSET)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- th = skb_header_pointer(skb, iphoff + ih->ihl * 4,
- sizeof(_tcph), &_tcph);
- if (th == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ",
- skb->len - iphoff - ih->ihl*4);
- break;
- }
-
- /* Max length: 20 "SPT=65535 DPT=65535 " */
- sb_add(m, "SPT=%u DPT=%u ",
- ntohs(th->source), ntohs(th->dest));
- /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
- if (logflags & IPT_LOG_TCPSEQ)
- sb_add(m, "SEQ=%u ACK=%u ",
- ntohl(th->seq), ntohl(th->ack_seq));
- /* Max length: 13 "WINDOW=65535 " */
- sb_add(m, "WINDOW=%u ", ntohs(th->window));
- /* Max length: 9 "RES=0x3F " */
- sb_add(m, "RES=0x%02x ", (u8)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
- /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
- if (th->cwr)
- sb_add(m, "CWR ");
- if (th->ece)
- sb_add(m, "ECE ");
- if (th->urg)
- sb_add(m, "URG ");
- if (th->ack)
- sb_add(m, "ACK ");
- if (th->psh)
- sb_add(m, "PSH ");
- if (th->rst)
- sb_add(m, "RST ");
- if (th->syn)
- sb_add(m, "SYN ");
- if (th->fin)
- sb_add(m, "FIN ");
- /* Max length: 11 "URGP=65535 " */
- sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
-
- if ((logflags & IPT_LOG_TCPOPT) &&
- th->doff * 4 > sizeof(struct tcphdr)) {
- unsigned char _opt[4 * 15 - sizeof(struct tcphdr)];
- const unsigned char *op;
- unsigned int i, optsize;
-
- optsize = th->doff * 4 - sizeof(struct tcphdr);
- op = skb_header_pointer(skb,
- iphoff+ih->ihl*4+sizeof(_tcph),
- optsize, _opt);
- if (op == NULL) {
- sb_add(m, "TRUNCATED");
- return;
- }
-
- /* Max length: 127 "OPT (" 15*4*2chars ") " */
- sb_add(m, "OPT (");
- for (i = 0; i < optsize; i++)
- sb_add(m, "%02X", op[i]);
- sb_add(m, ") ");
- }
- break;
- }
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE: {
- struct udphdr _udph;
- const struct udphdr *uh;
-
- if (ih->protocol == IPPROTO_UDP)
- /* Max length: 10 "PROTO=UDP " */
- sb_add(m, "PROTO=UDP " );
- else /* Max length: 14 "PROTO=UDPLITE " */
- sb_add(m, "PROTO=UDPLITE ");
-
- if (ntohs(ih->frag_off) & IP_OFFSET)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- uh = skb_header_pointer(skb, iphoff+ih->ihl*4,
- sizeof(_udph), &_udph);
- if (uh == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ",
- skb->len - iphoff - ih->ihl*4);
- break;
- }
-
- /* Max length: 20 "SPT=65535 DPT=65535 " */
- sb_add(m, "SPT=%u DPT=%u LEN=%u ",
- ntohs(uh->source), ntohs(uh->dest),
- ntohs(uh->len));
- break;
- }
- case IPPROTO_ICMP: {
- struct icmphdr _icmph;
- const struct icmphdr *ich;
- static const size_t required_len[NR_ICMP_TYPES+1]
- = { [ICMP_ECHOREPLY] = 4,
- [ICMP_DEST_UNREACH]
- = 8 + sizeof(struct iphdr),
- [ICMP_SOURCE_QUENCH]
- = 8 + sizeof(struct iphdr),
- [ICMP_REDIRECT]
- = 8 + sizeof(struct iphdr),
- [ICMP_ECHO] = 4,
- [ICMP_TIME_EXCEEDED]
- = 8 + sizeof(struct iphdr),
- [ICMP_PARAMETERPROB]
- = 8 + sizeof(struct iphdr),
- [ICMP_TIMESTAMP] = 20,
- [ICMP_TIMESTAMPREPLY] = 20,
- [ICMP_ADDRESS] = 12,
- [ICMP_ADDRESSREPLY] = 12 };
-
- /* Max length: 11 "PROTO=ICMP " */
- sb_add(m, "PROTO=ICMP ");
-
- if (ntohs(ih->frag_off) & IP_OFFSET)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
- sizeof(_icmph), &_icmph);
- if (ich == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ",
- skb->len - iphoff - ih->ihl*4);
- break;
- }
-
- /* Max length: 18 "TYPE=255 CODE=255 " */
- sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- if (ich->type <= NR_ICMP_TYPES &&
- required_len[ich->type] &&
- skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
- sb_add(m, "INCOMPLETE [%u bytes] ",
- skb->len - iphoff - ih->ihl*4);
- break;
- }
-
- switch (ich->type) {
- case ICMP_ECHOREPLY:
- case ICMP_ECHO:
- /* Max length: 19 "ID=65535 SEQ=65535 " */
- sb_add(m, "ID=%u SEQ=%u ",
- ntohs(ich->un.echo.id),
- ntohs(ich->un.echo.sequence));
- break;
-
- case ICMP_PARAMETERPROB:
- /* Max length: 14 "PARAMETER=255 " */
- sb_add(m, "PARAMETER=%u ",
- ntohl(ich->un.gateway) >> 24);
- break;
- case ICMP_REDIRECT:
- /* Max length: 24 "GATEWAY=255.255.255.255 " */
- sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
- /* Fall through */
- case ICMP_DEST_UNREACH:
- case ICMP_SOURCE_QUENCH:
- case ICMP_TIME_EXCEEDED:
- /* Max length: 3+maxlen */
- if (!iphoff) { /* Only recurse once. */
- sb_add(m, "[");
- dump_packet(m, info, skb,
- iphoff + ih->ihl*4+sizeof(_icmph));
- sb_add(m, "] ");
- }
-
- /* Max length: 10 "MTU=65535 " */
- if (ich->type == ICMP_DEST_UNREACH &&
- ich->code == ICMP_FRAG_NEEDED)
- sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
- }
- break;
- }
- /* Max Length */
- case IPPROTO_AH: {
- struct ip_auth_hdr _ahdr;
- const struct ip_auth_hdr *ah;
-
- if (ntohs(ih->frag_off) & IP_OFFSET)
- break;
-
- /* Max length: 9 "PROTO=AH " */
- sb_add(m, "PROTO=AH ");
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
- sizeof(_ahdr), &_ahdr);
- if (ah == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ",
- skb->len - iphoff - ih->ihl*4);
- break;
- }
-
- /* Length: 15 "SPI=0xF1234567 " */
- sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
- break;
- }
- case IPPROTO_ESP: {
- struct ip_esp_hdr _esph;
- const struct ip_esp_hdr *eh;
-
- /* Max length: 10 "PROTO=ESP " */
- sb_add(m, "PROTO=ESP ");
-
- if (ntohs(ih->frag_off) & IP_OFFSET)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
- sizeof(_esph), &_esph);
- if (eh == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ",
- skb->len - iphoff - ih->ihl*4);
- break;
- }
-
- /* Length: 15 "SPI=0xF1234567 " */
- sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
- break;
- }
- /* Max length: 10 "PROTO 255 " */
- default:
- sb_add(m, "PROTO=%u ", ih->protocol);
- }
-
- /* Max length: 15 "UID=4294967295 " */
- if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file)
- sb_add(m, "UID=%u GID=%u ",
- skb->sk->sk_socket->file->f_cred->fsuid,
- skb->sk->sk_socket->file->f_cred->fsgid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
- }
-
- /* Max length: 16 "MARK=0xFFFFFFFF " */
- if (!iphoff && skb->mark)
- sb_add(m, "MARK=0x%x ", skb->mark);
-
- /* Proto Max log string length */
- /* IP: 40+46+6+11+127 = 230 */
- /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
- /* UDP: 10+max(25,20) = 35 */
- /* UDPLITE: 14+max(25,20) = 39 */
- /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
- /* ESP: 10+max(25)+15 = 50 */
- /* AH: 9+max(25)+15 = 49 */
- /* unknown: 10 */
-
- /* (ICMP allows recursion one level deep) */
- /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
- /* maxlen = 230+ 91 + 230 + 252 = 803 */
-}
-
-static void dump_mac_header(struct sbuff *m,
- const struct nf_loginfo *info,
- const struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- unsigned int logflags = 0;
-
- if (info->type == NF_LOG_TYPE_LOG)
- logflags = info->u.log.logflags;
-
- if (!(logflags & IPT_LOG_MACDECODE))
- goto fallback;
-
- switch (dev->type) {
- case ARPHRD_ETHER:
- sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
- ntohs(eth_hdr(skb)->h_proto));
- return;
- default:
- break;
- }
-
-fallback:
- sb_add(m, "MAC=");
- if (dev->hard_header_len &&
- skb->mac_header != skb->network_header) {
- const unsigned char *p = skb_mac_header(skb);
- unsigned int i;
-
- sb_add(m, "%02x", *p++);
- for (i = 1; i < dev->hard_header_len; i++, p++)
- sb_add(m, ":%02x", *p);
- }
- sb_add(m, " ");
-}
-
-static struct nf_loginfo default_loginfo = {
- .type = NF_LOG_TYPE_LOG,
- .u = {
- .log = {
- .level = 5,
- .logflags = NF_LOG_MASK,
- },
- },
-};
-
-static void
-ipt_log_packet(u_int8_t pf,
- unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo,
- const char *prefix)
-{
- struct sbuff *m = sb_open();
-
- if (!loginfo)
- loginfo = &default_loginfo;
-
- sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
- prefix,
- in ? in->name : "",
- out ? out->name : "");
-#ifdef CONFIG_BRIDGE_NETFILTER
- if (skb->nf_bridge) {
- const struct net_device *physindev;
- const struct net_device *physoutdev;
-
- physindev = skb->nf_bridge->physindev;
- if (physindev && in != physindev)
- sb_add(m, "PHYSIN=%s ", physindev->name);
- physoutdev = skb->nf_bridge->physoutdev;
- if (physoutdev && out != physoutdev)
- sb_add(m, "PHYSOUT=%s ", physoutdev->name);
- }
-#endif
-
- if (in != NULL)
- dump_mac_header(m, loginfo, skb);
-
- dump_packet(m, loginfo, skb, 0);
-
- sb_close(m);
-}
-
-static unsigned int
-log_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
- const struct ipt_log_info *loginfo = par->targinfo;
- struct nf_loginfo li;
-
- li.type = NF_LOG_TYPE_LOG;
- li.u.log.level = loginfo->level;
- li.u.log.logflags = loginfo->logflags;
-
- ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, &li,
- loginfo->prefix);
- return XT_CONTINUE;
-}
-
-static int log_tg_check(const struct xt_tgchk_param *par)
-{
- const struct ipt_log_info *loginfo = par->targinfo;
-
- if (loginfo->level >= 8) {
- pr_debug("level %u >= 8\n", loginfo->level);
- return -EINVAL;
- }
- if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
- pr_debug("prefix is not null-terminated\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static struct xt_target log_tg_reg __read_mostly = {
- .name = "LOG",
- .family = NFPROTO_IPV4,
- .target = log_tg,
- .targetsize = sizeof(struct ipt_log_info),
- .checkentry = log_tg_check,
- .me = THIS_MODULE,
-};
-
-static struct nf_logger ipt_log_logger __read_mostly = {
- .name = "ipt_LOG",
- .logfn = &ipt_log_packet,
- .me = THIS_MODULE,
-};
-
-static int __init log_tg_init(void)
-{
- int ret;
-
- ret = xt_register_target(&log_tg_reg);
- if (ret < 0)
- return ret;
- nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
- return 0;
-}
-
-static void __exit log_tg_exit(void)
-{
- nf_log_unregister(&ipt_log_logger);
- xt_unregister_target(&log_tg_reg);
-}
-
-module_init(log_tg_init);
-module_exit(log_tg_exit);
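With this file gone (and ip6t_LOG.c removed further down in the same merge), the LOG target presumably moves into a shared xtables module that registers one xt_target per address family. A minimal sketch of what such a combined registration would look like — the names and the xt_log_info type here are illustrative, not taken from this diff:

static struct xt_target log_tg_regs[] __read_mostly = {
	{
		.name		= "LOG",
		.family		= NFPROTO_IPV4,
		.target		= log_tg,	/* shared packet-dumping handler */
		.targetsize	= sizeof(struct xt_log_info),
		.checkentry	= log_tg_check,
		.me		= THIS_MODULE,
	},
	{
		.name		= "LOG",
		.family		= NFPROTO_IPV6,
		.target		= log_tg,
		.targetsize	= sizeof(struct xt_log_info),
		.checkentry	= log_tg_check,
		.me		= THIS_MODULE,
	},
};

static int __init log_tg_init(void)
{
	/* one registration call covers both families */
	return xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}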
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index ab5b27a..7cbe9cb 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -75,25 +75,31 @@
ntohs(tuple->src.u.icmp.id));
}
+static unsigned int *icmp_get_timeouts(struct net *net)
+{
+ return &nf_ct_icmp_timeout;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
static int icmp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeout)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout);
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,
@@ -263,6 +269,44 @@
}
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeout = data;
+
+ if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
+ *timeout =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
+ } else {
+ /* Set default ICMP timeout. */
+ *timeout = nf_ct_icmp_timeout;
+ }
+ return 0;
+}
+
+static int
+icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeout = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
+ [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *icmp_sysctl_header;
static struct ctl_table icmp_sysctl_table[] = {
@@ -298,6 +342,7 @@
.invert_tuple = icmp_invert_tuple,
.print_tuple = icmp_print_tuple,
.packet = icmp_packet,
+ .get_timeouts = icmp_get_timeouts,
.new = icmp_new,
.error = icmp_error,
.destroy = NULL,
@@ -308,6 +353,15 @@
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = icmp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = icmp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_ICMP_MAX,
+ .obj_size = sizeof(unsigned int),
+ .nla_policy = icmp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &icmp_sysctl_header,
.ctl_table = icmp_sysctl_table,
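The new get_timeouts() hook and the extra *timeout argument to ->packet() only pay off together with the matching conntrack-core change (not part of this hunk), which fetches the timeout table once and hands it down. Schematically, as a sketch of the expected call sequence:

	/* in nf_conntrack_in(), roughly: */
	unsigned int *timeouts = l4proto->get_timeouts(net);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);

With nfnetlink_cttimeout, a per-flow timeout policy attached through the ctnl_timeout ops can then be supplied in place of the global default.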
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index a708933d..abb52ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -686,6 +686,11 @@
.exit = nf_nat_net_exit,
};
+static struct nf_ct_helper_expectfn follow_master_nat = {
+ .name = "nat-follow-master",
+ .expectfn = nf_nat_follow_master,
+};
+
static int __init nf_nat_init(void)
{
size_t i;
@@ -717,6 +722,8 @@
l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
+ nf_ct_helper_expectfn_register(&follow_master_nat);
+
BUG_ON(nf_nat_seq_adjust_hook != NULL);
RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -736,6 +743,7 @@
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_l3proto_put(l3proto);
nf_ct_extend_unregister(&nat_extend);
+ nf_ct_helper_expectfn_unregister(&follow_master_nat);
RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
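Registering a named nf_ct_helper_expectfn lets ctnetlink attach the right expectation callback when an expectation is created from user space; the lookup side is by name, along these lines (sketch, assuming the nf_ct_helper_expectfn_find_by_name() helper introduced by the same series):

	struct nf_ct_helper_expectfn *expfn;

	expfn = nf_ct_helper_expectfn_find_by_name("nat-follow-master");
	if (expfn != NULL)
		exp->expectfn = expfn->expectfn;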
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index dc1dd91..8253670 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -568,6 +568,16 @@
return 0;
}
+static struct nf_ct_helper_expectfn q931_nat = {
+ .name = "Q.931",
+ .expectfn = ip_nat_q931_expect,
+};
+
+static struct nf_ct_helper_expectfn callforwarding_nat = {
+ .name = "callforwarding",
+ .expectfn = ip_nat_callforwarding_expect,
+};
+
/****************************************************************************/
static int __init init(void)
{
@@ -590,6 +600,8 @@
RCU_INIT_POINTER(nat_h245_hook, nat_h245);
RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
RCU_INIT_POINTER(nat_q931_hook, nat_q931);
+ nf_ct_helper_expectfn_register(&q931_nat);
+ nf_ct_helper_expectfn_register(&callforwarding_nat);
return 0;
}
@@ -605,6 +617,8 @@
RCU_INIT_POINTER(nat_h245_hook, NULL);
RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
RCU_INIT_POINTER(nat_q931_hook, NULL);
+ nf_ct_helper_expectfn_unregister(&q931_nat);
+ nf_ct_helper_expectfn_unregister(&callforwarding_nat);
synchronize_rcu();
}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index d0319f9..57932c4 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -526,6 +526,11 @@
return NF_DROP;
}
+static struct nf_ct_helper_expectfn sip_nat = {
+ .name = "sip",
+ .expectfn = ip_nat_sip_expected,
+};
+
static void __exit nf_nat_sip_fini(void)
{
RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
@@ -535,6 +540,7 @@
RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
+ nf_ct_helper_expectfn_unregister(&sip_nat);
synchronize_rcu();
}
@@ -554,6 +560,7 @@
RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
+ nf_ct_helper_expectfn_register(&sip_nat);
return 0;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 4398a45..ab6b36e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,7 +156,7 @@
struct hlist_nulls_node *hnode;
pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
- (int)ident, &daddr, dif);
+ (int)ident, &daddr, dif);
read_lock_bh(&ping_table.lock);
ping_portaddr_for_each_entry(sk, hnode, hslot) {
@@ -229,7 +229,7 @@
static void ping_close(struct sock *sk, long timeout)
{
pr_debug("ping_close(sk=%p,sk->num=%u)\n",
- inet_sk(sk), inet_sk(sk)->inet_num);
+ inet_sk(sk), inet_sk(sk)->inet_num);
pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
sk_common_release(sk);
@@ -252,7 +252,7 @@
return -EINVAL;
pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
- sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
+ sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
@@ -280,9 +280,9 @@
}
pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
- (int)isk->inet_num,
- &isk->inet_rcv_saddr,
- (int)sk->sk_bound_dev_if);
+ (int)isk->inet_num,
+ &isk->inet_rcv_saddr,
+ (int)sk->sk_bound_dev_if);
err = 0;
if (isk->inet_rcv_saddr)
@@ -335,7 +335,7 @@
return;
pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
- code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
+ code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
ntohs(icmph->un.echo.id), skb->dev->ifindex);
@@ -679,7 +679,7 @@
static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
- inet_sk(sk), inet_sk(sk)->inet_num, skb);
+ inet_sk(sk), inet_sk(sk)->inet_num, skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
pr_debug("ping_queue_rcv_skb -> failed\n");
@@ -705,7 +705,7 @@
/* We assume the packet has already been checked by icmp_rcv */
pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
- skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
+ skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
/* Push ICMP header back */
skb_push(skb, skb->data - (u8 *)icmph);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ab46630..bbd604c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -491,11 +491,8 @@
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
- static int complained;
- if (!complained++)
- printk(KERN_INFO "%s forgot to set AF_INET in "
- "raw sendmsg. Fix it!\n",
- current->comm);
+ pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
+ __func__, current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
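pr_info_once() replaces the hand-rolled "static int complained" guard with the standard printk_once mechanism, which behaves roughly like this sketch (the real macro lives in include/linux/printk.h):

#define my_pr_info_once(fmt, ...)			\
({							\
	static bool __done;				\
							\
	if (!__done) {					\
		__done = true;				\
		pr_info(fmt, ##__VA_ARGS__);		\
	}						\
})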
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0489ced..12ccf88 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -62,6 +62,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "IPv4: " fmt
+
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -132,7 +134,6 @@
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
-static int redirect_genid;
static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
@@ -937,7 +938,7 @@
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- redirect_genid++;
+ inetpeer_invalidate_tree(AF_INET);
}
/*
@@ -960,7 +961,7 @@
static void rt_emergency_hash_rebuild(struct net *net)
{
if (net_ratelimit())
- printk(KERN_WARNING "Route hash chain too long!\n");
+ pr_warn("Route hash chain too long!\n");
rt_cache_invalidate(net);
}
@@ -1084,7 +1085,7 @@
if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
goto out;
if (net_ratelimit())
- printk(KERN_WARNING "dst cache overflow\n");
+ pr_warn("dst cache overflow\n");
RT_CACHE_STAT_INC(gc_dst_overflow);
return 1;
@@ -1182,8 +1183,7 @@
int err = rt_bind_neighbour(rt);
if (err) {
if (net_ratelimit())
- printk(KERN_WARNING
- "Neighbour table failure & not caching routes.\n");
+ pr_warn("Neighbour table failure & not caching routes\n");
ip_rt_put(rt);
return ERR_PTR(err);
}
@@ -1259,7 +1259,7 @@
struct net *net = dev_net(rt->dst.dev);
int num = ++net->ipv4.current_rt_cache_rebuild_count;
if (!rt_caching(net)) {
- printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
+ pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
rt->dst.dev->name, num);
}
rt_emergency_hash_rebuild(net);
@@ -1300,7 +1300,7 @@
}
if (net_ratelimit())
- printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
+ pr_warn("Neighbour table overflow\n");
rt_drop(rt);
return ERR_PTR(-ENOBUFS);
}
@@ -1490,10 +1490,8 @@
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw ||
- peer->redirect_genid != redirect_genid) {
+ if (peer->redirect_learned.a4 != new_gw) {
peer->redirect_learned.a4 = new_gw;
- peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
@@ -1506,10 +1504,10 @@
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
+ pr_info("Redirect from %pI4 on %s about %pI4 ignored\n"
" Advised path = %pI4 -> %pI4\n",
- &old_gw, dev->name, &new_gw,
- &saddr, &daddr);
+ &old_gw, dev->name, &new_gw,
+ &saddr, &daddr);
#endif
;
}
@@ -1621,8 +1619,8 @@
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
- printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
- &ip_hdr(skb)->saddr, rt->rt_iif,
+ pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, rt->rt_iif,
&rt->rt_dst, &rt->rt_gateway);
#endif
}
@@ -1798,8 +1796,6 @@
if (peer) {
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway)
check_peer_redir(&rt->dst, peer);
@@ -1963,8 +1959,7 @@
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
+
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
@@ -2111,18 +2106,13 @@
* RFC1812 recommendation, if source is martian,
* the only hint is MAC header.
*/
- printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
+ pr_warn("martian source %pI4 from %pI4, on dev %s\n",
&daddr, &saddr, dev->name);
if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
- int i;
- const unsigned char *p = skb_mac_header(skb);
- printk(KERN_WARNING "ll header: ");
- for (i = 0; i < dev->hard_header_len; i++, p++) {
- printk("%02x", *p);
- if (i < (dev->hard_header_len - 1))
- printk(":");
- }
- printk("\n");
+ print_hex_dump(KERN_WARNING, "ll header: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ skb_mac_header(skb),
+ dev->hard_header_len, true);
}
}
#endif
@@ -2146,8 +2136,7 @@
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
if (net_ratelimit())
- printk(KERN_CRIT "Bug in ip_route_input" \
- "_slow(). Please, report\n");
+ pr_crit("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
@@ -2419,7 +2408,7 @@
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
+ pr_warn("martian destination %pI4 from %pI4, dev %s\n",
&daddr, &saddr, dev->name);
#endif
@@ -3496,7 +3485,7 @@
net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
if (ip_rt_proc_init())
- printk(KERN_ERR "Unable to create route proc files\n");
+ pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
xfrm4_init(ip_rt_max_size);
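The open-coded hex loop for the link-layer header becomes a single print_hex_dump() call. For reference, the arguments as used above map onto the printk.h helper as follows (the comments are a gloss, not from the patch):

	print_hex_dump(KERN_WARNING,		/* log level                  */
		       "ll header: ",		/* prefix printed on each row */
		       DUMP_PREFIX_OFFSET,	/* prefix rows with offsets   */
		       16,			/* 16 bytes per row           */
		       1,			/* group as single bytes      */
		       skb_mac_header(skb),	/* buffer to dump             */
		       dev->hard_header_len,	/* length                     */
		       true);			/* append an ASCII column     */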
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 22ef5f9..cfd7edd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -245,6 +245,8 @@
* TCP_CLOSE socket is finished
*/
+#define pr_fmt(fmt) "TCP: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -1675,7 +1677,8 @@
if (tp->ucopy.dma_cookie < 0) {
- printk(KERN_ALERT "dma_cookie < 0\n");
+ pr_alert("%s: dma_cookie < 0\n",
+ __func__);
/* Exception. Bailout! */
if (!copied)
@@ -1884,9 +1887,9 @@
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans && net_ratelimit())
- pr_info("TCP: too many orphaned sockets\n");
+ pr_info("too many orphaned sockets\n");
if (out_of_socket_memory && net_ratelimit())
- pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
+ pr_info("out of memory -- consider tuning tcp_mem\n");
return too_many_orphans || out_of_socket_memory;
}
@@ -3311,9 +3314,8 @@
sysctl_tcp_rmem[1] = 87380;
sysctl_tcp_rmem[2] = max(87380, max_share);
- printk(KERN_INFO "TCP: Hash tables configured "
- "(established %u bind %u)\n",
- tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
+ pr_info("Hash tables configured (established %u bind %u)\n",
+ tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
tcp_register_congestion_control(&tcp_reno);
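The new pr_fmt() define is what lets the messages above drop their hard-coded "TCP: " prefix: pr_info() and friends expand to printk() with pr_fmt() applied to the format string, so every call site in the file picks up the prefix automatically. Schematically:

/* include/linux/printk.h, simplified: */
#define pr_info(fmt, ...)	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

/* so, with the pr_fmt() above in effect: */
pr_info("too many orphaned sockets\n");	/* prints "TCP: too many orphaned sockets" */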
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index fc6d475..272a845 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -6,6 +6,8 @@
* Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
*/
+#define pr_fmt(fmt) "TCP: " fmt
+
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
@@ -41,18 +43,17 @@
/* all algorithms must implement ssthresh and cong_avoid ops */
if (!ca->ssthresh || !ca->cong_avoid) {
- printk(KERN_ERR "TCP %s does not implement required ops\n",
- ca->name);
+ pr_err("%s does not implement required ops\n", ca->name);
return -EINVAL;
}
spin_lock(&tcp_cong_list_lock);
if (tcp_ca_find(ca->name)) {
- printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
+ pr_notice("%s already registered\n", ca->name);
ret = -EEXIST;
} else {
list_add_tail_rcu(&ca->list, &tcp_cong_list);
- printk(KERN_INFO "TCP %s registered\n", ca->name);
+ pr_info("%s registered\n", ca->name);
}
spin_unlock(&tcp_cong_list_lock);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9b83d1..68d4057 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -61,6 +61,8 @@
* Pasi Sarolahti: F-RTO for dealing with spurious RTOs
*/
+#define pr_fmt(fmt) "TCP: " fmt
+
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -1585,6 +1587,10 @@
}
}
+ /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
+ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+ goto fallback;
+
if (!skb_shift(prev, skb, len))
goto fallback;
if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
@@ -3863,9 +3869,9 @@
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
if (net_ratelimit())
- printk(KERN_INFO "tcp_parse_options: Illegal window "
- "scaling value %d >14 received.\n",
- snd_wscale);
+ pr_info("%s: Illegal window scaling value %d >14 received\n",
+ __func__,
+ snd_wscale);
snd_wscale = 14;
}
opt_rx->snd_wscale = snd_wscale;
@@ -4187,7 +4193,7 @@
/* Only TCP_LISTEN and TCP_CLOSE are left, in these
* cases we should never reach this piece of code.
*/
- printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
+ pr_err("%s: Impossible, sk->sk_state=%d\n",
__func__, sk->sk_state);
break;
}
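The snd_una guard added above leans on the standard TCP sequence-number comparisons; for reference, they are defined in include/net/tcp.h as wrap-safe 32-bit compares, so the shift is skipped whenever the candidate range ends at or below snd_una:

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)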
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94abee8..fe9f604 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -50,6 +50,7 @@
* a single port at the same time.
*/
+#define pr_fmt(fmt) "TCP: " fmt
#include <linux/bottom_half.h>
#include <linux/types.h>
@@ -876,8 +877,7 @@
lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
if (!lopt->synflood_warned) {
lopt->synflood_warned = 1;
- pr_info("%s: Possible SYN flooding on port %d. %s. "
- " Check SNMP counters.\n",
+ pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, ntohs(tcp_hdr(skb)->dest), msg);
}
return want_cookie;
@@ -927,7 +927,8 @@
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
- sock_owned_by_user(sk));
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
if (!md5sig)
return NULL;
#if IS_ENABLED(CONFIG_IPV6)
@@ -1226,10 +1227,10 @@
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
- printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
- &iph->saddr, ntohs(th->source),
- &iph->daddr, ntohs(th->dest),
- genhash ? " tcp_v4_calc_md5_hash failed" : "");
+ pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+ &iph->saddr, ntohs(th->source),
+ &iph->daddr, ntohs(th->dest),
+ genhash ? " tcp_v4_calc_md5_hash failed" : "");
}
return 1;
}
@@ -1398,7 +1399,7 @@
* to destinations, already remembered
* to the moment of synflood.
*/
- LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
+ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
&saddr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}
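Unlike pr_info(), LIMIT_NETDEBUG() receives the complete format string, log level included, so the file-level prefix has to be spliced in by hand — hence the explicit pr_fmt() wrapping at these call sites. Roughly (ratelimit plumbing as in include/net/sock.h):

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

/* which is why the prefix is applied explicitly: */
LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
	       &saddr, ntohs(tcp_hdr(skb)->source));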
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 85ee7eb..a981cdc 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/socket.h>
@@ -239,7 +241,7 @@
if (ret)
goto err1;
- pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
+ pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
return 0;
err1:
proc_net_remove(&init_net, procname);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index cd2e072..34d4a02 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -333,16 +333,18 @@
*/
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
- LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
- &inet->inet_daddr, ntohs(inet->inet_dport),
- inet->inet_num, tp->snd_una, tp->snd_nxt);
+ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
+ &inet->inet_daddr,
+ ntohs(inet->inet_dport), inet->inet_num,
+ tp->snd_una, tp->snd_nxt);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
- &np->daddr, ntohs(inet->inet_dport),
- inet->inet_num, tp->snd_una, tp->snd_nxt);
+ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
+ &np->daddr,
+ ntohs(inet->inet_dport), inet->inet_num,
+ tp->snd_una, tp->snd_nxt);
}
#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 0177598..0d01718 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -164,12 +164,12 @@
static int __init tunnel4_init(void)
{
if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) {
- printk(KERN_ERR "tunnel4 init: can't add protocol\n");
+ pr_err("%s: can't add protocol\n", __func__);
return -EAGAIN;
}
#if IS_ENABLED(CONFIG_IPV6)
if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
- printk(KERN_ERR "tunnel64 init: can't add protocol\n");
+ pr_err("tunnel64 init: can't add protocol\n");
inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
return -EAGAIN;
}
@@ -181,10 +181,10 @@
{
#if IS_ENABLED(CONFIG_IPV6)
if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
- printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
+ pr_err("tunnel64 close: can't remove protocol\n");
#endif
if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
- printk(KERN_ERR "tunnel4 close: can't remove protocol\n");
+ pr_err("tunnel4 close: can't remove protocol\n");
}
module_init(tunnel4_init);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7c41ab8..d6f5fee 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -77,6 +77,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "UDP: " fmt
+
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@@ -975,7 +977,7 @@
/* ... which is an evident application bug. --ANK */
release_sock(sk);
- LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
+ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n"));
err = -EINVAL;
goto out;
}
@@ -1054,7 +1056,7 @@
if (unlikely(!up->pending)) {
release_sock(sk);
- LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
+ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n"));
return -EINVAL;
}
@@ -1447,9 +1449,8 @@
* provided by the application."
*/
if (up->pcrlen == 0) { /* full coverage was set */
- LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
- "%d while full coverage %d requested\n",
- UDP_SKB_CB(skb)->cscov, skb->len);
+ LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
/* The next case involves violating the min. coverage requested
@@ -1459,9 +1460,8 @@
* Therefore the above ...()->partial_cov statement is essential.
*/
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
- LIMIT_NETDEBUG(KERN_WARNING
- "UDPLITE: coverage %d too small, need min %d\n",
- UDP_SKB_CB(skb)->cscov, up->pcrlen);
+ LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n",
+ UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
@@ -1689,13 +1689,10 @@
short_packet:
LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
- proto == IPPROTO_UDPLITE ? "-Lite" : "",
- &saddr,
- ntohs(uh->source),
- ulen,
- skb->len,
- &daddr,
- ntohs(uh->dest));
+ proto == IPPROTO_UDPLITE ? "Lite" : "",
+ &saddr, ntohs(uh->source),
+ ulen, skb->len,
+ &daddr, ntohs(uh->dest));
goto drop;
csum_error:
@@ -1704,11 +1701,8 @@
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
- proto == IPPROTO_UDPLITE ? "-Lite" : "",
- &saddr,
- ntohs(uh->source),
- &daddr,
- ntohs(uh->dest),
+ proto == IPPROTO_UDPLITE ? "Lite" : "",
+ &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
ulen);
drop:
UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 12e9499..2c46acd 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -10,6 +10,9 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#define pr_fmt(fmt) "UDPLite: " fmt
+
#include <linux/export.h>
#include "udp_impl.h"
@@ -129,11 +132,11 @@
inet_register_protosw(&udplite4_protosw);
if (udplite4_proc_init())
- printk(KERN_ERR "%s: Cannot register /proc!\n", __func__);
+ pr_err("%s: Cannot register /proc!\n", __func__);
return;
out_unregister_proto:
proto_unregister(&udplite_prot);
out_register_err:
- printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__);
+ pr_crit("%s: Cannot add UDP-Lite protocol\n", __func__);
}
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 9247d9d..05a5df2 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -3,6 +3,8 @@
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*/
+#define pr_fmt(fmt) "IPsec: " fmt
+
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -75,18 +77,18 @@
static int __init ipip_init(void)
{
if (xfrm_register_type(&ipip_type, AF_INET) < 0) {
- printk(KERN_INFO "ipip init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) {
- printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET\n");
+ pr_info("%s: can't add xfrm handler for AF_INET\n", __func__);
xfrm_unregister_type(&ipip_type, AF_INET);
return -EAGAIN;
}
#if IS_ENABLED(CONFIG_IPV6)
if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
- printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n");
+ pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__);
xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
xfrm_unregister_type(&ipip_type, AF_INET);
return -EAGAIN;
@@ -99,12 +101,14 @@
{
#if IS_ENABLED(CONFIG_IPV6)
if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
- printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n");
+ pr_info("%s: can't remove xfrm handler for AF_INET6\n",
+ __func__);
#endif
if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET))
- printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET\n");
+ pr_info("%s: can't remove xfrm handler for AF_INET\n",
+ __func__);
if (xfrm_unregister_type(&ipip_type, AF_INET) < 0)
- printk(KERN_INFO "ipip close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ipip_init);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c02280a..6a3bb60 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -434,6 +434,10 @@
/* Join all-node multicast group */
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+ /* Join all-router multicast group if forwarding is set */
+ if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+
return ndev;
}
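Joining the all-routers group at device init only covers interfaces that already have forwarding enabled when they come up; the matching inc/dec on later sysctl changes presumably sits in dev_forward_change(), roughly like this sketch (not part of this hunk):

static void dev_forward_change(struct inet6_dev *idev)
{
	struct net_device *dev = idev->dev;

	if (dev->flags & IFF_MULTICAST) {
		if (idev->cnf.forwarding)
			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
		else
			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
	}
	/* per-address forwarding state is updated elsewhere */
}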
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 9a68fb5..d33cddd1 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -154,15 +154,6 @@
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_TARGET_HL.
-config IP6_NF_TARGET_LOG
- tristate "LOG target support"
- default m if NETFILTER_ADVANCED=n
- help
- This option adds a `LOG' target, which allows you to create rules in
- any iptables table which records the packet header to the syslog.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2eaed96..d4dfd0a 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -31,5 +31,4 @@
obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
# targets
-obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
deleted file mode 100644
index e6af8d7..0000000
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * This is a module which is used for logging packets.
- */
-
-/* (C) 2001 Jan Rekorajski <baggins@pld.org.pl>
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ip.h>
-#include <linux/spinlock.h>
-#include <linux/icmpv6.h>
-#include <net/udp.h>
-#include <net/tcp.h>
-#include <net/ipv6.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <net/netfilter/nf_log.h>
-#include <net/netfilter/xt_log.h>
-
-MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
-MODULE_DESCRIPTION("Xtables: IPv6 packet logging to syslog");
-MODULE_LICENSE("GPL");
-
-struct in_device;
-#include <net/route.h>
-#include <linux/netfilter_ipv6/ip6t_LOG.h>
-
-/* One level of recursion won't kill us */
-static void dump_packet(struct sbuff *m,
- const struct nf_loginfo *info,
- const struct sk_buff *skb, unsigned int ip6hoff,
- int recurse)
-{
- u_int8_t currenthdr;
- int fragment;
- struct ipv6hdr _ip6h;
- const struct ipv6hdr *ih;
- unsigned int ptr;
- unsigned int hdrlen = 0;
- unsigned int logflags;
-
- if (info->type == NF_LOG_TYPE_LOG)
- logflags = info->u.log.logflags;
- else
- logflags = NF_LOG_MASK;
-
- ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
- if (ih == NULL) {
- sb_add(m, "TRUNCATED");
- return;
- }
-
- /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
- sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
-
- /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
- sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
- ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
- (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
- ih->hop_limit,
- (ntohl(*(__be32 *)ih) & 0x000fffff));
-
- fragment = 0;
- ptr = ip6hoff + sizeof(struct ipv6hdr);
- currenthdr = ih->nexthdr;
- while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
- struct ipv6_opt_hdr _hdr;
- const struct ipv6_opt_hdr *hp;
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- if (hp == NULL) {
- sb_add(m, "TRUNCATED");
- return;
- }
-
- /* Max length: 48 "OPT (...) " */
- if (logflags & IP6T_LOG_IPOPT)
- sb_add(m, "OPT ( ");
-
- switch (currenthdr) {
- case IPPROTO_FRAGMENT: {
- struct frag_hdr _fhdr;
- const struct frag_hdr *fh;
-
- sb_add(m, "FRAG:");
- fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
- &_fhdr);
- if (fh == NULL) {
- sb_add(m, "TRUNCATED ");
- return;
- }
-
- /* Max length: 6 "65535 " */
- sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
-
- /* Max length: 11 "INCOMPLETE " */
- if (fh->frag_off & htons(0x0001))
- sb_add(m, "INCOMPLETE ");
-
- sb_add(m, "ID:%08x ", ntohl(fh->identification));
-
- if (ntohs(fh->frag_off) & 0xFFF8)
- fragment = 1;
-
- hdrlen = 8;
-
- break;
- }
- case IPPROTO_DSTOPTS:
- case IPPROTO_ROUTING:
- case IPPROTO_HOPOPTS:
- if (fragment) {
- if (logflags & IP6T_LOG_IPOPT)
- sb_add(m, ")");
- return;
- }
- hdrlen = ipv6_optlen(hp);
- break;
- /* Max Length */
- case IPPROTO_AH:
- if (logflags & IP6T_LOG_IPOPT) {
- struct ip_auth_hdr _ahdr;
- const struct ip_auth_hdr *ah;
-
- /* Max length: 3 "AH " */
- sb_add(m, "AH ");
-
- if (fragment) {
- sb_add(m, ")");
- return;
- }
-
- ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
- &_ahdr);
- if (ah == NULL) {
- /*
- * Max length: 26 "INCOMPLETE [65535
- * bytes] )"
- */
- sb_add(m, "INCOMPLETE [%u bytes] )",
- skb->len - ptr);
- return;
- }
-
- /* Length: 15 "SPI=0xF1234567 */
- sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
-
- }
-
- hdrlen = (hp->hdrlen+2)<<2;
- break;
- case IPPROTO_ESP:
- if (logflags & IP6T_LOG_IPOPT) {
- struct ip_esp_hdr _esph;
- const struct ip_esp_hdr *eh;
-
- /* Max length: 4 "ESP " */
- sb_add(m, "ESP ");
-
- if (fragment) {
- sb_add(m, ")");
- return;
- }
-
- /*
- * Max length: 26 "INCOMPLETE [65535 bytes] )"
- */
- eh = skb_header_pointer(skb, ptr, sizeof(_esph),
- &_esph);
- if (eh == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] )",
- skb->len - ptr);
- return;
- }
-
- /* Length: 16 "SPI=0xF1234567 )" */
- sb_add(m, "SPI=0x%x )", ntohl(eh->spi) );
-
- }
- return;
- default:
- /* Max length: 20 "Unknown Ext Hdr 255" */
- sb_add(m, "Unknown Ext Hdr %u", currenthdr);
- return;
- }
- if (logflags & IP6T_LOG_IPOPT)
- sb_add(m, ") ");
-
- currenthdr = hp->nexthdr;
- ptr += hdrlen;
- }
-
- switch (currenthdr) {
- case IPPROTO_TCP: {
- struct tcphdr _tcph;
- const struct tcphdr *th;
-
- /* Max length: 10 "PROTO=TCP " */
- sb_add(m, "PROTO=TCP ");
-
- if (fragment)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- th = skb_header_pointer(skb, ptr, sizeof(_tcph), &_tcph);
- if (th == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
- return;
- }
-
- /* Max length: 20 "SPT=65535 DPT=65535 " */
- sb_add(m, "SPT=%u DPT=%u ",
- ntohs(th->source), ntohs(th->dest));
- /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
- if (logflags & IP6T_LOG_TCPSEQ)
- sb_add(m, "SEQ=%u ACK=%u ",
- ntohl(th->seq), ntohl(th->ack_seq));
- /* Max length: 13 "WINDOW=65535 " */
- sb_add(m, "WINDOW=%u ", ntohs(th->window));
- /* Max length: 9 "RES=0x3C " */
- sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
- /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
- if (th->cwr)
- sb_add(m, "CWR ");
- if (th->ece)
- sb_add(m, "ECE ");
- if (th->urg)
- sb_add(m, "URG ");
- if (th->ack)
- sb_add(m, "ACK ");
- if (th->psh)
- sb_add(m, "PSH ");
- if (th->rst)
- sb_add(m, "RST ");
- if (th->syn)
- sb_add(m, "SYN ");
- if (th->fin)
- sb_add(m, "FIN ");
- /* Max length: 11 "URGP=65535 " */
- sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
-
- if ((logflags & IP6T_LOG_TCPOPT) &&
- th->doff * 4 > sizeof(struct tcphdr)) {
- u_int8_t _opt[60 - sizeof(struct tcphdr)];
- const u_int8_t *op;
- unsigned int i;
- unsigned int optsize = th->doff * 4
- - sizeof(struct tcphdr);
-
- op = skb_header_pointer(skb,
- ptr + sizeof(struct tcphdr),
- optsize, _opt);
- if (op == NULL) {
- sb_add(m, "OPT (TRUNCATED)");
- return;
- }
-
- /* Max length: 127 "OPT (" 15*4*2chars ") " */
- sb_add(m, "OPT (");
- for (i =0; i < optsize; i++)
- sb_add(m, "%02X", op[i]);
- sb_add(m, ") ");
- }
- break;
- }
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE: {
- struct udphdr _udph;
- const struct udphdr *uh;
-
- if (currenthdr == IPPROTO_UDP)
- /* Max length: 10 "PROTO=UDP " */
- sb_add(m, "PROTO=UDP " );
- else /* Max length: 14 "PROTO=UDPLITE " */
- sb_add(m, "PROTO=UDPLITE ");
-
- if (fragment)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- uh = skb_header_pointer(skb, ptr, sizeof(_udph), &_udph);
- if (uh == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
- return;
- }
-
- /* Max length: 20 "SPT=65535 DPT=65535 " */
- sb_add(m, "SPT=%u DPT=%u LEN=%u ",
- ntohs(uh->source), ntohs(uh->dest),
- ntohs(uh->len));
- break;
- }
- case IPPROTO_ICMPV6: {
- struct icmp6hdr _icmp6h;
- const struct icmp6hdr *ic;
-
- /* Max length: 13 "PROTO=ICMPv6 " */
- sb_add(m, "PROTO=ICMPv6 ");
-
- if (fragment)
- break;
-
- /* Max length: 25 "INCOMPLETE [65535 bytes] " */
- ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
- if (ic == NULL) {
- sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
- return;
- }
-
- /* Max length: 18 "TYPE=255 CODE=255 " */
- sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
-
- switch (ic->icmp6_type) {
- case ICMPV6_ECHO_REQUEST:
- case ICMPV6_ECHO_REPLY:
- /* Max length: 19 "ID=65535 SEQ=65535 " */
- sb_add(m, "ID=%u SEQ=%u ",
- ntohs(ic->icmp6_identifier),
- ntohs(ic->icmp6_sequence));
- break;
- case ICMPV6_MGM_QUERY:
- case ICMPV6_MGM_REPORT:
- case ICMPV6_MGM_REDUCTION:
- break;
-
- case ICMPV6_PARAMPROB:
- /* Max length: 17 "POINTER=ffffffff " */
- sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
- /* Fall through */
- case ICMPV6_DEST_UNREACH:
- case ICMPV6_PKT_TOOBIG:
- case ICMPV6_TIME_EXCEED:
- /* Max length: 3+maxlen */
- if (recurse) {
- sb_add(m, "[");
- dump_packet(m, info, skb,
- ptr + sizeof(_icmp6h), 0);
- sb_add(m, "] ");
- }
-
- /* Max length: 10 "MTU=65535 " */
- if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
- sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
- }
- break;
- }
- /* Max length: 10 "PROTO=255 " */
- default:
- sb_add(m, "PROTO=%u ", currenthdr);
- }
-
- /* Max length: 15 "UID=4294967295 " */
- if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file)
- sb_add(m, "UID=%u GID=%u ",
- skb->sk->sk_socket->file->f_cred->fsuid,
- skb->sk->sk_socket->file->f_cred->fsgid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
- }
-
- /* Max length: 16 "MARK=0xFFFFFFFF " */
- if (!recurse && skb->mark)
- sb_add(m, "MARK=0x%x ", skb->mark);
-}
-
-static void dump_mac_header(struct sbuff *m,
- const struct nf_loginfo *info,
- const struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- unsigned int logflags = 0;
-
- if (info->type == NF_LOG_TYPE_LOG)
- logflags = info->u.log.logflags;
-
- if (!(logflags & IP6T_LOG_MACDECODE))
- goto fallback;
-
- switch (dev->type) {
- case ARPHRD_ETHER:
- sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
- ntohs(eth_hdr(skb)->h_proto));
- return;
- default:
- break;
- }
-
-fallback:
- sb_add(m, "MAC=");
- if (dev->hard_header_len &&
- skb->mac_header != skb->network_header) {
- const unsigned char *p = skb_mac_header(skb);
- unsigned int len = dev->hard_header_len;
- unsigned int i;
-
- if (dev->type == ARPHRD_SIT &&
- (p -= ETH_HLEN) < skb->head)
- p = NULL;
-
- if (p != NULL) {
- sb_add(m, "%02x", *p++);
- for (i = 1; i < len; i++)
- sb_add(m, ":%02x", *p++);
- }
- sb_add(m, " ");
-
- if (dev->type == ARPHRD_SIT) {
- const struct iphdr *iph =
- (struct iphdr *)skb_mac_header(skb);
- sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
- }
- } else
- sb_add(m, " ");
-}
-
-static struct nf_loginfo default_loginfo = {
- .type = NF_LOG_TYPE_LOG,
- .u = {
- .log = {
- .level = 5,
- .logflags = NF_LOG_MASK,
- },
- },
-};
-
-static void
-ip6t_log_packet(u_int8_t pf,
- unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo,
- const char *prefix)
-{
- struct sbuff *m = sb_open();
-
- if (!loginfo)
- loginfo = &default_loginfo;
-
- sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
- prefix,
- in ? in->name : "",
- out ? out->name : "");
-
- if (in != NULL)
- dump_mac_header(m, loginfo, skb);
-
- dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
-
- sb_close(m);
-}
-
-static unsigned int
-log_tg6(struct sk_buff *skb, const struct xt_action_param *par)
-{
- const struct ip6t_log_info *loginfo = par->targinfo;
- struct nf_loginfo li;
-
- li.type = NF_LOG_TYPE_LOG;
- li.u.log.level = loginfo->level;
- li.u.log.logflags = loginfo->logflags;
-
- ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out,
- &li, loginfo->prefix);
- return XT_CONTINUE;
-}
-
-
-static int log_tg6_check(const struct xt_tgchk_param *par)
-{
- const struct ip6t_log_info *loginfo = par->targinfo;
-
- if (loginfo->level >= 8) {
- pr_debug("level %u >= 8\n", loginfo->level);
- return -EINVAL;
- }
- if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
- pr_debug("prefix not null-terminated\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static struct xt_target log_tg6_reg __read_mostly = {
- .name = "LOG",
- .family = NFPROTO_IPV6,
- .target = log_tg6,
- .targetsize = sizeof(struct ip6t_log_info),
- .checkentry = log_tg6_check,
- .me = THIS_MODULE,
-};
-
-static struct nf_logger ip6t_logger __read_mostly = {
- .name = "ip6t_LOG",
- .logfn = &ip6t_log_packet,
- .me = THIS_MODULE,
-};
-
-static int __init log_tg6_init(void)
-{
- int ret;
-
- ret = xt_register_target(&log_tg6_reg);
- if (ret < 0)
- return ret;
- nf_log_register(NFPROTO_IPV6, &ip6t_logger);
- return 0;
-}
-
-static void __exit log_tg6_exit(void)
-{
- nf_log_unregister(&ip6t_logger);
- xt_unregister_target(&log_tg6_reg);
-}
-
-module_init(log_tg6_init);
-module_exit(log_tg6_exit);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 7c05e7e..92cc9f2 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -88,25 +88,31 @@
ntohs(tuple->src.u.icmp.id));
}
+static unsigned int *icmpv6_get_timeouts(struct net *net)
+{
+ return &nf_ct_icmpv6_timeout;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
static int icmpv6_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeout)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
static const u_int8_t valid_new[] = {
[ICMPV6_ECHO_REQUEST - 128] = 1,
@@ -270,6 +276,44 @@
}
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeout = data;
+
+ if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
+ *timeout =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
+ } else {
+ /* Set default ICMPv6 timeout. */
+ *timeout = nf_ct_icmpv6_timeout;
+ }
+ return 0;
+}
+
+static int
+icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeout = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
+ [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *icmpv6_sysctl_header;
static struct ctl_table icmpv6_sysctl_table[] = {
@@ -293,6 +337,7 @@
.invert_tuple = icmpv6_invert_tuple,
.print_tuple = icmpv6_print_tuple,
.packet = icmpv6_packet,
+ .get_timeouts = icmpv6_get_timeouts,
.new = icmpv6_new,
.error = icmpv6_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
@@ -301,6 +346,15 @@
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
+ .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_ICMPV6_MAX,
+ .obj_size = sizeof(unsigned int),
+ .nla_policy = icmpv6_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &icmpv6_sysctl_header,
.ctl_table = icmpv6_sysctl_table,
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 98d1f0b..07d7d55 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -165,8 +165,6 @@
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, node, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
- skb_queue_purge(&iucv->send_skb_q);
- skb_queue_purge(&iucv->backlog_skb_q);
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
@@ -405,7 +403,19 @@
static void iucv_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
+
+ sk_mem_reclaim(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_err("Attempt to release alive iucv socket %p\n", sk);
+ return;
+ }
+
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(sk->sk_wmem_queued);
+ WARN_ON(sk->sk_forward_alloc);
}
/* Cleanup Listen */
@@ -453,14 +463,28 @@
}
}
+/* Send FIN through an IUCV socket for HIPER transport */
+static int iucv_send_ctrl(struct sock *sk, u8 flags)
+{
+ int err = 0;
+ int blen;
+ struct sk_buff *skb;
+
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (skb) {
+ skb_reserve(skb, blen);
+ err = afiucv_hs_send(NULL, sk, skb, flags);
+ }
+ return err;
+}
+
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
int err = 0;
- int blen;
- struct sk_buff *skb;
lock_sock(sk);
@@ -471,14 +495,7 @@
case IUCV_CONNECTED:
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
- /* send fin */
- blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
- skb = sock_alloc_send_skb(sk, blen, 1, &err);
- if (skb) {
- skb_reserve(skb, blen);
- err = afiucv_hs_send(NULL, sk, skb,
- AF_IUCV_FLAG_FIN);
- }
+ err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
@@ -782,26 +799,6 @@
return err;
}
-static int afiucv_hs_connect(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct sk_buff *skb;
- int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
- int err = 0;
-
- /* send syn */
- skb = sock_alloc_send_skb(sk, blen, 1, &err);
- if (!skb) {
- err = -ENOMEM;
- goto done;
- }
- skb->dev = NULL;
- skb_reserve(skb, blen);
- err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
-done:
- return err;
-}
-
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
@@ -882,7 +879,7 @@
memcpy(iucv->dst_name, sa->siucv_name, 8);
if (iucv->transport == AF_IUCV_TRANS_HIPER)
- err = afiucv_hs_connect(sock);
+ err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
else
err = afiucv_path_connect(sock, addr);
if (err)
@@ -1332,8 +1329,7 @@
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
- struct sk_buff *skb, *rskb, *cskb, *sskb;
- int blen;
+ struct sk_buff *skb, *rskb, *cskb;
int err = 0;
if ((sk->sk_state == IUCV_DISCONN) &&
@@ -1356,6 +1352,8 @@
rlen = skb->len; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
+ if (!rlen)
+ sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
@@ -1422,15 +1420,7 @@
iucv_process_message_q(sk);
if (atomic_read(&iucv->msg_recv) >=
iucv->msglimit / 2) {
- /* send WIN to peer */
- blen = sizeof(struct af_iucv_trans_hdr) +
- ETH_HLEN;
- sskb = sock_alloc_send_skb(sk, blen, 1, &err);
- if (sskb) {
- skb_reserve(sskb, blen);
- err = afiucv_hs_send(NULL, sk, sskb,
- AF_IUCV_FLAG_WIN);
- }
+ err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
if (err) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
@@ -1515,42 +1505,47 @@
lock_sock(sk);
switch (sk->sk_state) {
+ case IUCV_LISTEN:
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
-
default:
- sk->sk_shutdown |= how;
break;
}
if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
- txmsg.class = 0;
- txmsg.tag = 0;
- err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
- 0, (void *) iprm_shutdown, 8);
- if (err) {
- switch (err) {
- case 1:
- err = -ENOTCONN;
- break;
- case 2:
- err = -ECONNRESET;
- break;
- default:
- err = -ENOTCONN;
- break;
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ txmsg.class = 0;
+ txmsg.tag = 0;
+ err = pr_iucv->message_send(iucv->path, &txmsg,
+ IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
+ if (err) {
+ switch (err) {
+ case 1:
+ err = -ENOTCONN;
+ break;
+ case 2:
+ err = -ECONNRESET;
+ break;
+ default:
+ err = -ENOTCONN;
+ break;
+ }
}
- }
+ } else
+ iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
}
+ sk->sk_shutdown |= how;
if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
- err = pr_iucv->path_quiesce(iucv->path, NULL);
- if (err)
- err = -ENOTCONN;
-
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ err = pr_iucv->path_quiesce(iucv->path, NULL);
+ if (err)
+ err = -ENOTCONN;
+/* skb_queue_purge(&sk->sk_receive_queue); */
+ }
skb_queue_purge(&sk->sk_receive_queue);
}
@@ -2088,8 +2083,13 @@
return NET_RX_SUCCESS;
}
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
/* write stuff from iucv_msg to skb cb */
- if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
+ if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
@@ -2195,7 +2195,10 @@
kfree_skb(skb);
break;
}
- /* fall through */
+ /* fall through and receive non-zero length data */
+ case (AF_IUCV_FLAG_SHT):
+ /* shutdown request */
+ /* fall through and receive zero length data */
case 0:
/* plain data frame */
memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2289,6 +2292,44 @@
}
}
+
+/*
+ * afiucv_netdev_event: handle netdev notifier chain events
+ */
+static int afiucv_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = (struct net_device *)ptr;
+ struct hlist_node *node;
+ struct sock *sk;
+ struct iucv_sock *iucv;
+
+ switch (event) {
+ case NETDEV_REBOOT:
+ case NETDEV_GOING_DOWN:
+ sk_for_each(sk, node, &iucv_sk_list.head) {
+ iucv = iucv_sk(sk);
+ if ((iucv->hs_dev == event_dev) &&
+ (sk->sk_state == IUCV_CONNECTED)) {
+ if (event == NETDEV_GOING_DOWN)
+ iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ }
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block afiucv_netdev_notifier = {
+ .notifier_call = afiucv_netdev_event,
+};
+
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
@@ -2388,7 +2429,8 @@
err = afiucv_iucv_init();
if (err)
goto out_sock;
- }
+ } else
+ register_netdevice_notifier(&afiucv_netdev_notifier);
dev_add_pack(&iucv_packet_type);
return 0;
@@ -2409,7 +2451,8 @@
driver_unregister(&af_iucv_driver);
pr_iucv->iucv_unregister(&af_iucv_handler, 0);
symbol_put(iucv_if);
- }
+ } else
+ unregister_netdevice_notifier(&afiucv_netdev_notifier);
dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);
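
The afiucv_netdev_notifier added above follows the standard netdev notifier pattern. A minimal toy module showing the same registration flow, as a sketch only: the callback name and message are made up, and passing the net_device directly through ptr matches kernels of this era.

#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* kernels of this era pass the device directly */

	if (event == NETDEV_GOING_DOWN)
		pr_info("demo: %s is going down\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
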
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6a77d4c..677d659 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -336,6 +336,20 @@
rate->mcs = idx;
}
+void sta_set_rate_info_tx(struct sta_info *sta,
+ const struct ieee80211_tx_rate *rate,
+ struct rate_info *rinfo)
+{
+ rinfo->flags = 0;
+ if (rate->flags & IEEE80211_TX_RC_MCS)
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate_idx_to_bitrate(rinfo, sta, rate->idx);
+}
+
static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -378,14 +392,7 @@
sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
}
- sinfo->txrate.flags = 0;
- if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
- sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
- if (sta->last_tx_rate.flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
- sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
+ sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
sinfo->rxrate.flags = 0;
if (sta->last_rx_rate_flag & RX_FLAG_HT)
@@ -1314,6 +1321,14 @@
}
if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
+ if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
+ /* our RSSI threshold implementation is supported only for
+ * devices that report signal in dBm.
+ */
+ if (!(sdata->local->hw.flags & IEEE80211_HW_SIGNAL_DBM))
+ return -ENOTSUPP;
+ conf->rssi_threshold = nconf->rssi_threshold;
+ }
return 0;
}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 510ed1d..f6de8a6 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -443,6 +443,7 @@
IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
+IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
#endif
@@ -537,11 +538,15 @@
#ifdef CONFIG_MAC80211_MESH
+static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
+{
+ DEBUGFS_ADD_MODE(tsf, 0600);
+}
+
static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_stats",
sdata->debugfs.dir);
-
#define MESHSTATS_ADD(name)\
debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
@@ -581,6 +586,7 @@
MESHPARAMS_ADD(dot11MeshHWMPRootMode);
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
+ MESHPARAMS_ADD(rssi_threshold);
#undef MESHPARAMS_ADD
}
#endif
@@ -593,6 +599,7 @@
switch (sdata->vif.type) {
case NL80211_IFTYPE_MESH_POINT:
#ifdef CONFIG_MAC80211_MESH
+ add_mesh_files(sdata);
add_mesh_stats(sdata);
add_mesh_config(sdata);
#endif
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 7f9ac57..33fd8d9 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,7 +66,7 @@
skb_reset_tail_pointer(skb);
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
- if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
+ if (compare_ether_addr(ifibss->bssid, bssid))
sta_info_flush(sdata->local, sdata);
/* if merging, indicate to driver that we leave the old IBSS */
@@ -403,7 +403,7 @@
return;
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- memcmp(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) {
+ compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) {
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
@@ -508,7 +508,7 @@
goto put_bss;
/* same BSSID */
- if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
+ if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0)
goto put_bss;
if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
@@ -831,8 +831,8 @@
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
- if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
- memcmp(mgmt->bssid, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0)
+ if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 &&
+ !is_broadcast_ether_addr(mgmt->bssid))
return;
end = ((u8 *) mgmt) + len;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index cee0c74..796b13b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -105,6 +105,44 @@
*/
bool has_erp_value;
u8 erp_value;
+
+ /* Keep track of the corruption of the last beacon/probe response. */
+ u8 corrupt_data;
+
+ /* Keep track of what bits of information we have valid info for. */
+ u8 valid_data;
+};
+
+/**
+ * enum ieee80211_corrupt_data_flags - BSS data corruption flags
+ * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
+ * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
+ *
+ * These are bss flags that are attached to a bss in the
+ * @corrupt_data field of &struct ieee80211_bss.
+ */
+enum ieee80211_bss_corrupt_data_flags {
+ IEEE80211_BSS_CORRUPT_BEACON = BIT(0),
+ IEEE80211_BSS_CORRUPT_PROBE_RESP = BIT(1)
+};
+
+/**
+ * enum ieee80211_valid_data_flags - BSS valid data flags
+ * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
+ * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
+ * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
+ * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
+ *
+ * These are bss flags that are attached to a bss in the
+ * @valid_data field of &struct ieee80211_bss. They show which parts
+ * of the data structure were received as a result of an uncorrupted
+ * beacon/probe response.
+ */
+enum ieee80211_bss_valid_data_flags {
+ IEEE80211_BSS_VALID_DTIM = BIT(0),
+ IEEE80211_BSS_VALID_WMM = BIT(1),
+ IEEE80211_BSS_VALID_RATES = BIT(2),
+ IEEE80211_BSS_VALID_ERP = BIT(3)
};
static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
@@ -480,7 +518,7 @@
bool control_port;
- u8 bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN] __aligned(2);
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len, ie_len;
u8 *ie;
@@ -1120,6 +1158,9 @@
u8 quiet_elem_len;
u8 num_of_quiet_elem; /* can be more than one */
u8 timeout_int_len;
+
+ /* whether a parse error occurred while retrieving these elements */
+ bool parse_error;
};
static inline struct ieee80211_local *hw_to_local(
@@ -1348,7 +1389,8 @@
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
-void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
+ bool bss_notify);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
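
A userspace toy (simplified, assumed types; not from the patch) illustrating the caching rule the corrupt_data/valid_data flags encode: a field parsed from a corrupt frame may only fill a slot that was never obtained from a clean frame, and only clean frames set the corresponding VALID bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BSS_VALID_DTIM 0x01

struct toy_bss {
	uint8_t valid_data;
	uint8_t dtim_period;
};

static void cache_dtim(struct toy_bss *bss, bool parse_error, uint8_t dtim)
{
	/* never let a corrupt frame clobber a value obtained from a clean one */
	if (parse_error && (bss->valid_data & BSS_VALID_DTIM))
		return;
	bss->dtim_period = dtim;
	if (!parse_error)
		bss->valid_data |= BSS_VALID_DTIM;
}

int main(void)
{
	struct toy_bss bss = { 0, 0 };

	cache_dtim(&bss, false, 3);	/* clean beacon: cached and marked valid */
	cache_dtim(&bss, true, 7);	/* corrupt beacon: ignored */
	printf("dtim_period = %u\n", bss.dtim_period);	/* prints 3 */
	return 0;
}
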
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 620ca8d..401c01f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -304,7 +304,7 @@
* need to initialise the hardware if the hardware
* doesn't start up with sane defaults
*/
- ieee80211_set_wmm_default(sdata);
+ ieee80211_set_wmm_default(sdata, true);
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c707c8b..e5fbb7cf 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -204,7 +204,7 @@
kmem_cache_free(rm_cache, p);
--entries;
} else if ((seqnum == p->seqnum) &&
- (memcmp(sa, p->sa, ETH_ALEN) == 0))
+ (compare_ether_addr(sa, p->sa) == 0))
return -1;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c7e5c49..8d53b71 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -85,6 +85,8 @@
* @state_lock: mesh path state lock used to protect changes to the
* mpath itself. No need to take this lock when adding or removing
* an mpath to a hash bucket on a path table.
+ * @rann_snd_addr: the RANN sender address
+ * @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
*
*
@@ -109,6 +111,8 @@
u8 discovery_retries;
enum mesh_path_flags flags;
spinlock_t state_lock;
+ u8 rann_snd_addr[ETH_ALEN];
+ bool is_root;
bool is_gate;
};
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 31bc762..1c6f3d0 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,6 +8,7 @@
*/
#include <linux/slab.h>
+#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "wme.h"
#include "mesh.h"
@@ -323,6 +324,7 @@
struct sta_info *sta)
{
struct ieee80211_supported_band *sband;
+ struct rate_info rinfo;
/* This should be adjusted for each device */
int device_constant = 1 << ARITH_SHIFT;
int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
@@ -336,7 +338,9 @@
if (sta->fail_avg >= 100)
return MAX_METRIC;
- if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
+ sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
+ rate = cfg80211_calculate_bitrate(&rinfo);
+ if (WARN_ON(!rate))
return MAX_METRIC;
err = (sta->fail_avg << ARITH_SHIFT) / 100;
@@ -344,7 +348,6 @@
/* bitrate is in units of 100 Kbps, while we need rate in units of
* 1Mbps. This will be corrected on tx_time computation.
*/
- rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
tx_time = (device_constant + 10 * test_frame_len / rate);
estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
@@ -419,7 +422,7 @@
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
- if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
+ if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
@@ -469,7 +472,7 @@
/* Update and check transmitter routing info */
ta = mgmt->sa;
- if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
+ if (compare_ether_addr(orig_addr, ta) == 0)
fresh_info = false;
else {
fresh_info = true;
@@ -513,8 +516,9 @@
u8 *preq_elem, u32 metric)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct mesh_path *mpath;
+ struct mesh_path *mpath = NULL;
u8 *target_addr, *orig_addr;
+ const u8 *da;
u8 target_flags, ttl;
u32 orig_sn, target_sn, lifetime;
bool reply = false;
@@ -529,7 +533,7 @@
mhwmp_dbg("received PREQ from %pM", orig_addr);
- if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
+ if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) {
mhwmp_dbg("PREQ is for us");
forward = false;
reply = true;
@@ -591,9 +595,11 @@
flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
+ da = (mpath && mpath->is_root) ?
+ mpath->rann_snd_addr : broadcast_addr;
mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
cpu_to_le32(orig_sn), target_flags, target_addr,
- cpu_to_le32(target_sn), broadcast_addr,
+ cpu_to_le32(target_sn), da,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
@@ -615,6 +621,7 @@
struct ieee80211_mgmt *mgmt,
u8 *prep_elem, u32 metric)
{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
u8 *target_addr, *orig_addr;
u8 ttl, hopcount, flags;
@@ -624,10 +631,13 @@
mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
- if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
+ if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
/* destination, no forwarding required */
return;
+ if (!ifmsh->mshcfg.dot11MeshForwarding)
+ return;
+
ttl = PREP_IE_TTL(prep_elem);
if (ttl <= 1) {
sdata->u.mesh.mshstats.dropped_frames_ttl++;
@@ -694,21 +704,26 @@
rcu_read_lock();
mpath = mesh_path_lookup(target_addr, sdata);
if (mpath) {
+ struct sta_info *sta;
+
spin_lock_bh(&mpath->state_lock);
+ sta = next_hop_deref_protected(mpath);
if (mpath->flags & MESH_PATH_ACTIVE &&
- memcmp(ta, next_hop_deref_protected(mpath)->sta.addr,
- ETH_ALEN) == 0 &&
+ compare_ether_addr(ta, sta->sta.addr) == 0 &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
SN_GT(target_sn, mpath->sn))) {
mpath->flags &= ~MESH_PATH_ACTIVE;
mpath->sn = target_sn;
spin_unlock_bh(&mpath->state_lock);
+ if (!ifmsh->mshcfg.dot11MeshForwarding)
+ goto endperr;
mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
cpu_to_le16(target_rcode),
broadcast_addr, sdata);
} else
spin_unlock_bh(&mpath->state_lock);
}
+endperr:
rcu_read_unlock();
}
@@ -739,11 +754,11 @@
metric = rann->rann_metric;
/* Ignore our own RANNs */
- if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
+ if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
return;
- mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
- root_is_gate);
+ mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
+ orig_addr, mgmt->sa, root_is_gate);
rcu_read_lock();
mpath = mesh_path_lookup(orig_addr, sdata);
@@ -765,7 +780,7 @@
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
- if (mpath->sn < orig_sn) {
+ if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
@@ -774,6 +789,11 @@
0, sdata);
mpath->sn = orig_sn;
}
+
+ /* Using individually addressed PREQ for root node */
+ memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
+ mpath->is_root = true;
+
if (root_is_gate)
mesh_path_add_gate(mpath);
@@ -909,6 +929,7 @@
struct mesh_preq_queue *preq_node;
struct mesh_path *mpath;
u8 ttl, target_flags;
+ const u8 *da;
u32 lifetime;
spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -971,9 +992,10 @@
target_flags = MP_F_RF;
spin_unlock_bh(&mpath->state_lock);
+ da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
- cpu_to_le32(mpath->sn), broadcast_addr, 0,
+ cpu_to_le32(mpath->sn), da, 0,
ttl, cpu_to_le32(lifetime), 0,
cpu_to_le32(ifmsh->preq_id++), sdata);
mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
@@ -1064,7 +1086,7 @@
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
- !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
+ !compare_ether_addr(sdata->vif.addr, hdr->addr4) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED))
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
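
A small userspace illustration of the unit handling behind the airtime metric change above: cfg80211_calculate_bitrate() reports the rate in units of 100 kbps (and also handles MCS rates), so the transmit-time computation multiplies by 10 to get back to Mbps. The frame length used here is an assumption for the example only.

#include <stdio.h>

int main(void)
{
	unsigned int frame_len_bits = 8192;	/* assumed test frame size in bits */
	unsigned int rate_100kbps = 540;	/* 54 Mbps, expressed in 100 kbps units */

	/* bits / Mbps = microseconds; the factor 10 converts 100 kbps -> Mbps */
	unsigned int tx_time_us = 10 * frame_len_bits / rate_100kbps;

	printf("estimated tx time: %u us\n", tx_time_us);	/* ~151 us */
	return 0;
}
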
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index dc51669..be1361b 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -348,7 +348,7 @@
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+ compare_ether_addr(dst, mpath->dst) == 0) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -523,7 +523,7 @@
int err = 0;
u32 hash_idx;
- if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+ if (compare_ether_addr(dst, sdata->vif.addr) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -559,12 +559,13 @@
hash_idx = mesh_table_hash(dst, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
- spin_lock_bh(&tbl->hashwlock[hash_idx]);
+ spin_lock(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
- if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+ if (mpath->sdata == sdata &&
+ compare_ether_addr(dst, mpath->dst) == 0)
goto err_exists;
}
@@ -575,7 +576,7 @@
mesh_paths_generation++;
- spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+ spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -584,7 +585,7 @@
return 0;
err_exists:
- spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+ spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
@@ -655,7 +656,7 @@
int err = 0;
u32 hash_idx;
- if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+ if (compare_ether_addr(dst, sdata->vif.addr) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -687,12 +688,13 @@
hash_idx = mesh_table_hash(dst, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
- spin_lock_bh(&tbl->hashwlock[hash_idx]);
+ spin_lock(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
- if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+ if (mpath->sdata == sdata &&
+ compare_ether_addr(dst, mpath->dst) == 0)
goto err_exists;
}
@@ -701,7 +703,7 @@
tbl->mean_chain_len * (tbl->hash_mask + 1))
grow = 1;
- spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+ spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -710,7 +712,7 @@
return 0;
err_exists:
- spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+ spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
@@ -809,9 +811,9 @@
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
if (rcu_dereference(mpath->next_hop) == sta) {
- spin_lock_bh(&tbl->hashwlock[i]);
+ spin_lock(&tbl->hashwlock[i]);
__mesh_path_del(tbl, node);
- spin_unlock_bh(&tbl->hashwlock[i]);
+ spin_unlock(&tbl->hashwlock[i]);
}
}
read_unlock_bh(&pathtbl_resize_lock);
@@ -882,11 +884,11 @@
hash_idx = mesh_table_hash(addr, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
- spin_lock_bh(&tbl->hashwlock[hash_idx]);
+ spin_lock(&tbl->hashwlock[hash_idx]);
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
+ compare_ether_addr(addr, mpath->dst) == 0) {
__mesh_path_del(tbl, node);
goto enddel;
}
@@ -895,7 +897,7 @@
err = -ENXIO;
enddel:
mesh_paths_generation++;
- spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+ spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
return err;
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 8806e5e..4e53c4c 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -31,6 +31,12 @@
#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
+/* We only need a valid sta if user configured a minimum rssi_threshold. */
+#define rssi_threshold_check(sta, sdata) \
+ (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\
+ (sta && (s8) -ewma_read(&sta->avg_signal) > \
+ sdata->u.mesh.mshcfg.rssi_threshold))
+
enum plink_event {
PLINK_UNDEFINED,
OPN_ACPT,
@@ -301,7 +307,8 @@
if (mesh_peer_accepts_plinks(elems) &&
sta->plink_state == NL80211_PLINK_LISTEN &&
sdata->u.mesh.accepting_plinks &&
- sdata->u.mesh.mshcfg.auto_open_plinks)
+ sdata->u.mesh.mshcfg.auto_open_plinks &&
+ rssi_threshold_check(sta, sdata))
mesh_plink_open(sta);
rcu_read_unlock();
@@ -531,6 +538,14 @@
return;
}
+ if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
+ !rssi_threshold_check(sta, sdata)) {
+ mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n",
+ mgmt->sa);
+ rcu_read_unlock();
+ return;
+ }
+
if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
rcu_read_unlock();
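
A toy illustration (values assumed) of the comparison done by rssi_threshold_check above: mac80211's EWMA keeps the running average of the negated signal, so negating ewma_read() recovers the dBm figure that is then compared against mshcfg.rssi_threshold.

#include <stdio.h>

int main(void)
{
	int avg_signal = 62;		/* ewma_read() result: average of -(-62 dBm) */
	signed char rssi = (signed char)-avg_signal;	/* -62 dBm */
	int rssi_threshold = -70;	/* mshcfg.rssi_threshold, in dBm */

	printf("peering %s\n",
	       rssi > rssi_threshold ? "allowed" : "rejected");
	return 0;
}
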
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index caf97f5..c08924a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1277,7 +1277,6 @@
/* enable WMM or activate new settings */
sdata->vif.bss_conf.qos = true;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
}
static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -1455,8 +1454,6 @@
changed |= BSS_CHANGED_ASSOC;
sdata->vif.bss_conf.assoc = false;
- ieee80211_set_wmm_default(sdata);
-
/* channel(_type) changes are handled by ieee80211_hw_config */
WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
@@ -1484,10 +1481,16 @@
changed |= BSS_CHANGED_ARP_FILTER;
}
+ sdata->vif.bss_conf.qos = false;
+ changed |= BSS_CHANGED_QOS;
+
/* The BSSID (not really interesting) and HT changed */
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
+ /* disassociated - set to defaults now */
+ ieee80211_set_wmm_default(sdata, false);
+
del_timer_sync(&sdata->u.mgd.conn_mon_timer);
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
del_timer_sync(&sdata->u.mgd.timer);
@@ -1822,7 +1825,7 @@
memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
- if (memcmp(bssid, mgmt->bssid, ETH_ALEN))
+ if (compare_ether_addr(bssid, mgmt->bssid))
return RX_MGMT_NONE;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
@@ -1903,7 +1906,7 @@
return RX_MGMT_NONE;
if (!ifmgd->associated ||
- memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN))
+ compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
return RX_MGMT_NONE;
bssid = ifmgd->associated->bssid;
@@ -1936,7 +1939,7 @@
return RX_MGMT_NONE;
if (!ifmgd->associated ||
- memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN))
+ compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
return RX_MGMT_NONE;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -2155,7 +2158,8 @@
ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
elems.wmm_param_len);
else
- ieee80211_set_wmm_default(sdata);
+ ieee80211_set_wmm_default(sdata, false);
+ changed |= BSS_CHANGED_QOS;
if (elems.ht_info_elem && elems.wmm_param &&
(sdata->local->hw.queues >= 4) &&
@@ -2203,7 +2207,7 @@
if (!assoc_data)
return RX_MGMT_NONE;
- if (memcmp(assoc_data->bss->bssid, mgmt->bssid, ETH_ALEN))
+ if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid))
return RX_MGMT_NONE;
/*
@@ -2291,8 +2295,8 @@
bool need_ps = false;
if (sdata->u.mgd.associated &&
- memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
- ETH_ALEN) == 0) {
+ compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid)
+ == 0) {
bss = (void *)sdata->u.mgd.associated->priv;
/* not previously set so we may need to recalc */
need_ps = !bss->dtim_period;
@@ -2347,7 +2351,7 @@
ASSERT_MGD_MTX(ifmgd);
- if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
+ if (compare_ether_addr(mgmt->da, sdata->vif.addr))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -2360,11 +2364,12 @@
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
if (ifmgd->associated &&
- memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0)
+ compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0)
ieee80211_reset_ap_probe(sdata);
if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
- memcmp(mgmt->bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN) == 0) {
+ compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid)
+ == 0) {
/* got probe response, continue with auth */
printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
ifmgd->auth_data->tries = 0;
@@ -2421,7 +2426,8 @@
return;
if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
- memcmp(mgmt->bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN) == 0) {
+ compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid)
+ == 0) {
ieee802_11_parse_elems(mgmt->u.beacon.variable,
len - baselen, &elems);
@@ -2436,7 +2442,7 @@
}
if (!ifmgd->associated ||
- memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN))
+ compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
return;
bssid = ifmgd->associated->bssid;
@@ -3299,7 +3305,7 @@
bool match;
/* keep sta info, bssid if matching */
- match = memcmp(ifmgd->bssid, req->bss->bssid, ETH_ALEN) == 0;
+ match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0;
ieee80211_destroy_auth_data(sdata, match);
}
@@ -3421,7 +3427,7 @@
goto err_clear;
}
} else
- WARN_ON_ONCE(memcmp(ifmgd->bssid, req->bss->bssid, ETH_ALEN));
+ WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, req->bss->bssid));
if (!bss->dtim_period &&
sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
@@ -3440,6 +3446,20 @@
}
run_again(ifmgd, assoc_data->timeout);
+ if (bss->corrupt_data) {
+ char *corrupt_type = "data";
+ if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) {
+ if (bss->corrupt_data &
+ IEEE80211_BSS_CORRUPT_PROBE_RESP)
+ corrupt_type = "beacon and probe response";
+ else
+ corrupt_type = "beacon";
+ } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
+ corrupt_type = "probe response";
+ printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
+ sdata->name, corrupt_type);
+ }
+
err = 0;
goto out;
err_clear:
@@ -3471,7 +3491,7 @@
sdata->name, req->bssid, req->reason_code);
if (ifmgd->associated &&
- memcmp(ifmgd->associated->bssid, req->bssid, ETH_ALEN) == 0)
+ compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0)
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, true, frame_buf);
else
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7a4ff02..5f6e32c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -177,7 +177,8 @@
pos += 2;
/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
+ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
+ !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
*pos = status->signal;
rthdr->it_present |=
cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
@@ -227,7 +228,7 @@
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
struct ieee80211_sub_if_data *sdata;
- int needed_headroom = 0;
+ int needed_headroom;
struct sk_buff *skb, *skb2;
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
@@ -489,12 +490,12 @@
if (ieee80211_has_tods(hdr->frame_control) ||
!ieee80211_has_fromds(hdr->frame_control))
return RX_DROP_MONITOR;
- if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
+ if (compare_ether_addr(hdr->addr3, dev_addr) == 0)
return RX_DROP_MONITOR;
} else {
if (!ieee80211_has_a4(hdr->frame_control))
return RX_DROP_MONITOR;
- if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
+ if (compare_ether_addr(hdr->addr4, dev_addr) == 0)
return RX_DROP_MONITOR;
}
}
@@ -1309,8 +1310,10 @@
sta->rx_fragments++;
sta->rx_bytes += rx->skb->len;
- sta->last_signal = status->signal;
- ewma_add(&sta->avg_signal, -status->signal);
+ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
+ sta->last_signal = status->signal;
+ ewma_add(&sta->avg_signal, -status->signal);
+ }
/*
* Change STA power saving mode only at the end of a frame
@@ -1957,6 +1960,9 @@
return RX_DROP_MONITOR;
}
+ if (!ifmsh->mshcfg.dot11MeshForwarding)
+ goto out;
+
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb) {
if (net_ratelimit())
@@ -2182,9 +2188,14 @@
if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
ieee80211_is_beacon(mgmt->frame_control) &&
!(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
+ int sig = 0;
+
+ if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ sig = status->signal;
+
cfg80211_report_obss_beacon(rx->local->hw.wiphy,
rx->skb->data, rx->skb->len,
- status->freq, GFP_ATOMIC);
+ status->freq, sig, GFP_ATOMIC);
rx->flags |= IEEE80211_RX_BEACON_REPORTED;
}
@@ -2336,7 +2347,7 @@
if (sdata->vif.type != NL80211_IFTYPE_STATION)
break;
- if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
+ if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid))
break;
goto queue;
@@ -2408,6 +2419,7 @@
ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ int sig = 0;
/* skip known-bad action frames and return them in the next handler */
if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
@@ -2420,7 +2432,10 @@
* it transmitted were processed or returned.
*/
- if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
+ if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ sig = status->signal;
+
+ if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
rx->skb->data, rx->skb->len,
GFP_ATOMIC)) {
if (rx->sta)
@@ -2538,16 +2553,10 @@
{
struct ieee80211_sub_if_data *sdata;
struct ieee80211_local *local = rx->local;
- struct ieee80211_rtap_hdr {
- struct ieee80211_radiotap_header hdr;
- u8 flags;
- u8 rate_or_pad;
- __le16 chan_freq;
- __le16 chan_flags;
- } __packed *rthdr;
struct sk_buff *skb = rx->skb, *skb2;
struct net_device *prev_dev = NULL;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ int needed_headroom;
/*
* If cooked monitor has been processed already, then
@@ -2561,30 +2570,15 @@
if (!local->cooked_mntrs)
goto out_free_skb;
- if (skb_headroom(skb) < sizeof(*rthdr) &&
- pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
+ /* room for the radiotap header based on driver features */
+ needed_headroom = ieee80211_rx_radiotap_len(local, status);
+
+ if (skb_headroom(skb) < needed_headroom &&
+ pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
goto out_free_skb;
- rthdr = (void *)skb_push(skb, sizeof(*rthdr));
- memset(rthdr, 0, sizeof(*rthdr));
- rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
- rthdr->hdr.it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
- (1 << IEEE80211_RADIOTAP_CHANNEL));
-
- if (rate) {
- rthdr->rate_or_pad = rate->bitrate / 5;
- rthdr->hdr.it_present |=
- cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- }
- rthdr->chan_freq = cpu_to_le16(status->freq);
-
- if (status->band == IEEE80211_BAND_5GHZ)
- rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
- IEEE80211_CHAN_5GHZ);
- else
- rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
- IEEE80211_CHAN_2GHZ);
+ /* prepend radiotap information */
+ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
skb_set_mac_header(skb, 0);
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 9270771..33cd169 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -13,6 +13,7 @@
*/
#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pm_qos.h>
#include <net/sch_generic.h>
@@ -103,16 +104,35 @@
cbss->free_priv = ieee80211_rx_bss_free;
bss = (void *)cbss->priv;
- /* save the ERP value so that it is available at association time */
- if (elems->erp_info && elems->erp_info_len >= 1) {
- bss->erp_value = elems->erp_info[0];
- bss->has_erp_value = true;
+ if (elems->parse_error) {
+ if (beacon)
+ bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
+ else
+ bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP;
+ } else {
+ if (beacon)
+ bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON;
+ else
+ bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP;
}
- if (elems->tim) {
+ /* save the ERP value so that it is available at association time */
+ if (elems->erp_info && elems->erp_info_len >= 1 &&
+ (!elems->parse_error ||
+ !(bss->valid_data & IEEE80211_BSS_VALID_ERP))) {
+ bss->erp_value = elems->erp_info[0];
+ bss->has_erp_value = true;
+ if (!elems->parse_error)
+ bss->valid_data |= IEEE80211_BSS_VALID_ERP;
+ }
+
+ if (elems->tim && (!elems->parse_error ||
+ !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
struct ieee80211_tim_ie *tim_ie =
(struct ieee80211_tim_ie *)elems->tim;
bss->dtim_period = tim_ie->dtim_period;
+ if (!elems->parse_error)
+ bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
}
/* If the beacon had no TIM IE, or it was invalid, use 1 */
@@ -120,26 +140,38 @@
bss->dtim_period = 1;
/* replace old supported rates if we get new values */
- srlen = 0;
- if (elems->supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES;
- if (clen > elems->supp_rates_len)
- clen = elems->supp_rates_len;
- memcpy(bss->supp_rates, elems->supp_rates, clen);
- srlen += clen;
+ if (!elems->parse_error ||
+ !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
+ srlen = 0;
+ if (elems->supp_rates) {
+ clen = IEEE80211_MAX_SUPP_RATES;
+ if (clen > elems->supp_rates_len)
+ clen = elems->supp_rates_len;
+ memcpy(bss->supp_rates, elems->supp_rates, clen);
+ srlen += clen;
+ }
+ if (elems->ext_supp_rates) {
+ clen = IEEE80211_MAX_SUPP_RATES - srlen;
+ if (clen > elems->ext_supp_rates_len)
+ clen = elems->ext_supp_rates_len;
+ memcpy(bss->supp_rates + srlen, elems->ext_supp_rates,
+ clen);
+ srlen += clen;
+ }
+ if (srlen) {
+ bss->supp_rates_len = srlen;
+ if (!elems->parse_error)
+ bss->valid_data |= IEEE80211_BSS_VALID_RATES;
+ }
}
- if (elems->ext_supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - srlen;
- if (clen > elems->ext_supp_rates_len)
- clen = elems->ext_supp_rates_len;
- memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen);
- srlen += clen;
- }
- if (srlen)
- bss->supp_rates_len = srlen;
- bss->wmm_used = elems->wmm_param || elems->wmm_info;
- bss->uapsd_supported = is_uapsd_supported(elems);
+ if (!elems->parse_error ||
+ !(bss->valid_data & IEEE80211_BSS_VALID_WMM)) {
+ bss->wmm_used = elems->wmm_param || elems->wmm_info;
+ bss->uapsd_supported = is_uapsd_supported(elems);
+ if (!elems->parse_error)
+ bss->valid_data |= IEEE80211_BSS_VALID_WMM;
+ }
if (!beacon)
bss->last_probe_resp = jiffies;
@@ -176,7 +208,7 @@
presp = ieee80211_is_probe_resp(fc);
if (presp) {
/* ignore ProbeResp to foreign address */
- if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
+ if (compare_ether_addr(mgmt->da, sdata->vif.addr))
return RX_DROP_MONITOR;
presp = true;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index cd0f265..38137cb 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -101,7 +102,7 @@
lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata &&
- memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ compare_ether_addr(sta->sta.addr, addr) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
lockdep_is_held(&local->sta_mtx));
@@ -124,7 +125,7 @@
while (sta) {
if ((sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
- memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ compare_ether_addr(sta->sta.addr, addr) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
lockdep_is_held(&local->sta_mtx));
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 23a97c9..ab05768 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -14,6 +14,7 @@
#include <linux/if_ether.h>
#include <linux/workqueue.h>
#include <linux/average.h>
+#include <linux/etherdevice.h>
#include "key.h"
/**
@@ -489,7 +490,7 @@
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
/* compare address and run code only if it matches */ \
- if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
+ if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0)
/*
* Get STA info by index, BROKEN!
@@ -528,6 +529,9 @@
void sta_info_stop(struct ieee80211_local *local);
int sta_info_flush(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
+void sta_set_rate_info_tx(struct sta_info *sta,
+ const struct ieee80211_tx_rate *rate,
+ struct rate_info *rinfo);
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
unsigned long exp_time);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index c928e4a..5f8f89e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -10,6 +10,7 @@
*/
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
@@ -377,7 +378,7 @@
for_each_sta_info(local, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
- if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
+ if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr))
continue;
if (info->flags & IEEE80211_TX_STATUS_EOSP)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index f6e4cef..32f7a3b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -572,24 +572,40 @@
size_t left = len;
u8 *pos = start;
bool calc_crc = filter != 0;
+ DECLARE_BITMAP(seen_elems, 256);
+ bitmap_zero(seen_elems, 256);
memset(elems, 0, sizeof(*elems));
elems->ie_start = start;
elems->total_len = len;
while (left >= 2) {
u8 id, elen;
+ bool elem_parse_failed;
id = *pos++;
elen = *pos++;
left -= 2;
- if (elen > left)
+ if (elen > left) {
+ elems->parse_error = true;
break;
+ }
+
+ if (id != WLAN_EID_VENDOR_SPECIFIC &&
+ id != WLAN_EID_QUIET &&
+ test_bit(id, seen_elems)) {
+ elems->parse_error = true;
+ left -= elen;
+ pos += elen;
+ continue;
+ }
if (calc_crc && id < 64 && (filter & (1ULL << id)))
crc = crc32_be(crc, pos - 2, elen + 2);
+ elem_parse_failed = false;
+
switch (id) {
case WLAN_EID_SSID:
elems->ssid = pos;
@@ -615,7 +631,8 @@
if (elen >= sizeof(struct ieee80211_tim_ie)) {
elems->tim = (void *)pos;
elems->tim_len = elen;
- }
+ } else
+ elem_parse_failed = true;
break;
case WLAN_EID_IBSS_PARAMS:
elems->ibss_params = pos;
@@ -664,10 +681,14 @@
case WLAN_EID_HT_CAPABILITY:
if (elen >= sizeof(struct ieee80211_ht_cap))
elems->ht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed = true;
break;
case WLAN_EID_HT_INFORMATION:
if (elen >= sizeof(struct ieee80211_ht_info))
elems->ht_info_elem = (void *)pos;
+ else
+ elem_parse_failed = true;
break;
case WLAN_EID_MESH_ID:
elems->mesh_id = pos;
@@ -676,6 +697,8 @@
case WLAN_EID_MESH_CONFIG:
if (elen >= sizeof(struct ieee80211_meshconf_ie))
elems->mesh_config = (void *)pos;
+ else
+ elem_parse_failed = true;
break;
case WLAN_EID_PEER_MGMT:
elems->peering = pos;
@@ -696,6 +719,8 @@
case WLAN_EID_RANN:
if (elen >= sizeof(struct ieee80211_rann_ie))
elems->rann = (void *)pos;
+ else
+ elem_parse_failed = true;
break;
case WLAN_EID_CHANNEL_SWITCH:
elems->ch_switch_elem = pos;
@@ -724,10 +749,18 @@
break;
}
+ if (elem_parse_failed)
+ elems->parse_error = true;
+ else
+ set_bit(id, seen_elems);
+
left -= elen;
pos += elen;
}
+ if (left != 0)
+ elems->parse_error = true;
+
return crc;
}
@@ -737,7 +770,8 @@
ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
}
-void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
+void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
+ bool bss_notify)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_queue_params qparam;
@@ -807,7 +841,9 @@
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
sdata->vif.bss_conf.qos =
sdata->vif.type != NL80211_IFTYPE_STATION;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
+ if (bss_notify)
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_QOS);
}
}
@@ -829,7 +865,7 @@
else
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
- ieee80211_set_wmm_default(sdata);
+ ieee80211_set_wmm_default(sdata, true);
}
u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
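
A userspace-compilable toy of the duplicate-IE rule the parser above enforces: any element ID that repeats, other than vendor-specific and quiet elements, marks the frame as having a parse error. The element IDs are from IEEE 802.11; the sample ID list is made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_VENDOR_SPECIFIC 221
#define WLAN_EID_QUIET           40

int main(void)
{
	uint8_t ids[] = { 0, 1, 3, 221, 221, 1 };	/* SSID, rates, DS, vendor x2, rates again */
	bool seen[256] = { false };
	bool parse_error = false;
	unsigned int i;

	for (i = 0; i < sizeof(ids); i++) {
		uint8_t id = ids[i];

		if (id != WLAN_EID_VENDOR_SPECIFIC && id != WLAN_EID_QUIET &&
		    seen[id]) {
			parse_error = true;	/* duplicate non-exempt element */
			continue;
		}
		seen[id] = true;
	}
	printf("parse_error = %d\n", parse_error);	/* prints 1: rates appeared twice */
	return 0;
}
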
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f8ac4ef..0c6f67e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -103,6 +103,16 @@
If unsure, say `N'.
+config NF_CONNTRACK_TIMEOUT
+ bool 'Connection tracking timeout'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for the connection tracking timeout
+ extension. This allows you to attach timeout policies to flows
+ via the CT target.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
@@ -314,6 +324,17 @@
help
This option enables support for a netlink-based userspace interface
+config NF_CT_NETLINK_TIMEOUT
+ tristate 'Connection tracking timeout tuning via Netlink'
+ select NETFILTER_NETLINK
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking timeout
+ fine-grained tuning. This allows you to attach specific timeout
+ policies to flows, instead of using the global timeout policy.
+
+ If unsure, say `N'.
+
endif # NF_CONNTRACK
# transparent proxy support
@@ -524,6 +545,15 @@
For more information on the LEDs available on your system, see
Documentation/leds/leds-class.txt
+config NETFILTER_XT_TARGET_LOG
+ tristate "LOG target support"
+ default m if NETFILTER_ADVANCED=n
+ help
+ This option adds a `LOG' target, which allows you to create rules in
+ any iptables table that log the packet header to the syslog.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_MARK
tristate '"MARK" target support'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 40f4c3d..ca36765 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
@@ -22,6 +23,7 @@
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
+obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
# connection tracking helpers
nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
@@ -58,6 +60,7 @@
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index e3e7399..a72a4df 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -442,7 +442,7 @@
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
- set->family = AF_INET;
+ set->family = NFPROTO_IPV4;
return true;
}
@@ -550,7 +550,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
- .family = AF_INET,
+ .family = NFPROTO_IPV4,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_ip_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 56096f5..81324c1 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -543,7 +543,7 @@
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
- set->family = AF_INET;
+ set->family = NFPROTO_IPV4;
return true;
}
@@ -623,7 +623,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
- .family = AF_INET,
+ .family = NFPROTO_IPV4,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_ipmac_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 29ba93b..382ec28 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -422,7 +422,7 @@
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
- set->family = AF_UNSPEC;
+ set->family = NFPROTO_UNSPEC;
return true;
}
@@ -483,7 +483,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_PORT,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_port_create,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e7f90e7..e6c1c96 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -69,7 +69,7 @@
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name) &&
- (type->family == family || type->family == AF_UNSPEC) &&
+ (type->family == family || type->family == NFPROTO_UNSPEC) &&
revision >= type->revision_min &&
revision <= type->revision_max)
return type;
@@ -149,7 +149,7 @@
rcu_read_lock();
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name) &&
- (type->family == family || type->family == AF_UNSPEC)) {
+ (type->family == family || type->family == NFPROTO_UNSPEC)) {
found = true;
if (type->revision_min < *min)
*min = type->revision_min;
@@ -164,8 +164,8 @@
__find_set_type_minmax(name, family, min, max, true);
}
-#define family_name(f) ((f) == AF_INET ? "inet" : \
- (f) == AF_INET6 ? "inet6" : "any")
+#define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
+ (f) == NFPROTO_IPV6 ? "inet6" : "any")
/* Register a set type structure. The type is identified by
* the unique triple of name, family and revision.
@@ -354,7 +354,7 @@
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
- !(opt->family == set->family || set->family == AF_UNSPEC))
+ !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
read_lock_bh(&set->lock);
@@ -387,7 +387,7 @@
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
- !(opt->family == set->family || set->family == AF_UNSPEC))
+ !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
write_lock_bh(&set->lock);
@@ -410,7 +410,7 @@
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
- !(opt->family == set->family || set->family == AF_UNSPEC))
+ !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
write_lock_bh(&set->lock);
@@ -575,7 +575,7 @@
return NULL;
nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_INET;
+ nfmsg->nfgen_family = NFPROTO_IPV4;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 1f03556..6fdf88a 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -136,10 +136,10 @@
u8 proto;
switch (pf) {
- case AF_INET:
+ case NFPROTO_IPV4:
ret = ip_set_get_ip4_port(skb, src, port, &proto);
break;
- case AF_INET6:
+ case NFPROTO_IPV6:
ret = ip_set_get_ip6_port(skb, src, port, &proto);
break;
default:
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 4015fca..5139dea 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -366,11 +366,11 @@
u8 netmask, hbits;
struct ip_set_hash *h;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
- netmask = set->family == AF_INET ? 32 : 128;
+ netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
pr_debug("Create set %s with family %s\n",
- set->name, set->family == AF_INET ? "inet" : "inet6");
+ set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
@@ -389,8 +389,8 @@
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
- if ((set->family == AF_INET && netmask > 32) ||
- (set->family == AF_INET6 && netmask > 128) ||
+ if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
+ (set->family == NFPROTO_IPV6 && netmask > 128) ||
netmask == 0)
return -IPSET_ERR_INVALID_NETMASK;
}
@@ -419,15 +419,15 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_tvariant : &hash_ip6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ip4_gc_init(set);
else
hash_ip6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_variant : &hash_ip6_variant;
}
@@ -443,7 +443,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = hash_ip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 37d667e..9c27e24 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -450,7 +450,7 @@
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -490,15 +490,15 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ipport4_gc_init(set);
else
hash_ipport6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_variant : &hash_ipport6_variant;
}
@@ -514,7 +514,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* SCTP and UDPLITE support added */
.create = hash_ipport_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index e69e271..9134057 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -468,7 +468,7 @@
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -508,15 +508,15 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ipportip4_gc_init(set);
else
hash_ipportip6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_variant : &hash_ipportip6_variant;
}
@@ -532,7 +532,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* SCTP and UDPLITE support added */
.create = hash_ipportip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 64199b4..5d05e69 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -41,12 +41,19 @@
/* The type variant functions: IPv4 */
+/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0.
+ * The drawback is that we must store cidr - 1 internally and convert
+ * back and forth between the two representations.
+ */
+#define IP_SET_HASH_WITH_NETS_PACKED
+
/* Member elements without timeout */
struct hash_ipportnet4_elem {
__be32 ip;
__be32 ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
};
@@ -55,7 +62,8 @@
__be32 ip;
__be32 ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
@@ -86,10 +94,22 @@
}
static inline void
+hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
{
elem->ip2 &= ip_set_netmask(cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static inline void
@@ -102,11 +122,15 @@
hash_ipportnet4_data_list(struct sk_buff *skb,
const struct hash_ipportnet4_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -119,14 +143,17 @@
{
const struct hash_ipportnet4_telem *tdata =
(const struct hash_ipportnet4_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
@@ -158,13 +185,11 @@
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
@@ -172,7 +197,7 @@
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
- data.ip2 &= ip_set_netmask(data.cidr);
+ data.ip2 &= ip_set_netmask(data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -183,17 +208,19 @@
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+ struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -208,9 +235,10 @@
return ret;
if (tb[IPSET_ATTR_CIDR2]) {
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!data.cidr)
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
}
if (tb[IPSET_ATTR_PORT])
@@ -236,12 +264,18 @@
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
tb[IPSET_ATTR_IP2_TO])) {
data.ip = htonl(ip);
- data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr));
+ data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1));
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -275,7 +309,7 @@
if (ip2_from + UINT_MAX == ip2_to)
return -IPSET_ERR_HASH_RANGE;
} else {
- ip_set_mask_from_to(ip2_from, ip2_to, data.cidr);
+ ip_set_mask_from_to(ip2_from, ip2_to, data.cidr + 1);
}
if (retried)
@@ -290,7 +324,8 @@
while (!after(ip2, ip2_to)) {
data.ip2 = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
- &data.cidr);
+ &cidr);
+ data.cidr = cidr - 1;
ret = adtfn(set, &data, timeout, flags);
if (ret && !ip_set_eexist(ret, flags))
@@ -321,7 +356,8 @@
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
};
@@ -329,7 +365,8 @@
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
@@ -360,6 +397,18 @@
}
static inline void
+hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
{
elem->proto = 0;
@@ -378,18 +427,22 @@
hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip2, cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static bool
hash_ipportnet6_data_list(struct sk_buff *skb,
const struct hash_ipportnet6_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -402,14 +455,17 @@
{
const struct hash_ipportnet6_telem *e =
(const struct hash_ipportnet6_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -438,13 +494,11 @@
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
@@ -452,7 +506,7 @@
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
- ip6_netmask(&data.ip2, data.cidr);
+ ip6_netmask(&data.ip2, data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -463,16 +517,18 @@
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+ struct hash_ipportnet6_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -490,13 +546,14 @@
if (ret)
return ret;
- if (tb[IPSET_ATTR_CIDR2])
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (tb[IPSET_ATTR_CIDR2]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
+ }
- if (!data.cidr)
- return -IPSET_ERR_INVALID_CIDR;
-
- ip6_netmask(&data.ip2, data.cidr);
+ ip6_netmask(&data.ip2, data.cidr + 1);
if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -521,6 +578,12 @@
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -554,7 +617,7 @@
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -573,7 +636,7 @@
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -596,16 +659,16 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_tvariant
: &hash_ipportnet6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ipportnet4_gc_init(set);
else
hash_ipportnet6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
}
@@ -621,10 +684,11 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
/* 1 SCTP and UDPLITE support added */
- .revision_max = 2, /* Range as input support for IPv4 added */
+ /* 2 Range as input support for IPv4 added */
+ .revision_max = 3, /* nomatch flag support added */
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -643,6 +707,7 @@
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
},
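
The hash:ip,port,net elements gain the nomatch flag without growing: one bit is stolen from cidr. Since cidr == 0 is not supported, the stored value is always cidr - 1, so the legal range 1..128 fits in 7 bits, and every reader and writer compensates with the +1/-1 seen throughout the hunks above. A standalone sketch of that encoding, with illustrative names rather than the kernel helpers:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative element: one byte carries both the prefix length and the
 * nomatch flag, mirroring the "u8 cidr:7; u8 nomatch:1;" layout above.
 */
struct packed_net {
	uint8_t cidr:7;		/* stores prefix length minus one: 1..128 -> 0..127 */
	uint8_t nomatch:1;	/* 1 = entry exists but must report "no match"      */
};

static void packed_net_set(struct packed_net *e, uint8_t cidr, bool nomatch)
{
	/* caller guarantees 1 <= cidr <= 128; cidr == 0 is rejected earlier */
	e->cidr = cidr - 1;
	e->nomatch = nomatch;
}

static uint8_t packed_net_cidr(const struct packed_net *e)
{
	return e->cidr + 1;	/* undo the bias when reporting back to userspace */
}

static bool packed_net_match(const struct packed_net *e)
{
	return !e->nomatch;	/* a found "nomatch" entry behaves as a miss */
}
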
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 2898819..7c3d945 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -43,7 +43,7 @@
struct hash_net4_elem {
__be32 ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
};
@@ -51,7 +51,7 @@
struct hash_net4_telem {
__be32 ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
unsigned long timeout;
};
@@ -61,7 +61,8 @@
const struct hash_net4_elem *ip2,
u32 *multi)
{
- return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+ return ip1->ip == ip2->ip &&
+ ip1->cidr == ip2->cidr;
}
static inline bool
@@ -76,6 +77,19 @@
{
dst->ip = src->ip;
dst->cidr = src->cidr;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_net4_data_match(const struct hash_net4_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
@@ -95,8 +109,12 @@
static bool
hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -108,11 +126,14 @@
{
const struct hash_net4_telem *tdata =
(const struct hash_net4_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
@@ -167,7 +188,8 @@
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -179,7 +201,7 @@
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -189,6 +211,12 @@
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr));
ret = adtfn(set, &data, timeout, flags);
@@ -236,14 +264,14 @@
struct hash_net6_elem {
union nf_inet_addr ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
};
struct hash_net6_telem {
union nf_inet_addr ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
unsigned long timeout;
};
@@ -269,6 +297,19 @@
{
dst->ip.in6 = src->ip.in6;
dst->cidr = src->cidr;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_net6_data_match(const struct hash_net6_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
@@ -296,8 +337,12 @@
static bool
hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -309,11 +354,14 @@
{
const struct hash_net6_telem *e =
(const struct hash_net6_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -366,7 +414,8 @@
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -381,7 +430,7 @@
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
@@ -392,6 +441,12 @@
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -406,7 +461,7 @@
struct ip_set_hash *h;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -425,7 +480,7 @@
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -448,15 +503,15 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_net4_tvariant : &hash_net6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_net4_gc_init(set);
else
hash_net6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_net4_variant : &hash_net6_variant;
}
@@ -472,9 +527,10 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
- .revision_max = 1, /* Range as input support for IPv4 added */
+ /* = 1 Range as input support for IPv4 added */
+ .revision_max = 2, /* nomatch flag support added */
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -488,6 +544,7 @@
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
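
For hash:net the flag travels as IPSET_FLAG_NOMATCH inside the IPSET_ATTR_CADT_FLAGS netlink attribute (on the command line this corresponds to the per-entry nomatch option, assuming a matching ipset userspace release). The uadt parsers above only honour it on IPSET_ADD and fold it into the upper 16 bits of the internal flags word so it can pass through the generic add path unchanged; dumps report it back, htonl()'ed, only when set. A sketch of that packing, under the assumption that the generic hash code recovers the CADT half with a 16-bit shift (the flag value below is illustrative, not the kernel's):

#include <stdint.h>

#define IPSET_FLAG_NOMATCH	(1 << 0)	/* illustrative value only */

/* Fold the userspace-visible CADT flags into the upper half of the
 * internal flags word, as "flags |= (cadt_flags << 16)" does above.
 */
static uint32_t pack_adt_flags(uint32_t internal_flags, uint32_t cadt_flags)
{
	if (cadt_flags & IPSET_FLAG_NOMATCH)
		internal_flags |= cadt_flags << 16;
	return internal_flags;
}

/* Assumed counterpart on the receiving side: shift back to recover the
 * CADT flags before calling the type's data_flags() callback.
 */
static uint32_t unpack_cadt_flags(uint32_t internal_flags)
{
	return internal_flags >> 16;
}
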
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index e13095d..f24037f 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -163,7 +163,8 @@
__be32 ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
};
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
@@ -173,7 +174,8 @@
__be32 ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
};
@@ -182,7 +184,8 @@
__be32 ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
unsigned long timeout;
};
@@ -207,11 +210,25 @@
static inline void
hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
- const struct hash_netiface4_elem *src) {
+ const struct hash_netiface4_elem *src)
+{
dst->ip = src->ip;
dst->cidr = src->cidr;
dst->physdev = src->physdev;
dst->iface = src->iface;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
@@ -233,11 +250,13 @@
{
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -252,11 +271,13 @@
(const struct hash_netiface4_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
@@ -361,7 +382,7 @@
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -387,6 +408,8 @@
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1;
+ if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
+ flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
@@ -440,7 +463,8 @@
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
};
#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
@@ -449,7 +473,8 @@
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
};
@@ -457,7 +482,8 @@
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
unsigned long timeout;
};
@@ -488,8 +514,21 @@
}
static inline void
+hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
{
+ elem->cidr = 0;
}
static inline void
@@ -514,11 +553,13 @@
{
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -533,11 +574,13 @@
(const struct hash_netiface6_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
return 0;
@@ -636,7 +679,7 @@
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
@@ -662,6 +705,8 @@
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1;
+ if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
+ flags |= (cadt_flags << 16);
}
ret = adtfn(set, &data, timeout, flags);
@@ -678,7 +723,7 @@
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -697,7 +742,7 @@
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -722,15 +767,15 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_netiface4_gc_init(set);
else
hash_netiface6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_variant : &hash_netiface6_variant;
}
@@ -746,8 +791,9 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
.dimension = IPSET_DIM_TWO,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
+ .revision_max = 1, /* nomatch flag support added */
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8f9de72..ce2e771 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -40,12 +40,19 @@
/* The type variant functions: IPv4 */
+/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
+ * However this way we have to store internally cidr - 1,
+ * dancing back and forth.
+ */
+#define IP_SET_HASH_WITH_NETS_PACKED
+
/* Member elements without timeout */
struct hash_netport4_elem {
__be32 ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
};
/* Member elements with timeout support */
@@ -53,7 +60,8 @@
__be32 ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
unsigned long timeout;
};
@@ -82,13 +90,26 @@
dst->port = src->port;
dst->proto = src->proto;
dst->cidr = src->cidr;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_netport4_data_match(const struct hash_netport4_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static inline void
@@ -101,10 +122,14 @@
hash_netport4_data_list(struct sk_buff *skb,
const struct hash_netport4_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -117,13 +142,16 @@
{
const struct hash_netport4_telem *tdata =
(const struct hash_netport4_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
@@ -154,20 +182,18 @@
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
- data.ip &= ip_set_netmask(data.cidr);
+ data.ip &= ip_set_netmask(data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -178,16 +204,18 @@
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netport4_elem data = { .cidr = HOST_MASK };
+ struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to, p = 0, ip = 0, ip_to, last;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -198,9 +226,10 @@
return ret;
if (tb[IPSET_ATTR_CIDR]) {
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
}
if (tb[IPSET_ATTR_PORT])
@@ -227,8 +256,15 @@
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
- data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+ data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1));
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -248,14 +284,15 @@
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
} else {
- ip_set_mask_from_to(ip, ip_to, data.cidr);
+ ip_set_mask_from_to(ip, ip_to, data.cidr + 1);
}
if (retried)
ip = h->next.ip;
while (!after(ip, ip_to)) {
data.ip = htonl(ip);
- last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+ last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+ data.cidr = cidr - 1;
p = retried && ip == h->next.ip ? h->next.port : port;
for (; p <= port_to; p++) {
data.port = htons(p);
@@ -288,14 +325,16 @@
union nf_inet_addr ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
};
struct hash_netport6_telem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
unsigned long timeout;
};
@@ -324,6 +363,18 @@
}
static inline void
+hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_netport6_data_match(const struct hash_netport6_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
{
elem->proto = 0;
@@ -342,17 +393,21 @@
hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static bool
hash_netport6_data_list(struct sk_buff *skb,
const struct hash_netport6_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -365,13 +420,16 @@
{
const struct hash_netport6_telem *e =
(const struct hash_netport6_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -400,20 +458,18 @@
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
- ip6_netmask(&data.ip, data.cidr);
+ ip6_netmask(&data.ip, data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -424,16 +480,18 @@
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netport6_elem data = { .cidr = HOST_MASK };
+ struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -445,11 +503,13 @@
if (ret)
return ret;
- if (tb[IPSET_ATTR_CIDR])
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
- return -IPSET_ERR_INVALID_CIDR;
- ip6_netmask(&data.ip, data.cidr);
+ if (tb[IPSET_ATTR_CIDR]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
+ }
+ ip6_netmask(&data.ip, data.cidr + 1);
if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -474,6 +534,12 @@
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -507,7 +573,7 @@
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -526,7 +592,7 @@
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -549,15 +615,15 @@
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_tvariant : &hash_netport6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_netport4_gc_init(set);
else
hash_netport6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_variant : &hash_netport6_variant;
}
@@ -573,10 +639,11 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
/* 1 SCTP and UDPLITE support added */
- .revision_max = 2, /* Range as input support for IPv4 added */
+ /* 2, Range as input support for IPv4 added */
+ .revision_max = 3, /* nomatch flag support added */
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -595,6 +662,7 @@
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4d10819..7e095f9 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -575,7 +575,7 @@
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = list_set_create,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ed86a3b..7b48035 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -44,6 +44,7 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -635,8 +636,12 @@
if (del_timer(&ct->timeout)) {
death_by_timeout((unsigned long)ct);
- dropped = 1;
- NF_CT_STAT_INC_ATOMIC(net, early_drop);
+ /* Check if we indeed killed this entry. Reliable event
+ delivery may have inserted it into the dying list. */
+ if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ dropped = 1;
+ NF_CT_STAT_INC_ATOMIC(net, early_drop);
+ }
}
nf_ct_put(ct);
return dropped;
@@ -763,7 +768,8 @@
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
- unsigned int dataoff, u32 hash)
+ unsigned int dataoff, u32 hash,
+ unsigned int *timeouts)
{
struct nf_conn *ct;
struct nf_conn_help *help;
@@ -782,7 +788,7 @@
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
- if (!l4proto->new(ct, skb, dataoff)) {
+ if (!l4proto->new(ct, skb, dataoff, timeouts)) {
nf_conntrack_free(ct);
pr_debug("init conntrack: can't track with proto module\n");
return NULL;
@@ -848,7 +854,8 @@
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
int *set_reply,
- enum ip_conntrack_info *ctinfo)
+ enum ip_conntrack_info *ctinfo,
+ unsigned int *timeouts)
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
@@ -868,7 +875,7 @@
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
- skb, dataoff, hash);
+ skb, dataoff, hash, timeouts);
if (!h)
return NULL;
if (IS_ERR(h))
@@ -909,6 +916,8 @@
enum ip_conntrack_info ctinfo;
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
+ struct nf_conn_timeout *timeout_ext;
+ unsigned int *timeouts;
unsigned int dataoff;
u_int8_t protonum;
int set_reply = 0;
@@ -955,8 +964,19 @@
goto out;
}
+ /* Decide what timeout policy we want to apply to this flow. */
+ if (tmpl) {
+ timeout_ext = nf_ct_timeout_find(tmpl);
+ if (timeout_ext)
+ timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
+ else
+ timeouts = l4proto->get_timeouts(net);
+ } else
+ timeouts = l4proto->get_timeouts(net);
+
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
- l3proto, l4proto, &set_reply, &ctinfo);
+ l3proto, l4proto, &set_reply, &ctinfo,
+ timeouts);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(net, invalid);
@@ -973,7 +993,7 @@
NF_CT_ASSERT(skb->nfct);
- ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
+ ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
@@ -1327,6 +1347,7 @@
}
nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
+ nf_conntrack_timeout_fini(net);
nf_conntrack_ecache_fini(net);
nf_conntrack_tstamp_fini(net);
nf_conntrack_acct_fini(net);
@@ -1558,9 +1579,14 @@
ret = nf_conntrack_ecache_init(net);
if (ret < 0)
goto err_ecache;
+ ret = nf_conntrack_timeout_init(net);
+ if (ret < 0)
+ goto err_timeout;
return 0;
+err_timeout:
+ nf_conntrack_timeout_fini(net);
err_ecache:
nf_conntrack_tstamp_fini(net);
err_tstamp:
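
With this change nf_conntrack_in() resolves the timeout policy once, before the conntrack entry is created or updated: if the template conntrack attached to the skb carries a timeout extension, its per-flow timeout array wins; otherwise the L4 tracker's get_timeouts() supplies the per-netns defaults. The same array is then handed to both the new() and packet() callbacks. A condensed sketch of that decision (pick_timeouts is an illustrative name; the calls it makes are the ones used in the hunk above):

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>

static unsigned int *pick_timeouts(struct net *net,
				   struct nf_conn *tmpl,
				   struct nf_conntrack_l4proto *l4proto)
{
	struct nf_conn_timeout *timeout_ext;

	if (tmpl) {
		timeout_ext = nf_ct_timeout_find(tmpl);
		if (timeout_ext)
			/* per-flow policy attached to the template wins */
			return NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
	}
	/* otherwise fall back to the protocol tracker's per-netns defaults */
	return l4proto->get_timeouts(net);
}
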
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 14af632..5bd3047d 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -32,9 +32,11 @@
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- unsigned long events;
+ unsigned long events, missed;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
+ struct nf_ct_event item;
+ int ret;
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
@@ -47,31 +49,32 @@
events = xchg(&e->cache, 0);
- if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
- struct nf_ct_event item = {
- .ct = ct,
- .pid = 0,
- .report = 0
- };
- int ret;
- /* We make a copy of the missed event cache without taking
- * the lock, thus we may send missed events twice. However,
- * this does not harm and it happens very rarely. */
- unsigned long missed = e->missed;
+ if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
+ goto out_unlock;
- if (!((events | missed) & e->ctmask))
- goto out_unlock;
+ /* We make a copy of the missed event cache without taking
+ * the lock, thus we may send missed events twice. However,
+ * this does not harm and it happens very rarely. */
+ missed = e->missed;
- ret = notify->fcn(events | missed, &item);
- if (unlikely(ret < 0 || missed)) {
- spin_lock_bh(&ct->lock);
- if (ret < 0)
- e->missed |= events;
- else
- e->missed &= ~missed;
- spin_unlock_bh(&ct->lock);
- }
- }
+ if (!((events | missed) & e->ctmask))
+ goto out_unlock;
+
+ item.ct = ct;
+ item.pid = 0;
+ item.report = 0;
+
+ ret = notify->fcn(events | missed, &item);
+
+ if (likely(ret >= 0 && !missed))
+ goto out_unlock;
+
+ spin_lock_bh(&ct->lock);
+ if (ret < 0)
+ e->missed |= events;
+ else
+ e->missed &= ~missed;
+ spin_unlock_bh(&ct->lock);
out_unlock:
rcu_read_unlock();
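
The ecache rewrite keeps the semantics but flattens the nesting: bail out early when the conntrack is unconfirmed, dying, or has nothing cached; deliver the cached events together with a lock-free snapshot of the missed ones; and take ct->lock only afterwards, either to remember a failed delivery or to clear the missed bits that were just resent. A condensed sketch of that flow, with locals mirroring the hunk above and everything else elided:

/* Condensed from nf_ct_deliver_cached_events() after the refactor. */
events = xchg(&e->cache, 0);
if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
	goto out_unlock;

missed = e->missed;			/* lock-free snapshot; worst case we resend */
if (!((events | missed) & e->ctmask))
	goto out_unlock;

ret = notify->fcn(events | missed, &item);
if (likely(ret >= 0 && !missed))
	goto out_unlock;		/* common case: delivered, nothing pending */

spin_lock_bh(&ct->lock);
if (ret < 0)
	e->missed |= events;		/* delivery failed: keep for a later retry */
else
	e->missed &= ~missed;		/* snapshot resent: clear only those bits  */
spin_unlock_bh(&ct->lock);
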
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index bbe23ba..436b7cb 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -181,6 +181,60 @@
}
}
+static LIST_HEAD(nf_ct_helper_expectfn_list);
+
+void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
+{
+ spin_lock_bh(&nf_conntrack_lock);
+ list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
+ spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
+
+void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
+{
+ spin_lock_bh(&nf_conntrack_lock);
+ list_del_rcu(&n->head);
+ spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
+
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_name(const char *name)
+{
+ struct nf_ct_helper_expectfn *cur;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
+ if (!strcmp(cur->name, name)) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? cur : NULL;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
+
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
+{
+ struct nf_ct_helper_expectfn *cur;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
+ if (cur->expectfn == symbol) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? cur : NULL;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
+
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
unsigned int h = helper_hash(&me->tuple);
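
The new expectfn registry lets ctnetlink translate between an expectation callback pointer and a name (find_by_symbol when dumping, find_by_name when creating), so userspace can request a specific callback via CTA_EXPECT_FN. A hypothetical module-side usage sketch; only the register/unregister calls and the name/expectfn fields come from the interface added above, the callback itself is a placeholder:

#include <linux/module.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>

static void my_nat_follow_master(struct nf_conn *ct,
				 struct nf_conntrack_expect *exp)
{
	/* placeholder: set up NAT for the expected connection here */
}

static struct nf_ct_helper_expectfn my_expectfn = {
	.name     = "my-nat-follow-master",
	.expectfn = my_nat_follow_master,
};

static int __init my_helper_init(void)
{
	nf_ct_helper_expectfn_register(&my_expectfn);
	return 0;
}

static void __exit my_helper_exit(void)
{
	nf_ct_helper_expectfn_unregister(&my_expectfn);
}

module_init(my_helper_init);
module_exit(my_helper_exit);
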
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 04fb409..2124977 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -110,15 +110,16 @@
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
+ rcu_read_lock();
l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
- if (unlikely(ret < 0))
- return ret;
-
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
- ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
-
+ if (ret >= 0) {
+ l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
+ tuple->dst.protonum);
+ ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
+ }
+ rcu_read_unlock();
return ret;
}
@@ -712,9 +713,11 @@
struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
+ int res;
#ifdef CONFIG_NF_CONNTRACK_MARK
const struct ctnetlink_dump_filter *filter = cb->data;
#endif
+
spin_lock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
@@ -740,11 +743,14 @@
continue;
}
#endif
- if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- NFNL_MSG_TYPE(
- cb->nlh->nlmsg_type),
- ct) < 0) {
+ rcu_read_lock();
+ res =
+ ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ ct);
+ rcu_read_unlock();
+ if (res < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
goto out;
@@ -1078,16 +1084,13 @@
if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
- spin_unlock_bh(&nf_conntrack_lock);
nfnl_unlock();
if (request_module("nf-nat-ipv4") < 0) {
nfnl_lock();
- spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
return -EOPNOTSUPP;
}
nfnl_lock();
- spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
if (nfnetlink_parse_nat_setup_hook)
return -EAGAIN;
@@ -1649,14 +1652,16 @@
if (!nest_parms)
goto nla_put_failure;
+ rcu_read_lock();
l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
-
- if (unlikely(ret < 0))
- goto nla_put_failure;
-
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
+ if (ret >= 0) {
+ l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
+ tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
+ }
+ rcu_read_unlock();
+
if (unlikely(ret < 0))
goto nla_put_failure;
@@ -1675,6 +1680,11 @@
struct nf_conn *master = exp->master;
long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
struct nf_conn_help *help;
+#ifdef CONFIG_NF_NAT_NEEDED
+ struct nlattr *nest_parms;
+ struct nf_conntrack_tuple nat_tuple = {};
+#endif
+ struct nf_ct_helper_expectfn *expfn;
if (timeout < 0)
timeout = 0;
@@ -1688,9 +1698,29 @@
CTA_EXPECT_MASTER) < 0)
goto nla_put_failure;
+#ifdef CONFIG_NF_NAT_NEEDED
+ if (exp->saved_ip || exp->saved_proto.all) {
+ nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
+
+ nat_tuple.src.l3num = nf_ct_l3num(master);
+ nat_tuple.src.u3.ip = exp->saved_ip;
+ nat_tuple.dst.protonum = nf_ct_protonum(master);
+ nat_tuple.src.u = exp->saved_proto;
+
+ if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
+ CTA_EXPECT_NAT_TUPLE) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest_parms);
+ }
+#endif
NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
+ NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
help = nfct_help(master);
if (help) {
struct nf_conntrack_helper *helper;
@@ -1699,6 +1729,9 @@
if (helper)
NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
}
+ expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
+ if (expfn != NULL)
+ NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
return 0;
@@ -1856,6 +1889,9 @@
[CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
[CTA_EXPECT_ZONE] = { .type = NLA_U16 },
[CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
+ [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
+ [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
};
static int
@@ -2031,6 +2067,41 @@
return -EOPNOTSUPP;
}
+static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+ [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+};
+
+static int
+ctnetlink_parse_expect_nat(const struct nlattr *attr,
+ struct nf_conntrack_expect *exp,
+ u_int8_t u3)
+{
+#ifdef CONFIG_NF_NAT_NEEDED
+ struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
+ struct nf_conntrack_tuple nat_tuple = {};
+ int err;
+
+ nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
+
+ if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
+ return -EINVAL;
+
+ err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
+ &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+ if (err < 0)
+ return err;
+
+ exp->saved_ip = nat_tuple.src.u3.ip;
+ exp->saved_proto = nat_tuple.src.u;
+ exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
static int
ctnetlink_create_expect(struct net *net, u16 zone,
const struct nlattr * const cda[],
@@ -2042,6 +2113,8 @@
struct nf_conntrack_expect *exp;
struct nf_conn *ct;
struct nf_conn_help *help;
+ struct nf_conntrack_helper *helper = NULL;
+ u_int32_t class = 0;
int err = 0;
/* caller guarantees that those three CTA_EXPECT_* exist */
@@ -2060,6 +2133,40 @@
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* Look for helper of this expectation */
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper == NULL) {
+#ifdef CONFIG_MODULES
+ if (request_module("nfct-helper-%s", helpname) < 0) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ helper = __nf_conntrack_helper_find(helpname,
+ nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper) {
+ err = -EAGAIN;
+ goto out;
+ }
+#endif
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (cda[CTA_EXPECT_CLASS] && helper) {
+ class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
+ if (class > helper->expect_class_max) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
exp = nf_ct_expect_alloc(ct);
if (!exp) {
err = -ENOMEM;
@@ -2086,18 +2193,35 @@
} else
exp->flags = 0;
}
+ if (cda[CTA_EXPECT_FN]) {
+ const char *name = nla_data(cda[CTA_EXPECT_FN]);
+ struct nf_ct_helper_expectfn *expfn;
- exp->class = 0;
- exp->expectfn = NULL;
+ expfn = nf_ct_helper_expectfn_find_by_name(name);
+ if (expfn == NULL) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ exp->expectfn = expfn->expectfn;
+ } else
+ exp->expectfn = NULL;
+
+ exp->class = class;
exp->master = ct;
- exp->helper = NULL;
+ exp->helper = helper;
memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
exp->mask.src.u.all = mask.src.u.all;
+ if (cda[CTA_EXPECT_NAT]) {
+ err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
+ exp, u3);
+ if (err < 0)
+ goto err_out;
+ }
err = nf_ct_expect_related_report(exp, pid, report);
+err_out:
nf_ct_expect_put(exp);
-
out:
nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
return err;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index d6dde6d..24fdce2 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -423,7 +423,7 @@
}
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
@@ -472,12 +472,17 @@
ntohl(dhack->dccph_ack_nr_low);
}
+static unsigned int *dccp_get_timeouts(struct net *net)
+{
+ return dccp_pernet(net)->dccp_timeout;
+}
+
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
- u_int8_t pf, unsigned int hooknum)
+ u_int8_t pf, unsigned int hooknum,
+ unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
- struct dccp_net *dn;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
@@ -559,8 +564,7 @@
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
- dn = dccp_pernet(net);
- nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
@@ -702,8 +706,60 @@
return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
}
+
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ struct dccp_net *dn = dccp_pernet(&init_net);
+ unsigned int *timeouts = data;
+ int i;
+
+ /* set default DCCP timeouts. */
+ for (i=0; i<CT_DCCP_MAX; i++)
+ timeouts[i] = dn->dccp_timeout[i];
+
+ /* there's a 1:1 mapping between attributes and protocol states. */
+ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
+ if (tb[i]) {
+ timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
+ }
+ }
+ return 0;
+}
+
+static int
+dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+ int i;
+
+ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
+ NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
+ [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
static struct ctl_table dccp_sysctl_table[] = {
@@ -767,6 +823,7 @@
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
+ .get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
@@ -779,6 +836,15 @@
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
+ .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
+ .nla_policy = dccp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -789,6 +855,7 @@
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
+ .get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
@@ -801,6 +868,15 @@
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
+ .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
+ .nla_policy = dccp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static __net_init int dccp_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e2091d0..835e24c 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -40,25 +40,70 @@
return 0;
}
-/* Returns verdict for packet, or -1 for invalid. */
-static int packet(struct nf_conn *ct,
- const struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- u_int8_t pf,
- unsigned int hooknum)
+static unsigned int *generic_get_timeouts(struct net *net)
{
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout);
+ return &nf_ct_generic_timeout;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int generic_packet(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ u_int8_t pf,
+ unsigned int hooknum,
+ unsigned int *timeout)
+{
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
-static bool new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
+ unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeout = data;
+
+ if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
+ *timeout =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
+ else {
+ /* Set default generic timeout. */
+ *timeout = nf_ct_generic_timeout;
+ }
+
+ return 0;
+}
+
+static int
+generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeout = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
+ [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *generic_sysctl_header;
static struct ctl_table generic_sysctl_table[] = {
@@ -93,8 +138,18 @@
.pkt_to_tuple = generic_pkt_to_tuple,
.invert_tuple = generic_invert_tuple,
.print_tuple = generic_print_tuple,
- .packet = packet,
- .new = new,
+ .packet = generic_packet,
+ .get_timeouts = generic_get_timeouts,
+ .new = generic_new,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = generic_timeout_nlattr_to_obj,
+ .obj_to_nlattr = generic_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_GENERIC_MAX,
+ .obj_size = sizeof(unsigned int),
+ .nla_policy = generic_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &generic_sysctl_header,
.ctl_table = generic_sysctl_table,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index f033879..659648c 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -41,8 +41,16 @@
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
-#define GRE_TIMEOUT (30 * HZ)
-#define GRE_STREAM_TIMEOUT (180 * HZ)
+enum grep_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+static unsigned int gre_timeouts[GRE_CT_MAX] = {
+ [GRE_CT_UNREPLIED] = 30*HZ,
+ [GRE_CT_REPLIED] = 180*HZ,
+};
static int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
@@ -227,13 +235,19 @@
(ct->proto.gre.stream_timeout / HZ));
}
+static unsigned int *gre_get_timeouts(struct net *net)
+{
+ return gre_timeouts;
+}
+
/* Returns verdict for packet, and may modify conntrack */
static int gre_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is a GRE connection.
* Extend timeout. */
@@ -252,15 +266,15 @@
/* Called when a new connection for this protocol found. */
static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
pr_debug(": ");
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
/* initialize to sane value. Ideally a conntrack helper
* (e.g. in case of pptp) is increasing them */
- ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
- ct->proto.gre.timeout = GRE_TIMEOUT;
+ ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
+ ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
return true;
}
@@ -278,6 +292,52 @@
nf_ct_gre_keymap_destroy(master);
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+
+ /* set default timeouts for GRE. */
+ timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
+ timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
+
+ if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
+ timeouts[GRE_CT_UNREPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
+ }
+ if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
+ timeouts[GRE_CT_REPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
+ }
+ return 0;
+}
+
+static int
+gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
+ htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
+ htonl(timeouts[GRE_CT_REPLIED] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
+ [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
/* protocol helper struct */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.l3proto = AF_INET,
@@ -287,6 +347,7 @@
.invert_tuple = gre_invert_tuple,
.print_tuple = gre_print_tuple,
.print_conntrack = gre_print_conntrack,
+ .get_timeouts = gre_get_timeouts,
.packet = gre_packet,
.new = gre_new,
.destroy = gre_destroy,
@@ -297,6 +358,15 @@
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = gre_timeout_nlattr_to_obj,
+ .obj_to_nlattr = gre_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_GRE_MAX,
+ .obj_size = sizeof(unsigned int) * GRE_CT_MAX,
+ .nla_policy = gre_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static int proto_gre_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index afa6913..72b5088 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -279,13 +279,19 @@
return sctp_conntracks[dir][i][cur_state];
}
+static unsigned int *sctp_get_timeouts(struct net *net)
+{
+ return sctp_timeouts;
+}
+
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
static int sctp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -370,7 +376,7 @@
}
spin_unlock_bh(&ct->lock);
- nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
dir == IP_CT_DIR_REPLY &&
@@ -390,7 +396,7 @@
/* Called when a new connection for this protocol found. */
static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
enum sctp_conntrack new_state;
const struct sctphdr *sh;
@@ -543,6 +549,57 @@
}
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+ int i;
+
+ /* set default SCTP timeouts. */
+ for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
+ timeouts[i] = sctp_timeouts[i];
+
+ /* there's a 1:1 mapping between attributes and protocol states. */
+ for (i = CTA_TIMEOUT_SCTP_UNSPEC + 1; i < CTA_TIMEOUT_SCTP_MAX + 1; i++) {
+ if (tb[i]) {
+ timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
+ }
+ }
+ return 0;
+}
+
+static int
+sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+ int i;
+
+ for (i = CTA_TIMEOUT_SCTP_UNSPEC + 1; i < CTA_TIMEOUT_SCTP_MAX + 1; i++)
+ NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
+ [CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
+
#ifdef CONFIG_SYSCTL
static unsigned int sctp_sysctl_table_users;
static struct ctl_table_header *sctp_sysctl_header;
@@ -664,6 +721,7 @@
.print_tuple = sctp_print_tuple,
.print_conntrack = sctp_print_conntrack,
.packet = sctp_packet,
+ .get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -675,6 +733,15 @@
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
+ .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
+ .nla_policy = sctp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &sctp_sysctl_table_users,
.ctl_table_header = &sctp_sysctl_header,
@@ -694,6 +761,7 @@
.print_tuple = sctp_print_tuple,
.print_conntrack = sctp_print_conntrack,
.packet = sctp_packet,
+ .get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -704,6 +772,15 @@
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
+ .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
+ .nla_policy = sctp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
#ifdef CONFIG_SYSCTL
.ctl_table_users = &sctp_sysctl_table_users,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97b9f3e..361eade 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -64,13 +64,7 @@
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-/* RFC1122 says the R2 limit should be at least 100 seconds.
- Linux uses 15 packets as limit, which corresponds
- to ~13-30min depending on RTO. */
-static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
-static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;
-
-static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
+static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
[TCP_CONNTRACK_SYN_SENT] = 2 MINS,
[TCP_CONNTRACK_SYN_RECV] = 60 SECS,
[TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
@@ -80,6 +74,11 @@
[TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE] = 10 SECS,
[TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
+/* RFC1122 says the R2 limit should be at least 100 seconds.
+ Linux uses 15 packets as limit, which corresponds
+ to ~13-30min depending on RTO. */
+ [TCP_CONNTRACK_RETRANS] = 5 MINS,
+ [TCP_CONNTRACK_UNACK] = 5 MINS,
};
#define sNO TCP_CONNTRACK_NONE
@@ -814,13 +813,19 @@
return NF_ACCEPT;
}
+static unsigned int *tcp_get_timeouts(struct net *net)
+{
+ return tcp_timeouts;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple *tuple;
@@ -1015,14 +1020,14 @@
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
- tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans)
- timeout = nf_ct_tcp_timeout_max_retrans;
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
+ timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
- tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged)
- timeout = nf_ct_tcp_timeout_unacknowledged;
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
+ timeout = timeouts[TCP_CONNTRACK_UNACK];
else
- timeout = tcp_timeouts[new_state];
+ timeout = timeouts[new_state];
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
@@ -1054,7 +1059,7 @@
/* Called when a new connection for this protocol found. */
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
enum tcp_conntrack new_state;
const struct tcphdr *th;
@@ -1239,6 +1244,113 @@
}
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+ int i;
+
+ /* set default TCP timeouts. */
+ for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
+ timeouts[i] = tcp_timeouts[i];
+
+ if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
+ timeouts[TCP_CONNTRACK_SYN_SENT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
+ timeouts[TCP_CONNTRACK_SYN_RECV] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
+ timeouts[TCP_CONNTRACK_ESTABLISHED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
+ timeouts[TCP_CONNTRACK_FIN_WAIT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
+ timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
+ timeouts[TCP_CONNTRACK_LAST_ACK] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
+ timeouts[TCP_CONNTRACK_TIME_WAIT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
+ timeouts[TCP_CONNTRACK_CLOSE] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
+ timeouts[TCP_CONNTRACK_SYN_SENT2] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
+ timeouts[TCP_CONNTRACK_RETRANS] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_UNACK]) {
+ timeouts[TCP_CONNTRACK_UNACK] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
+ }
+ return 0;
+}
+
+static int
+tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
+ htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
+ htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
+ htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
+ htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
+ htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
+ htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
+ htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
+ htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
+ [CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static unsigned int tcp_sysctl_table_users;
static struct ctl_table_header *tcp_sysctl_header;
@@ -1301,14 +1413,14 @@
},
{
.procname = "nf_conntrack_tcp_timeout_max_retrans",
- .data = &nf_ct_tcp_timeout_max_retrans,
+ .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_unacknowledged",
- .data = &nf_ct_tcp_timeout_unacknowledged,
+ .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -1404,7 +1516,7 @@
},
{
.procname = "ip_conntrack_tcp_timeout_max_retrans",
- .data = &nf_ct_tcp_timeout_max_retrans,
+ .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -1445,6 +1557,7 @@
.print_tuple = tcp_print_tuple,
.print_conntrack = tcp_print_conntrack,
.packet = tcp_packet,
+ .get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1456,6 +1569,16 @@
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_TCP_MAX,
+ .obj_size = sizeof(unsigned int) *
+ TCP_CONNTRACK_TIMEOUT_MAX,
+ .nla_policy = tcp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &tcp_sysctl_table_users,
.ctl_table_header = &tcp_sysctl_header,
@@ -1477,6 +1600,7 @@
.print_tuple = tcp_print_tuple,
.print_conntrack = tcp_print_conntrack,
.packet = tcp_packet,
+ .get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1488,6 +1612,16 @@
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_TCP_MAX,
+ .obj_size = sizeof(unsigned int) *
+ TCP_CONNTRACK_TIMEOUT_MAX,
+ .nla_policy = tcp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &tcp_sysctl_table_users,
.ctl_table_header = &tcp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5f35757..a9073dc 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,8 +25,16 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
-static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
+enum udp_conntrack {
+ UDP_CT_UNREPLIED,
+ UDP_CT_REPLIED,
+ UDP_CT_MAX
+};
+
+static unsigned int udp_timeouts[UDP_CT_MAX] = {
+ [UDP_CT_UNREPLIED] = 30*HZ,
+ [UDP_CT_REPLIED] = 180*HZ,
+};
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
@@ -63,30 +71,38 @@
ntohs(tuple->dst.u.udp.port));
}
+static unsigned int *udp_get_timeouts(struct net *net)
+{
+ return udp_timeouts;
+}
+
/* Returns verdict for packet, and may modify conntrack type */
static int udp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDP_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
- } else
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
-
+ } else {
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDP_CT_UNREPLIED]);
+ }
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
@@ -136,20 +152,66 @@
return NF_ACCEPT;
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+
+ /* set default timeouts for UDP. */
+ timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
+ timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
+
+ if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
+ timeouts[UDP_CT_UNREPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
+ }
+ if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
+ timeouts[UDP_CT_REPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
+ }
+ return 0;
+}
+
+static int
+udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
+ htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
+ htonl(timeouts[UDP_CT_REPLIED] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
+ [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static unsigned int udp_sysctl_table_users;
static struct ctl_table_header *udp_sysctl_header;
static struct ctl_table udp_sysctl_table[] = {
{
.procname = "nf_conntrack_udp_timeout",
- .data = &nf_ct_udp_timeout,
+ .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udp_timeout_stream",
- .data = &nf_ct_udp_timeout_stream,
+ .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -160,14 +222,14 @@
static struct ctl_table udp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_udp_timeout",
- .data = &nf_ct_udp_timeout,
+ .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_udp_timeout_stream",
- .data = &nf_ct_udp_timeout_stream,
+ .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -186,6 +248,7 @@
.invert_tuple = udp_invert_tuple,
.print_tuple = udp_print_tuple,
.packet = udp_packet,
+ .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -194,6 +257,15 @@
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDP_MAX,
+ .obj_size = sizeof(unsigned int) * UDP_CT_MAX,
+ .nla_policy = udp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udp_sysctl_table_users,
.ctl_table_header = &udp_sysctl_header,
@@ -214,6 +286,7 @@
.invert_tuple = udp_invert_tuple,
.print_tuple = udp_print_tuple,
.packet = udp_packet,
+ .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -222,6 +295,15 @@
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDP_MAX,
+ .obj_size = sizeof(unsigned int) * UDP_CT_MAX,
+ .nla_policy = udp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udp_sysctl_table_users,
.ctl_table_header = &udp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index f52ca11..e060639 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -24,8 +24,16 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_log.h>
-static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ;
-static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ;
+enum udplite_conntrack {
+ UDPLITE_CT_UNREPLIED,
+ UDPLITE_CT_REPLIED,
+ UDPLITE_CT_MAX
+};
+
+static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
+ [UDPLITE_CT_UNREPLIED] = 30*HZ,
+ [UDPLITE_CT_REPLIED] = 180*HZ,
+};
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
@@ -60,31 +68,38 @@
ntohs(tuple->dst.u.udp.port));
}
+static unsigned int *udplite_get_timeouts(struct net *net)
+{
+ return udplite_timeouts;
+}
+
/* Returns verdict for packet, and may modify conntrack type */
static int udplite_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
- nf_ct_udplite_timeout_stream);
+ timeouts[UDPLITE_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
- } else
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
-
+ } else {
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDPLITE_CT_UNREPLIED]);
+ }
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
@@ -141,20 +156,66 @@
return NF_ACCEPT;
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+
+ /* set default timeouts for UDPlite. */
+ timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
+ timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
+
+ if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
+ timeouts[UDPLITE_CT_UNREPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
+ }
+ if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
+ timeouts[UDPLITE_CT_REPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
+ }
+ return 0;
+}
+
+static int
+udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
+ htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
+ htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
+ [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static unsigned int udplite_sysctl_table_users;
static struct ctl_table_header *udplite_sysctl_header;
static struct ctl_table udplite_sysctl_table[] = {
{
.procname = "nf_conntrack_udplite_timeout",
- .data = &nf_ct_udplite_timeout,
+ .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udplite_timeout_stream",
- .data = &nf_ct_udplite_timeout_stream,
+ .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -172,6 +233,7 @@
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
+ .get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -180,6 +242,16 @@
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
+ .obj_size = sizeof(unsigned int) *
+ UDPLITE_CT_MAX,
+ .nla_policy = udplite_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udplite_sysctl_table_users,
.ctl_table_header = &udplite_sysctl_header,
@@ -196,6 +268,7 @@
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
+ .get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -204,6 +277,16 @@
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
+ .obj_size = sizeof(unsigned int) *
+ UDPLITE_CT_MAX,
+ .nla_policy = udplite_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udplite_sysctl_table_users,
.ctl_table_header = &udplite_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
new file mode 100644
index 0000000..a878ce5b
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -0,0 +1,60 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later version, at your option).
+ */
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+
+struct ctnl_timeout *
+(*nf_ct_timeout_find_get_hook)(const char *name) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook);
+
+void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook);
+
+static struct nf_ct_ext_type timeout_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_timeout),
+ .align = __alignof__(struct nf_conn_timeout),
+ .id = NF_CT_EXT_TIMEOUT,
+};
+
+int nf_conntrack_timeout_init(struct net *net)
+{
+ int ret = 0;
+
+ if (net_eq(net, &init_net)) {
+ ret = nf_ct_extend_register(&timeout_extend);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_ct_timeout: Unable to register "
+ "timeout extension.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void nf_conntrack_timeout_fini(struct net *net)
+{
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&timeout_extend);
+}
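
This file only registers the conntrack extension; the accessors used later in xt_CT.c (nf_ct_timeout_find() and nf_ct_timeout_ext_add()) are static inlines in the new include/net/netfilter/nf_conntrack_timeout.h header. That header is not shown in this excerpt, so the following is a rough sketch of what those helpers are assumed to look like, built on the standard conntrack extension API.

	/* Approximation of the companion header, not the literal file. */
	struct nf_conn_timeout {
		struct ctnl_timeout	*timeout;
	};

	static inline struct nf_conn_timeout *
	nf_ct_timeout_find(const struct nf_conn *ct)
	{
	#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT);
	#else
		return NULL;
	#endif
	}

	static inline struct nf_conn_timeout *
	nf_ct_timeout_ext_add(struct nf_conn *ct, struct ctnl_timeout *timeout,
			      gfp_t gfp)
	{
	#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
		if (timeout_ext == NULL)
			return NULL;

		timeout_ext->timeout = timeout;
		return timeout_ext;
	#else
		return NULL;
	#endif
	}
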
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
new file mode 100644
index 0000000..fec29a4
--- /dev/null
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -0,0 +1,429 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later version, at your option).
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/rculist.h>
+#include <linux/rculist_nulls.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/security.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/netfilter.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tuning");
+
+static LIST_HEAD(cttimeout_list);
+
+static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
+ [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
+ [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
+ [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
+ [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
+};
+
+static int
+ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
+ struct nf_conntrack_l4proto *l4proto,
+ const struct nlattr *attr)
+{
+ int ret = 0;
+
+ if (likely(l4proto->ctnl_timeout.nlattr_to_obj)) {
+ struct nlattr *tb[l4proto->ctnl_timeout.nlattr_max+1];
+
+ nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
+ attr, l4proto->ctnl_timeout.nla_policy);
+
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
+ }
+ return ret;
+}
+
+static int
+cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ __u16 l3num;
+ __u8 l4num;
+ struct nf_conntrack_l4proto *l4proto;
+ struct ctnl_timeout *timeout, *matching = NULL;
+ char *name;
+ int ret;
+
+ if (!cda[CTA_TIMEOUT_NAME] ||
+ !cda[CTA_TIMEOUT_L3PROTO] ||
+ !cda[CTA_TIMEOUT_L4PROTO] ||
+ !cda[CTA_TIMEOUT_DATA])
+ return -EINVAL;
+
+ name = nla_data(cda[CTA_TIMEOUT_NAME]);
+ l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+ l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+
+ list_for_each_entry(timeout, &cttimeout_list, head) {
+ if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ matching = timeout;
+ break;
+ }
+
+ l4proto = __nf_ct_l4proto_find(l3num, l4num);
+
+ /* This protocol is not supported, skip. */
+ if (l4proto->l4proto != l4num)
+ return -EOPNOTSUPP;
+
+ if (matching) {
+ if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+ /* You cannot replace one timeout policy with another of a
+ * different kind, sorry.
+ */
+ if (matching->l3num != l3num ||
+ matching->l4num != l4num)
+ return -EINVAL;
+
+ ret = ctnl_timeout_parse_policy(matching, l4proto,
+ cda[CTA_TIMEOUT_DATA]);
+ return ret;
+ }
+ return -EBUSY;
+ }
+
+ timeout = kzalloc(sizeof(struct ctnl_timeout) +
+ l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
+ if (timeout == NULL)
+ return -ENOMEM;
+
+ ret = ctnl_timeout_parse_policy(timeout, l4proto,
+ cda[CTA_TIMEOUT_DATA]);
+ if (ret < 0)
+ goto err;
+
+ strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
+ timeout->l3num = l3num;
+ timeout->l4num = l4num;
+ atomic_set(&timeout->refcnt, 1);
+ list_add_tail_rcu(&timeout->head, &cttimeout_list);
+
+ return 0;
+err:
+ kfree(timeout);
+ return ret;
+}
+
+static int
+ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ int event, struct ctnl_timeout *timeout)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0;
+ struct nf_conntrack_l4proto *l4proto;
+
+ event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
+ NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
+ NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4num);
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
+ htonl(atomic_read(&timeout->refcnt)));
+
+ l4proto = __nf_ct_l4proto_find(timeout->l3num, timeout->l4num);
+
+ /* If the timeout object does not match the layer 4 protocol tracker,
+ * then skip dumping the data part since we don't know how to
+ * interpret it. This may happen for UDPlite, SCTP and DCCP since
+ * you can unload the module.
+ */
+ if (timeout->l4num != l4proto->l4proto)
+ goto out;
+
+ if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
+ struct nlattr *nest_parms;
+ int ret;
+
+ nest_parms = nla_nest_start(skb,
+ CTA_TIMEOUT_DATA | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
+ if (ret < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+ }
+out:
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ctnl_timeout *cur, *last;
+
+ if (cb->args[2])
+ return 0;
+
+ last = (struct ctnl_timeout *)cb->args[1];
+ if (cb->args[1])
+ cb->args[1] = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &cttimeout_list, head) {
+ if (last && cur != last)
+ continue;
+
+ if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
+ cb->args[1] = (unsigned long)cur;
+ break;
+ }
+ }
+ if (!cb->args[1])
+ cb->args[2] = 1;
+ rcu_read_unlock();
+ return skb->len;
+}
+
+static int
+cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ int ret = -ENOENT;
+ char *name;
+ struct ctnl_timeout *cur;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnl_timeout_dump,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+ if (!cda[CTA_TIMEOUT_NAME])
+ return -EINVAL;
+ name = nla_data(cda[CTA_TIMEOUT_NAME]);
+
+ list_for_each_entry(cur, &cttimeout_list, head) {
+ struct sk_buff *skb2;
+
+ if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ IPCTNL_MSG_TIMEOUT_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+ }
+ return ret;
+}
+
+/* try to delete object, fail if it is still in use. */
+static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
+{
+ int ret = 0;
+
+ /* we want to avoid races with nf_ct_timeout_find_get. */
+ if (atomic_dec_and_test(&timeout->refcnt)) {
+ /* We are protected by nfnl mutex. */
+ list_del_rcu(&timeout->head);
+ kfree_rcu(timeout, rcu_head);
+ } else {
+ /* still in use, restore reference counter. */
+ atomic_inc(&timeout->refcnt);
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int
+cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ char *name;
+ struct ctnl_timeout *cur;
+ int ret = -ENOENT;
+
+ if (!cda[CTA_TIMEOUT_NAME]) {
+ list_for_each_entry(cur, &cttimeout_list, head)
+ ctnl_timeout_try_del(cur);
+
+ return 0;
+ }
+ name = nla_data(cda[CTA_TIMEOUT_NAME]);
+
+ list_for_each_entry(cur, &cttimeout_list, head) {
+ if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ ret = ctnl_timeout_try_del(cur);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
+{
+ struct ctnl_timeout *timeout, *matching = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(timeout, &cttimeout_list, head) {
+ if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err;
+
+ if (!atomic_inc_not_zero(&timeout->refcnt)) {
+ module_put(THIS_MODULE);
+ goto err;
+ }
+ matching = timeout;
+ break;
+ }
+err:
+ rcu_read_unlock();
+ return matching;
+}
+
+static void ctnl_timeout_put(struct ctnl_timeout *timeout)
+{
+ atomic_dec(&timeout->refcnt);
+ module_put(THIS_MODULE);
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+
+static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
+ [IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+ [IPCTNL_MSG_TIMEOUT_GET] = { .call = cttimeout_get_timeout,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+ [IPCTNL_MSG_TIMEOUT_DELETE] = { .call = cttimeout_del_timeout,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+};
+
+static const struct nfnetlink_subsystem cttimeout_subsys = {
+ .name = "conntrack_timeout",
+ .subsys_id = NFNL_SUBSYS_CTNETLINK_TIMEOUT,
+ .cb_count = IPCTNL_MSG_TIMEOUT_MAX,
+ .cb = cttimeout_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_TIMEOUT);
+
+static int __init cttimeout_init(void)
+{
+ int ret;
+
+ ret = nfnetlink_subsys_register(&cttimeout_subsys);
+ if (ret < 0) {
+ pr_err("cttimeout_init: cannot register cttimeout with "
+ "nfnetlink.\n");
+ goto err_out;
+ }
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get);
+ RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put);
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+ return 0;
+
+err_out:
+ return ret;
+}
+
+static void __exit cttimeout_exit(void)
+{
+ struct ctnl_timeout *cur, *tmp;
+
+ pr_info("cttimeout: unregistering from nfnetlink.\n");
+
+ nfnetlink_subsys_unregister(&cttimeout_subsys);
+ list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
+ list_del_rcu(&cur->head);
+ /* We are sure that our objects have no clients at this point,
+ * it's safe to release them all without checking refcnt.
+ */
+ kfree_rcu(cur, rcu_head);
+ }
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
+ RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+}
+
+module_init(cttimeout_init);
+module_exit(cttimeout_exit);
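
The subsystem above is driven entirely over nfnetlink, so a named policy can be created from userspace with a single IPCTNL_MSG_TIMEOUT_NEW request. The sketch below uses libmnl and is not part of this patch set (conntrack-tools provides the intended `nfct timeout` front-end); only the attribute names and byte orders come from the policies above, the libmnl calls are the sketch's own assumptions, and error handling is omitted.

	/* Userspace sketch: create an IPv4/UDP timeout policy "udp-short"
	 * with a 20 s unreplied and 60 s replied timeout. */
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <libmnl/libmnl.h>
	#include <linux/netfilter/nfnetlink.h>
	#include <linux/netfilter/nfnetlink_cttimeout.h>

	int main(void)
	{
		char buf[MNL_SOCKET_BUFFER_SIZE];
		struct mnl_socket *nl = mnl_socket_open(NETLINK_NETFILTER);
		struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
		struct nfgenmsg *nfg;
		struct nlattr *nest;

		nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) |
				  IPCTNL_MSG_TIMEOUT_NEW;
		nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;

		nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
		nfg->nfgen_family = AF_UNSPEC;
		nfg->version = NFNETLINK_V0;
		nfg->res_id = 0;

		mnl_attr_put_strz(nlh, CTA_TIMEOUT_NAME, "udp-short");
		mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
		mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_UDP);

		/* Values are seconds on the wire; the kernel scales by HZ. */
		nest = mnl_attr_nest_start(nlh, CTA_TIMEOUT_DATA);
		mnl_attr_put_u32(nlh, CTA_TIMEOUT_UDP_UNREPLIED, htonl(20));
		mnl_attr_put_u32(nlh, CTA_TIMEOUT_UDP_REPLIED, htonl(60));
		mnl_attr_nest_end(nlh, nest);

		mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
		mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
		mnl_socket_close(nl);
		return 0;
	}
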
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0221d10..b873445 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -16,10 +16,11 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
-static unsigned int xt_ct_target(struct sk_buff *skb,
- const struct xt_action_param *par)
+static unsigned int xt_ct_target_v0(struct sk_buff *skb,
+ const struct xt_action_param *par)
{
const struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
@@ -35,6 +36,23 @@
return XT_CONTINUE;
}
+static unsigned int xt_ct_target_v1(struct sk_buff *skb,
+ const struct xt_action_param *par)
+{
+ const struct xt_ct_target_info_v1 *info = par->targinfo;
+ struct nf_conn *ct = info->ct;
+
+ /* Previously seen (loopback)? Ignore. */
+ if (skb->nfct != NULL)
+ return XT_CONTINUE;
+
+ atomic_inc(&ct->ct_general.use);
+ skb->nfct = &ct->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+
+ return XT_CONTINUE;
+}
+
static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
{
if (par->family == NFPROTO_IPV4) {
@@ -53,7 +71,7 @@
return 0;
}
-static int xt_ct_tg_check(const struct xt_tgchk_param *par)
+static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conntrack_tuple t;
@@ -130,7 +148,137 @@
return ret;
}
-static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
+static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
+{
+ struct xt_ct_target_info_v1 *info = par->targinfo;
+ struct nf_conntrack_tuple t;
+ struct nf_conn_help *help;
+ struct nf_conn *ct;
+ int ret = 0;
+ u8 proto;
+
+ if (info->flags & ~XT_CT_NOTRACK)
+ return -EINVAL;
+
+ if (info->flags & XT_CT_NOTRACK) {
+ ct = nf_ct_untracked_get();
+ atomic_inc(&ct->ct_general.use);
+ goto out;
+ }
+
+#ifndef CONFIG_NF_CONNTRACK_ZONES
+ if (info->zone)
+ goto err1;
+#endif
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ goto err1;
+
+ memset(&t, 0, sizeof(t));
+ ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+ ret = PTR_ERR(ct);
+ if (IS_ERR(ct))
+ goto err2;
+
+ ret = 0;
+ if ((info->ct_events || info->exp_events) &&
+ !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
+ GFP_KERNEL))
+ goto err3;
+
+ if (info->helper[0]) {
+ ret = -ENOENT;
+ proto = xt_ct_find_proto(par);
+ if (!proto) {
+ pr_info("You must specify a L4 protocol, "
+ "and not use inversions on it.\n");
+ goto err3;
+ }
+
+ ret = -ENOMEM;
+ help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ ret = -ENOENT;
+ help->helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (help->helper == NULL) {
+ pr_info("No such helper \"%s\"\n", info->helper);
+ goto err3;
+ }
+ }
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ if (info->timeout) {
+ typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
+ struct ctnl_timeout *timeout;
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_find_get =
+ rcu_dereference(nf_ct_timeout_find_get_hook);
+
+ if (timeout_find_get) {
+ const struct ipt_entry *e = par->entryinfo;
+
+ if (e->ip.invflags & IPT_INV_PROTO) {
+ ret = -EINVAL;
+ pr_info("You cannot use inversion on "
+ "L4 protocol\n");
+ goto err3;
+ }
+ timeout = timeout_find_get(info->timeout);
+ if (timeout == NULL) {
+ ret = -ENOENT;
+ pr_info("No such timeout policy \"%s\"\n",
+ info->timeout);
+ goto err3;
+ }
+ if (timeout->l3num != par->family) {
+ ret = -EINVAL;
+ pr_info("Timeout policy `%s' can only be "
+ "used by L3 protocol number %d\n",
+ info->timeout, timeout->l3num);
+ goto err3;
+ }
+ if (timeout->l4num != e->ip.proto) {
+ ret = -EINVAL;
+ pr_info("Timeout policy `%s' can only be "
+ "used by L4 protocol number %d\n",
+ info->timeout, timeout->l4num);
+ goto err3;
+ }
+ timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
+ GFP_KERNEL);
+ if (timeout_ext == NULL) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ } else {
+ ret = -ENOENT;
+ pr_info("Timeout policy base is empty\n");
+ goto err3;
+ }
+ }
+#endif
+
+ __set_bit(IPS_TEMPLATE_BIT, &ct->status);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+out:
+ info->ct = ct;
+ return 0;
+
+err3:
+ nf_conntrack_free(ct);
+err2:
+ nf_ct_l3proto_module_put(par->family);
+err1:
+ return ret;
+}
+
+static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
@@ -146,25 +294,67 @@
nf_ct_put(info->ct);
}
-static struct xt_target xt_ct_tg __read_mostly = {
- .name = "CT",
- .family = NFPROTO_UNSPEC,
- .targetsize = sizeof(struct xt_ct_target_info),
- .checkentry = xt_ct_tg_check,
- .destroy = xt_ct_tg_destroy,
- .target = xt_ct_target,
- .table = "raw",
- .me = THIS_MODULE,
+static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+{
+ struct xt_ct_target_info_v1 *info = par->targinfo;
+ struct nf_conn *ct = info->ct;
+ struct nf_conn_help *help;
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+ typeof(nf_ct_timeout_put_hook) timeout_put;
+#endif
+ if (!nf_ct_is_untracked(ct)) {
+ help = nfct_help(ct);
+ if (help)
+ module_put(help->helper->me);
+
+ nf_ct_l3proto_module_put(par->family);
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+
+ if (timeout_put) {
+ timeout_ext = nf_ct_timeout_find(ct);
+ if (timeout_ext)
+ timeout_put(timeout_ext->timeout);
+ }
+#endif
+ }
+ nf_ct_put(info->ct);
+}
+
+static struct xt_target xt_ct_tg_reg[] __read_mostly = {
+ {
+ .name = "CT",
+ .family = NFPROTO_UNSPEC,
+ .targetsize = sizeof(struct xt_ct_target_info),
+ .checkentry = xt_ct_tg_check_v0,
+ .destroy = xt_ct_tg_destroy_v0,
+ .target = xt_ct_target_v0,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CT",
+ .family = NFPROTO_UNSPEC,
+ .revision = 1,
+ .targetsize = sizeof(struct xt_ct_target_info_v1),
+ .checkentry = xt_ct_tg_check_v1,
+ .destroy = xt_ct_tg_destroy_v1,
+ .target = xt_ct_target_v1,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
};
static int __init xt_ct_tg_init(void)
{
- return xt_register_target(&xt_ct_tg);
+ return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
static void __exit xt_ct_tg_exit(void)
{
- xt_unregister_target(&xt_ct_tg);
+ xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
module_init(xt_ct_tg_init);
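
The v1 revision of the CT target is what lets a rule reference one of the named policies by string: xt_ct_tg_check_v1() looks the name up through nf_ct_timeout_find_get_hook and attaches the result to the template conntrack, so every connection created through that rule starts with the policy's timeouts instead of the per-protocol defaults. From userspace this would be selected with something like `iptables -t raw -A PREROUTING -p udp -j CT --timeout udp-short`, option name as implemented in the matching iptables release rather than in this excerpt. The targinfo layout the kernel reads is defined in the uapi xt_CT.h header added by this series; reconstructed from memory, it looks roughly like:

	/* Approximation of the uapi structure read by xt_ct_tg_check_v1();
	 * the timeout[] length is expected to match CTNL_TIMEOUT_NAME_MAX. */
	struct xt_ct_target_info_v1 {
		__u16 flags;
		__u16 zone;
		__u32 ct_events;
		__u32 exp_events;
		char helper[16];
		char timeout[32];

		/* Used internally by the kernel. */
		struct nf_conn *ct __attribute__((aligned(8)));
	};
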
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
new file mode 100644
index 0000000..f99f8de
--- /dev/null
+++ b/net/netfilter/xt_LOG.c
@@ -0,0 +1,925 @@
+/*
+ * This is a module which is used for logging packets.
+ */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <net/netfilter/xt_log.h>
+
+static struct nf_loginfo default_loginfo = {
+ .type = NF_LOG_TYPE_LOG,
+ .u = {
+ .log = {
+ .level = 5,
+ .logflags = NF_LOG_MASK,
+ },
+ },
+};
+
+static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
+ u8 proto, int fragment, unsigned int offset)
+{
+ struct udphdr _udph;
+ const struct udphdr *uh;
+
+ if (proto == IPPROTO_UDP)
+ /* Max length: 10 "PROTO=UDP " */
+ sb_add(m, "PROTO=UDP ");
+ else /* Max length: 14 "PROTO=UDPLITE " */
+ sb_add(m, "PROTO=UDPLITE ");
+
+ if (fragment)
+ goto out;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+
+ return 1;
+ }
+
+ /* Max length: 20 "SPT=65535 DPT=65535 " */
+ sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
+ ntohs(uh->len));
+
+out:
+ return 0;
+}
+
+static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
+ u8 proto, int fragment, unsigned int offset,
+ unsigned int logflags)
+{
+ struct tcphdr _tcph;
+ const struct tcphdr *th;
+
+ /* Max length: 10 "PROTO=TCP " */
+ sb_add(m, "PROTO=TCP ");
+
+ if (fragment)
+ return 0;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+ return 1;
+ }
+
+ /* Max length: 20 "SPT=65535 DPT=65535 " */
+ sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
+ /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
+ if (logflags & XT_LOG_TCPSEQ)
+ sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
+
+ /* Max length: 13 "WINDOW=65535 " */
+ sb_add(m, "WINDOW=%u ", ntohs(th->window));
+ /* Max length: 9 "RES=0x3C " */
+ sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
+ TCP_RESERVED_BITS) >> 22));
+ /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
+ if (th->cwr)
+ sb_add(m, "CWR ");
+ if (th->ece)
+ sb_add(m, "ECE ");
+ if (th->urg)
+ sb_add(m, "URG ");
+ if (th->ack)
+ sb_add(m, "ACK ");
+ if (th->psh)
+ sb_add(m, "PSH ");
+ if (th->rst)
+ sb_add(m, "RST ");
+ if (th->syn)
+ sb_add(m, "SYN ");
+ if (th->fin)
+ sb_add(m, "FIN ");
+ /* Max length: 11 "URGP=65535 " */
+ sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
+
+ if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
+ u_int8_t _opt[60 - sizeof(struct tcphdr)];
+ const u_int8_t *op;
+ unsigned int i;
+ unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
+
+ op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
+ optsize, _opt);
+ if (op == NULL) {
+ sb_add(m, "OPT (TRUNCATED)");
+ return 1;
+ }
+
+ /* Max length: 127 "OPT (" 15*4*2chars ") " */
+ sb_add(m, "OPT (");
+ for (i = 0; i < optsize; i++)
+ sb_add(m, "%02X", op[i]);
+
+ sb_add(m, ") ");
+ }
+
+ return 0;
+}
+
+/* One level of recursion won't kill us */
+static void dump_ipv4_packet(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb,
+ unsigned int iphoff)
+{
+ struct iphdr _iph;
+ const struct iphdr *ih;
+ unsigned int logflags;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+ else
+ logflags = NF_LOG_MASK;
+
+ ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
+ if (ih == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Important fields:
+ * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
+ /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
+ sb_add(m, "SRC=%pI4 DST=%pI4 ",
+ &ih->saddr, &ih->daddr);
+
+ /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
+ sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
+ ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
+ ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
+
+ /* Max length: 6 "CE DF MF " */
+ if (ntohs(ih->frag_off) & IP_CE)
+ sb_add(m, "CE ");
+ if (ntohs(ih->frag_off) & IP_DF)
+ sb_add(m, "DF ");
+ if (ntohs(ih->frag_off) & IP_MF)
+ sb_add(m, "MF ");
+
+ /* Max length: 11 "FRAG:65535 " */
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
+
+ if ((logflags & XT_LOG_IPOPT) &&
+ ih->ihl * 4 > sizeof(struct iphdr)) {
+ const unsigned char *op;
+ unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
+ unsigned int i, optsize;
+
+ optsize = ih->ihl * 4 - sizeof(struct iphdr);
+ op = skb_header_pointer(skb, iphoff+sizeof(_iph),
+ optsize, _opt);
+ if (op == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Max length: 127 "OPT (" 15*4*2chars ") " */
+ sb_add(m, "OPT (");
+ for (i = 0; i < optsize; i++)
+ sb_add(m, "%02X", op[i]);
+ sb_add(m, ") ");
+ }
+
+ switch (ih->protocol) {
+ case IPPROTO_TCP:
+ if (dump_tcp_header(m, skb, ih->protocol,
+ ntohs(ih->frag_off) & IP_OFFSET,
+ iphoff+ih->ihl*4, logflags))
+ return;
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ if (dump_udp_header(m, skb, ih->protocol,
+ ntohs(ih->frag_off) & IP_OFFSET,
+ iphoff+ih->ihl*4))
+ return;
+ break;
+ case IPPROTO_ICMP: {
+ struct icmphdr _icmph;
+ const struct icmphdr *ich;
+ static const size_t required_len[NR_ICMP_TYPES+1]
+ = { [ICMP_ECHOREPLY] = 4,
+ [ICMP_DEST_UNREACH]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_SOURCE_QUENCH]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_REDIRECT]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_ECHO] = 4,
+ [ICMP_TIME_EXCEEDED]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_PARAMETERPROB]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_TIMESTAMP] = 20,
+ [ICMP_TIMESTAMPREPLY] = 20,
+ [ICMP_ADDRESS] = 12,
+ [ICMP_ADDRESSREPLY] = 12 };
+
+ /* Max length: 11 "PROTO=ICMP " */
+ sb_add(m, "PROTO=ICMP ");
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
+ sizeof(_icmph), &_icmph);
+ if (ich == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ /* Max length: 18 "TYPE=255 CODE=255 " */
+ sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (ich->type <= NR_ICMP_TYPES &&
+ required_len[ich->type] &&
+ skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ switch (ich->type) {
+ case ICMP_ECHOREPLY:
+ case ICMP_ECHO:
+ /* Max length: 19 "ID=65535 SEQ=65535 " */
+ sb_add(m, "ID=%u SEQ=%u ",
+ ntohs(ich->un.echo.id),
+ ntohs(ich->un.echo.sequence));
+ break;
+
+ case ICMP_PARAMETERPROB:
+ /* Max length: 14 "PARAMETER=255 " */
+ sb_add(m, "PARAMETER=%u ",
+ ntohl(ich->un.gateway) >> 24);
+ break;
+ case ICMP_REDIRECT:
+ /* Max length: 24 "GATEWAY=255.255.255.255 " */
+ sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
+ /* Fall through */
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_TIME_EXCEEDED:
+ /* Max length: 3+maxlen */
+ if (!iphoff) { /* Only recurse once. */
+ sb_add(m, "[");
+ dump_ipv4_packet(m, info, skb,
+ iphoff + ih->ihl*4+sizeof(_icmph));
+ sb_add(m, "] ");
+ }
+
+ /* Max length: 10 "MTU=65535 " */
+ if (ich->type == ICMP_DEST_UNREACH &&
+ ich->code == ICMP_FRAG_NEEDED)
+ sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
+ }
+ break;
+ }
+ /* Max Length */
+ case IPPROTO_AH: {
+ struct ip_auth_hdr _ahdr;
+ const struct ip_auth_hdr *ah;
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 9 "PROTO=AH " */
+ sb_add(m, "PROTO=AH ");
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
+ sizeof(_ahdr), &_ahdr);
+ if (ah == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 " */
+ sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
+ break;
+ }
+ case IPPROTO_ESP: {
+ struct ip_esp_hdr _esph;
+ const struct ip_esp_hdr *eh;
+
+ /* Max length: 10 "PROTO=ESP " */
+ sb_add(m, "PROTO=ESP ");
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
+ sizeof(_esph), &_esph);
+ if (eh == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 " */
+ sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
+ break;
+ }
+ /* Max length: 10 "PROTO=255 " */
+ default:
+ sb_add(m, "PROTO=%u ", ih->protocol);
+ }
+
+ /* Max length: 15 "UID=4294967295 " */
+ if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
+ read_lock_bh(&skb->sk->sk_callback_lock);
+ if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+ sb_add(m, "UID=%u GID=%u ",
+ skb->sk->sk_socket->file->f_cred->fsuid,
+ skb->sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ }
+
+ /* Max length: 16 "MARK=0xFFFFFFFF " */
+ if (!iphoff && skb->mark)
+ sb_add(m, "MARK=0x%x ", skb->mark);
+
+ /* Proto Max log string length */
+ /* IP: 40+46+6+11+127 = 230 */
+ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
+ /* UDP: 10+max(25,20) = 35 */
+ /* UDPLITE: 14+max(25,20) = 39 */
+ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
+ /* ESP: 10+max(25)+15 = 50 */
+ /* AH: 9+max(25)+15 = 49 */
+ /* unknown: 10 */
+
+ /* (ICMP allows recursion one level deep) */
+ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
+ /* maxlen = 230+ 91 + 230 + 252 = 803 */
+}
+
+static void dump_ipv4_mac_header(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ unsigned int logflags = 0;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+
+ if (!(logflags & XT_LOG_MACDECODE))
+ goto fallback;
+
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+ sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+ return;
+ default:
+ break;
+ }
+
+fallback:
+ sb_add(m, "MAC=");
+ if (dev->hard_header_len &&
+ skb->mac_header != skb->network_header) {
+ const unsigned char *p = skb_mac_header(skb);
+ unsigned int i;
+
+ sb_add(m, "%02x", *p++);
+ for (i = 1; i < dev->hard_header_len; i++, p++)
+ sb_add(m, ":%02x", *p);
+ }
+ sb_add(m, " ");
+}
+
+static void
+log_packet_common(struct sbuff *m,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
+ prefix,
+ in ? in->name : "",
+ out ? out->name : "");
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge) {
+ const struct net_device *physindev;
+ const struct net_device *physoutdev;
+
+ physindev = skb->nf_bridge->physindev;
+ if (physindev && in != physindev)
+ sb_add(m, "PHYSIN=%s ", physindev->name);
+ physoutdev = skb->nf_bridge->physoutdev;
+ if (physoutdev && out != physoutdev)
+ sb_add(m, "PHYSOUT=%s ", physoutdev->name);
+ }
+#endif
+}
+
+
+static void
+ipt_log_packet(u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ struct sbuff *m = sb_open();
+
+ if (!loginfo)
+ loginfo = &default_loginfo;
+
+ log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
+
+ if (in != NULL)
+ dump_ipv4_mac_header(m, loginfo, skb);
+
+ dump_ipv4_packet(m, loginfo, skb, 0);
+
+ sb_close(m);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* One level of recursion won't kill us */
+static void dump_ipv6_packet(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb, unsigned int ip6hoff,
+ int recurse)
+{
+ u_int8_t currenthdr;
+ int fragment;
+ struct ipv6hdr _ip6h;
+ const struct ipv6hdr *ih;
+ unsigned int ptr;
+ unsigned int hdrlen = 0;
+ unsigned int logflags;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+ else
+ logflags = NF_LOG_MASK;
+
+ ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
+ if (ih == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
+ sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
+
+ /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
+ sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
+ ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
+ (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
+ ih->hop_limit,
+ (ntohl(*(__be32 *)ih) & 0x000fffff));
+
+ fragment = 0;
+ ptr = ip6hoff + sizeof(struct ipv6hdr);
+ currenthdr = ih->nexthdr;
+ while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
+ struct ipv6_opt_hdr _hdr;
+ const struct ipv6_opt_hdr *hp;
+
+ hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
+ if (hp == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Max length: 48 "OPT (...) " */
+ if (logflags & XT_LOG_IPOPT)
+ sb_add(m, "OPT ( ");
+
+ switch (currenthdr) {
+ case IPPROTO_FRAGMENT: {
+ struct frag_hdr _fhdr;
+ const struct frag_hdr *fh;
+
+ sb_add(m, "FRAG:");
+ fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
+ &_fhdr);
+ if (fh == NULL) {
+ sb_add(m, "TRUNCATED ");
+ return;
+ }
+
+ /* Max length: 6 "65535 " */
+ sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
+
+ /* Max length: 11 "INCOMPLETE " */
+ if (fh->frag_off & htons(0x0001))
+ sb_add(m, "INCOMPLETE ");
+
+ sb_add(m, "ID:%08x ", ntohl(fh->identification));
+
+ if (ntohs(fh->frag_off) & 0xFFF8)
+ fragment = 1;
+
+ hdrlen = 8;
+
+ break;
+ }
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_HOPOPTS:
+ if (fragment) {
+ if (logflags & XT_LOG_IPOPT)
+ sb_add(m, ")");
+ return;
+ }
+ hdrlen = ipv6_optlen(hp);
+ break;
+ /* Max Length */
+ case IPPROTO_AH:
+ if (logflags & XT_LOG_IPOPT) {
+ struct ip_auth_hdr _ahdr;
+ const struct ip_auth_hdr *ah;
+
+ /* Max length: 3 "AH " */
+ sb_add(m, "AH ");
+
+ if (fragment) {
+ sb_add(m, ")");
+ return;
+ }
+
+ ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
+ &_ahdr);
+ if (ah == NULL) {
+ /*
+ * Max length: 26 "INCOMPLETE [65535
+ * bytes] )"
+ */
+ sb_add(m, "INCOMPLETE [%u bytes] )",
+ skb->len - ptr);
+ return;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 " */
+ sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
+
+ }
+
+ hdrlen = (hp->hdrlen+2)<<2;
+ break;
+ case IPPROTO_ESP:
+ if (logflags & XT_LOG_IPOPT) {
+ struct ip_esp_hdr _esph;
+ const struct ip_esp_hdr *eh;
+
+ /* Max length: 4 "ESP " */
+ sb_add(m, "ESP ");
+
+ if (fragment) {
+ sb_add(m, ")");
+ return;
+ }
+
+ /*
+ * Max length: 26 "INCOMPLETE [65535 bytes] )"
+ */
+ eh = skb_header_pointer(skb, ptr, sizeof(_esph),
+ &_esph);
+ if (eh == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] )",
+ skb->len - ptr);
+ return;
+ }
+
+ /* Length: 16 "SPI=0xF1234567 )" */
+ sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
+
+ }
+ return;
+ default:
+ /* Max length: 20 "Unknown Ext Hdr 255" */
+ sb_add(m, "Unknown Ext Hdr %u", currenthdr);
+ return;
+ }
+ if (logflags & XT_LOG_IPOPT)
+ sb_add(m, ") ");
+
+ currenthdr = hp->nexthdr;
+ ptr += hdrlen;
+ }
+
+ switch (currenthdr) {
+ case IPPROTO_TCP:
+ if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
+ logflags))
+ return;
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
+ return;
+ break;
+ case IPPROTO_ICMPV6: {
+ struct icmp6hdr _icmp6h;
+ const struct icmp6hdr *ic;
+
+ /* Max length: 13 "PROTO=ICMPv6 " */
+ sb_add(m, "PROTO=ICMPv6 ");
+
+ if (fragment)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
+ if (ic == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
+ return;
+ }
+
+ /* Max length: 18 "TYPE=255 CODE=255 " */
+ sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
+
+ switch (ic->icmp6_type) {
+ case ICMPV6_ECHO_REQUEST:
+ case ICMPV6_ECHO_REPLY:
+ /* Max length: 19 "ID=65535 SEQ=65535 " */
+ sb_add(m, "ID=%u SEQ=%u ",
+ ntohs(ic->icmp6_identifier),
+ ntohs(ic->icmp6_sequence));
+ break;
+ case ICMPV6_MGM_QUERY:
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MGM_REDUCTION:
+ break;
+
+ case ICMPV6_PARAMPROB:
+ /* Max length: 17 "POINTER=ffffffff " */
+ sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
+ /* Fall through */
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ /* Max length: 3+maxlen */
+ if (recurse) {
+ sb_add(m, "[");
+ dump_ipv6_packet(m, info, skb,
+ ptr + sizeof(_icmp6h), 0);
+ sb_add(m, "] ");
+ }
+
+ /* Max length: 10 "MTU=65535 " */
+ if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
+ sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
+ }
+ break;
+ }
+ /* Max length: 10 "PROTO=255 " */
+ default:
+ sb_add(m, "PROTO=%u ", currenthdr);
+ }
+
+ /* Max length: 15 "UID=4294967295 " */
+ if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
+ read_lock_bh(&skb->sk->sk_callback_lock);
+ if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+ sb_add(m, "UID=%u GID=%u ",
+ skb->sk->sk_socket->file->f_cred->fsuid,
+ skb->sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ }
+
+ /* Max length: 16 "MARK=0xFFFFFFFF " */
+ if (!recurse && skb->mark)
+ sb_add(m, "MARK=0x%x ", skb->mark);
+}
+
+static void dump_ipv6_mac_header(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ unsigned int logflags = 0;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+
+ if (!(logflags & XT_LOG_MACDECODE))
+ goto fallback;
+
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+ sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+ return;
+ default:
+ break;
+ }
+
+fallback:
+ sb_add(m, "MAC=");
+ if (dev->hard_header_len &&
+ skb->mac_header != skb->network_header) {
+ const unsigned char *p = skb_mac_header(skb);
+ unsigned int len = dev->hard_header_len;
+ unsigned int i;
+
+ if (dev->type == ARPHRD_SIT) {
+ p -= ETH_HLEN;
+
+ if (p < skb->head)
+ p = NULL;
+ }
+
+ if (p != NULL) {
+ sb_add(m, "%02x", *p++);
+ for (i = 1; i < len; i++)
+ sb_add(m, ":%02x", *p++);
+ }
+ sb_add(m, " ");
+
+ if (dev->type == ARPHRD_SIT) {
+ const struct iphdr *iph =
+ (struct iphdr *)skb_mac_header(skb);
+ sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
+ &iph->daddr);
+ }
+ } else
+ sb_add(m, " ");
+}
+
+static void
+ip6t_log_packet(u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ struct sbuff *m = sb_open();
+
+ if (!loginfo)
+ loginfo = &default_loginfo;
+
+ log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
+
+ if (in != NULL)
+ dump_ipv6_mac_header(m, loginfo, skb);
+
+ dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+
+ sb_close(m);
+}
+#endif
+
+static unsigned int
+log_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_log_info *loginfo = par->targinfo;
+ struct nf_loginfo li;
+
+ li.type = NF_LOG_TYPE_LOG;
+ li.u.log.level = loginfo->level;
+ li.u.log.logflags = loginfo->logflags;
+
+ if (par->family == NFPROTO_IPV4)
+ ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
+ par->out, &li, loginfo->prefix);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (par->family == NFPROTO_IPV6)
+ ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
+ par->out, &li, loginfo->prefix);
+#endif
+ else
+ WARN_ON_ONCE(1);
+
+ return XT_CONTINUE;
+}
+
+static int log_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_log_info *loginfo = par->targinfo;
+
+ if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
+ return -EINVAL;
+
+ if (loginfo->level >= 8) {
+ pr_debug("level %u >= 8\n", loginfo->level);
+ return -EINVAL;
+ }
+
+ if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
+ pr_debug("prefix is not null-terminated\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct xt_target log_tg_regs[] __read_mostly = {
+ {
+ .name = "LOG",
+ .family = NFPROTO_IPV4,
+ .target = log_tg,
+ .targetsize = sizeof(struct xt_log_info),
+ .checkentry = log_tg_check,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .name = "LOG",
+ .family = NFPROTO_IPV6,
+ .target = log_tg,
+ .targetsize = sizeof(struct xt_log_info),
+ .checkentry = log_tg_check,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static struct nf_logger ipt_log_logger __read_mostly = {
+ .name = "ipt_LOG",
+ .logfn = &ipt_log_packet,
+ .me = THIS_MODULE,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct nf_logger ip6t_log_logger __read_mostly = {
+ .name = "ip6t_LOG",
+ .logfn = &ip6t_log_packet,
+ .me = THIS_MODULE,
+};
+#endif
+
+static int __init log_tg_init(void)
+{
+ int ret;
+
+ ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
+ if (ret < 0)
+ return ret;
+
+ nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
+#endif
+ return 0;
+}
+
+static void __exit log_tg_exit(void)
+{
+ nf_log_unregister(&ipt_log_logger);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_log_unregister(&ip6t_log_logger);
+#endif
+ xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
+}
+
+module_init(log_tg_init);
+module_exit(log_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
+MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging");
+MODULE_ALIAS("ipt_LOG");
+MODULE_ALIAS("ip6t_LOG");
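A note on the fragment handling in dump_ipv4_packet() above: the flags and the
fragment offset are packed into the single frag_off field, and the dump simply
masks them out after byte-swapping. Below is a minimal user-space sketch of the
same decoding, not part of the patch; the IP_* constants are defined locally to
mirror the kernel's values.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP_CE     0x8000	/* congestion */
#define IP_DF     0x4000	/* don't fragment */
#define IP_MF     0x2000	/* more fragments follow */
#define IP_OFFSET 0x1FFF	/* fragment offset, in 8-byte units */

static void decode_frag_off(uint16_t frag_off_net)
{
	uint16_t v = ntohs(frag_off_net);

	if (v & IP_CE)
		printf("CE ");
	if (v & IP_DF)
		printf("DF ");
	if (v & IP_MF)
		printf("MF ");
	if (v & IP_OFFSET)
		printf("FRAG:%u ", v & IP_OFFSET);
	printf("\n");
}

int main(void)
{
	/* a middle fragment at offset 185 (in 8-byte units) with MF set */
	decode_frag_off(htons(IP_MF | 185));
	return 0;
}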
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index da67756..9d68441 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -30,7 +30,7 @@
static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX];
static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
- int kern)
+ int kern)
{
int rc = -EPROTONOSUPPORT;
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 6089aca..295d129 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -181,13 +181,13 @@
return rc;
}
-int nfc_dep_link_up(struct nfc_dev *dev, int target_index,
- u8 comm_mode, u8 rf_mode)
+int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
{
int rc = 0;
+ u8 *gb;
+ size_t gb_len;
- pr_debug("dev_name=%s comm:%d rf:%d\n",
- dev_name(&dev->dev), comm_mode, rf_mode);
+ pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
if (!dev->ops->dep_link_up)
return -EOPNOTSUPP;
@@ -204,7 +204,13 @@
goto error;
}
- rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode);
+ gb = nfc_llcp_general_bytes(dev, &gb_len);
+ if (gb_len > NFC_MAX_GT_LEN) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
error:
device_unlock(&dev->dev);
@@ -250,7 +256,7 @@
}
int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
- u8 comm_mode, u8 rf_mode)
+ u8 comm_mode, u8 rf_mode)
{
dev->dep_link_up = true;
dev->dep_rf_mode = rf_mode;
@@ -330,10 +336,8 @@
*
* The user must wait for the callback before calling this function again.
*/
-int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
- struct sk_buff *skb,
- data_exchange_cb_t cb,
- void *cb_context)
+int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
int rc;
@@ -357,8 +361,7 @@
int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
{
- pr_debug("dev_name=%s gb_len=%d\n",
- dev_name(&dev->dev), gb_len);
+ pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);
if (gb_len > NFC_MAX_GT_LEN)
return -EINVAL;
@@ -367,12 +370,6 @@
}
EXPORT_SYMBOL(nfc_set_remote_general_bytes);
-u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len)
-{
- return nfc_llcp_general_bytes(dev, gt_len);
-}
-EXPORT_SYMBOL(nfc_get_local_general_bytes);
-
/**
* nfc_alloc_send_skb - allocate a skb for data exchange responses
*
@@ -380,8 +377,8 @@
* @gfp: gfp flags
*/
struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
- unsigned int flags, unsigned int size,
- unsigned int *err)
+ unsigned int flags, unsigned int size,
+ unsigned int *err)
{
struct sk_buff *skb;
unsigned int total_size;
@@ -428,8 +425,8 @@
* are found. After calling this function, the device driver must stop
* polling for targets.
*/
-int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
- int n_targets)
+int nfc_targets_found(struct nfc_dev *dev,
+ struct nfc_target *targets, int n_targets)
{
pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
@@ -441,7 +438,7 @@
kfree(dev->targets);
dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
- GFP_ATOMIC);
+ GFP_ATOMIC);
if (!dev->targets) {
dev->n_targets = 0;
@@ -501,15 +498,14 @@
* @supported_protocols: NFC protocols supported by the device
*/
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
- u32 supported_protocols,
- int tx_headroom,
- int tx_tailroom)
+ u32 supported_protocols,
+ int tx_headroom, int tx_tailroom)
{
static atomic_t dev_no = ATOMIC_INIT(0);
struct nfc_dev *dev;
if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
- !ops->deactivate_target || !ops->data_exchange)
+ !ops->deactivate_target || !ops->data_exchange)
return NULL;
if (!supported_protocols)
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 151f2ef..7b76eb7 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -118,7 +118,7 @@
}
int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len)
+ u8 *tlv_array, u16 tlv_array_len)
{
u8 *tlv = tlv_array, type, length, offset = 0;
@@ -152,6 +152,8 @@
case LLCP_TLV_RW:
local->remote_rw = llcp_tlv_rw(tlv);
break;
+ case LLCP_TLV_SN:
+ break;
default:
pr_err("Invalid gt tlv value 0x%x\n", type);
break;
@@ -162,15 +164,15 @@
}
pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
- local->remote_version, local->remote_miu,
- local->remote_lto, local->remote_opt,
- local->remote_wks, local->remote_rw);
+ local->remote_version, local->remote_miu,
+ local->remote_lto, local->remote_opt,
+ local->remote_wks, local->remote_rw);
return 0;
}
static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
- u8 dsap, u8 ssap, u8 ptype)
+ u8 dsap, u8 ssap, u8 ptype)
{
u8 header[2];
@@ -186,7 +188,8 @@
return pdu;
}
-static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length)
+static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv,
+ u8 tlv_length)
{
/* XXX Add an skb length check */
@@ -199,7 +202,7 @@
}
static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
- u8 cmd, u16 size)
+ u8 cmd, u16 size)
{
struct sk_buff *skb;
int err;
@@ -208,7 +211,7 @@
return NULL;
skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
- size + LLCP_HEADER_SIZE, &err);
+ size + LLCP_HEADER_SIZE, &err);
if (skb == NULL) {
pr_err("Could not allocate PDU\n");
return NULL;
@@ -276,7 +279,7 @@
skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
return nfc_data_exchange(dev, local->target_idx, skb,
- nfc_llcp_recv, local);
+ nfc_llcp_recv, local);
}
int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
@@ -284,6 +287,9 @@
struct nfc_llcp_local *local;
struct sk_buff *skb;
u8 *service_name_tlv = NULL, service_name_tlv_length;
+ u8 *miux_tlv = NULL, miux_tlv_length;
+ u8 *rw_tlv = NULL, rw_tlv_length, rw;
+ __be16 miux;
int err;
u16 size = 0;
@@ -295,12 +301,21 @@
if (sock->service_name != NULL) {
service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
- sock->service_name,
- sock->service_name_len,
- &service_name_tlv_length);
+ sock->service_name,
+ sock->service_name_len,
+ &service_name_tlv_length);
size += service_name_tlv_length;
}
+ miux = cpu_to_be16(LLCP_MAX_MIUX);
+ miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ &miux_tlv_length);
+ size += miux_tlv_length;
+
+ rw = LLCP_MAX_RW;
+ rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ size += rw_tlv_length;
+
pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
@@ -311,7 +326,10 @@
if (service_name_tlv != NULL)
skb = llcp_add_tlv(skb, service_name_tlv,
- service_name_tlv_length);
+ service_name_tlv_length);
+
+ skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
+ skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
skb_queue_tail(&local->tx_queue, skb);
@@ -321,6 +339,8 @@
pr_err("error %d\n", err);
kfree(service_name_tlv);
+ kfree(miux_tlv);
+ kfree(rw_tlv);
return err;
}
@@ -329,6 +349,11 @@
{
struct nfc_llcp_local *local;
struct sk_buff *skb;
+ u8 *miux_tlv = NULL, miux_tlv_length;
+ u8 *rw_tlv = NULL, rw_tlv_length, rw;
+ __be16 miux;
+ int err;
+ u16 size = 0;
pr_debug("Sending CC\n");
@@ -336,13 +361,35 @@
if (local == NULL)
return -ENODEV;
- skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0);
- if (skb == NULL)
- return -ENOMEM;
+ miux = cpu_to_be16(LLCP_MAX_MIUX);
+ miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ &miux_tlv_length);
+ size += miux_tlv_length;
+
+ rw = LLCP_MAX_RW;
+ rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ size += rw_tlv_length;
+
+ skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
+ if (skb == NULL) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
+
+ skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
+ skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
skb_queue_tail(&local->tx_queue, skb);
return 0;
+
+error_tlv:
+ pr_err("error %d\n", err);
+
+ kfree(miux_tlv);
+ kfree(rw_tlv);
+
+ return err;
}
int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
@@ -397,3 +444,87 @@
return 0;
}
+
+int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sk_buff *pdu;
+ struct sock *sk = &sock->sk;
+ struct nfc_llcp_local *local;
+ size_t frag_len = 0, remaining_len;
+ u8 *msg_data, *msg_ptr;
+
+ pr_debug("Send I frame len %zd\n", len);
+
+ local = sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ msg_data = kzalloc(len, GFP_KERNEL);
+ if (msg_data == NULL)
+ return -ENOMEM;
+
+ if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
+ kfree(msg_data);
+ return -EFAULT;
+ }
+
+ remaining_len = len;
+ msg_ptr = msg_data;
+
+ while (remaining_len > 0) {
+
+ frag_len = min_t(u16, local->remote_miu, remaining_len);
+
+ pr_debug("Fragment %zd bytes remaining %zd\n",
+ frag_len, remaining_len);
+
+ pdu = llcp_allocate_pdu(sock, LLCP_PDU_I,
+ frag_len + LLCP_SEQUENCE_SIZE);
+ if (pdu == NULL) {
+ kfree(msg_data);
+ return -ENOMEM;
+ }
+
+ skb_put(pdu, LLCP_SEQUENCE_SIZE);
+
+ memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
+
+ skb_queue_head(&sock->tx_queue, pdu);
+
+ lock_sock(sk);
+
+ nfc_llcp_queue_i_frames(sock);
+
+ release_sock(sk);
+
+ remaining_len -= frag_len;
+ msg_ptr += frag_len;
+ }
+
+ kfree(msg_data);
+
+ return 0;
+}
+
+int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
+{
+ struct sk_buff *skb;
+ struct nfc_llcp_local *local;
+
+ pr_debug("Send rr nr %d\n", sock->recv_n);
+
+ local = sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_put(skb, LLCP_SEQUENCE_SIZE);
+
+ skb->data[2] = sock->recv_n % 16;
+
+ skb_queue_head(&local->tx_queue, skb);
+
+ return 0;
+}
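The CONNECT and CC changes above append MIUX and RW parameters as LLCP TLVs.
The following stand-alone sketch shows the generic type-length-value layout
those helpers emit; the numeric type codes follow the LLCP parameter
assignments (MIUX 0x02, RW 0x05) but the driver itself uses the LLCP_TLV_*
constants from llcp.h, so treat the values here as illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TLV_TYPE_MIUX 0x02	/* illustrative; see LLCP_TLV_MIUX */
#define TLV_TYPE_RW   0x05	/* illustrative; see LLCP_TLV_RW */

/* Append one TLV: 1 byte type, 1 byte length, then 'len' value bytes */
static size_t tlv_append(uint8_t *buf, uint8_t type,
			 const uint8_t *value, uint8_t len)
{
	buf[0] = type;
	buf[1] = len;
	memcpy(buf + 2, value, len);
	return 2 + len;
}

int main(void)
{
	uint8_t buf[16];
	size_t off = 0;
	uint8_t miux[2] = { 0x07, 0xff };	/* 0x7ff, big endian */
	uint8_t rw = 15;

	off += tlv_append(buf + off, TLV_TYPE_MIUX, miux, sizeof(miux));
	off += tlv_append(buf + off, TLV_TYPE_RW, &rw, sizeof(rw));

	for (size_t i = 0; i < off; i++)
		printf("%02x ", buf[i]);	/* 02 02 07 ff 05 01 0f */
	printf("\n");
	return 0;
}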
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 1d32680..17a578f 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -37,7 +37,6 @@
struct sock *sk, *parent_sk;
int i;
-
mutex_lock(&local->socket_lock);
for (i = 0; i < LLCP_MAX_SAP; i++) {
@@ -47,7 +46,7 @@
/* Release all child sockets */
list_for_each_entry_safe(s, n, &parent->list, list) {
- list_del(&s->list);
+ list_del_init(&s->list);
sk = &s->sk;
lock_sock(sk);
@@ -56,9 +55,12 @@
nfc_put_device(s->dev);
sk->sk_state = LLCP_CLOSED;
- sock_set_flag(sk, SOCK_DEAD);
release_sock(sk);
+
+ sock_orphan(sk);
+
+ s->local = NULL;
}
parent_sk = &parent->sk;
@@ -70,18 +72,19 @@
struct sock *accept_sk;
list_for_each_entry_safe(lsk, n, &parent->accept_queue,
- accept_queue) {
+ accept_queue) {
accept_sk = &lsk->sk;
lock_sock(accept_sk);
nfc_llcp_accept_unlink(accept_sk);
accept_sk->sk_state = LLCP_CLOSED;
- sock_set_flag(accept_sk, SOCK_DEAD);
release_sock(accept_sk);
sock_orphan(accept_sk);
+
+ lsk->local = NULL;
}
}
@@ -89,18 +92,32 @@
nfc_put_device(parent->dev);
parent_sk->sk_state = LLCP_CLOSED;
- sock_set_flag(parent_sk, SOCK_DEAD);
release_sock(parent_sk);
+
+ sock_orphan(parent_sk);
+
+ parent->local = NULL;
}
mutex_unlock(&local->socket_lock);
}
+static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local)
+{
+ mutex_lock(&local->sdp_lock);
+
+ local->local_wks = 0;
+ local->local_sdp = 0;
+ local->local_sap = 0;
+
+ mutex_unlock(&local->sdp_lock);
+}
+
static void nfc_llcp_timeout_work(struct work_struct *work)
{
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
- timeout_work);
+ timeout_work);
nfc_dep_link_down(local->dev);
}
@@ -146,7 +163,7 @@
num_wks = ARRAY_SIZE(wks);
- for (sap = 0 ; sap < num_wks; sap++) {
+ for (sap = 0; sap < num_wks; sap++) {
if (wks[sap] == NULL)
continue;
@@ -158,13 +175,13 @@
}
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
- struct nfc_llcp_sock *sock)
+ struct nfc_llcp_sock *sock)
{
mutex_lock(&local->sdp_lock);
if (sock->service_name != NULL && sock->service_name_len > 0) {
int ssap = nfc_llcp_wks_sap(sock->service_name,
- sock->service_name_len);
+ sock->service_name_len);
if (ssap > 0) {
pr_debug("WKS %d\n", ssap);
@@ -176,7 +193,7 @@
return LLCP_SAP_MAX;
}
- set_bit(BIT(ssap), &local->local_wks);
+ set_bit(ssap, &local->local_wks);
mutex_unlock(&local->sdp_lock);
return ssap;
@@ -195,25 +212,25 @@
pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
- set_bit(BIT(ssap), &local->local_sdp);
+ set_bit(ssap, &local->local_sdp);
mutex_unlock(&local->sdp_lock);
return LLCP_WKS_NUM_SAP + ssap;
} else if (sock->ssap != 0) {
if (sock->ssap < LLCP_WKS_NUM_SAP) {
- if (!(local->local_wks & BIT(sock->ssap))) {
- set_bit(BIT(sock->ssap), &local->local_wks);
+ if (!test_bit(sock->ssap, &local->local_wks)) {
+ set_bit(sock->ssap, &local->local_wks);
mutex_unlock(&local->sdp_lock);
return sock->ssap;
}
} else if (sock->ssap < LLCP_SDP_NUM_SAP) {
- if (!(local->local_sdp &
- BIT(sock->ssap - LLCP_WKS_NUM_SAP))) {
- set_bit(BIT(sock->ssap - LLCP_WKS_NUM_SAP),
- &local->local_sdp);
+ if (!test_bit(sock->ssap - LLCP_WKS_NUM_SAP,
+ &local->local_sdp)) {
+ set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
+ &local->local_sdp);
mutex_unlock(&local->sdp_lock);
return sock->ssap;
@@ -238,7 +255,7 @@
return LLCP_SAP_MAX;
}
- set_bit(BIT(local_ssap), &local->local_sap);
+ set_bit(local_ssap, &local->local_sap);
mutex_unlock(&local->sdp_lock);
@@ -265,12 +282,12 @@
mutex_lock(&local->sdp_lock);
- clear_bit(1 << local_ssap, sdp);
+ clear_bit(local_ssap, sdp);
mutex_unlock(&local->sdp_lock);
}
-u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len)
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
{
struct nfc_llcp_local *local;
@@ -294,7 +311,7 @@
version = LLCP_VERSION_11;
version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
- 1, &version_length);
+ 1, &version_length);
gb_len += version_length;
/* 1500 ms */
@@ -304,7 +321,7 @@
pr_debug("Local wks 0x%lx\n", local->local_wks);
wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
- &wks_length);
+ &wks_length);
gb_len += wks_length;
gb_len += ARRAY_SIZE(llcp_magic);
@@ -349,8 +366,7 @@
memcpy(local->remote_gb, gb, gb_len);
local->remote_gb_len = gb_len;
- if (local->remote_gb == NULL ||
- local->remote_gb_len == 0)
+ if (local->remote_gb == NULL || local->remote_gb_len == 0)
return -ENODEV;
if (memcmp(local->remote_gb, llcp_magic, 3)) {
@@ -359,26 +375,27 @@
}
return nfc_llcp_parse_tlv(local,
- &local->remote_gb[3], local->remote_gb_len - 3);
+ &local->remote_gb[3],
+ local->remote_gb_len - 3);
}
static void nfc_llcp_tx_work(struct work_struct *work)
{
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
- tx_work);
+ tx_work);
struct sk_buff *skb;
skb = skb_dequeue(&local->tx_queue);
if (skb != NULL) {
pr_debug("Sending pending skb\n");
nfc_data_exchange(local->dev, local->target_idx,
- skb, nfc_llcp_recv, local);
+ skb, nfc_llcp_recv, local);
} else {
nfc_llcp_send_symm(local->dev);
}
mod_timer(&local->link_timer,
- jiffies + msecs_to_jiffies(local->remote_lto));
+ jiffies + msecs_to_jiffies(local->remote_lto));
}
static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -408,13 +425,13 @@
static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
{
- pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16);
+ pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16);
sock->send_n = (sock->send_n + 1) % 16;
sock->recv_ack_n = (sock->recv_n - 1) % 16;
}
static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
- u8 ssap, u8 dsap)
+ u8 ssap, u8 dsap)
{
struct nfc_llcp_sock *sock, *llcp_sock, *n;
@@ -438,7 +455,7 @@
list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
- &llcp_sock->sk, llcp_sock->dsap);
+ &llcp_sock->sk, llcp_sock->dsap);
if (llcp_sock->dsap == dsap) {
sock_hold(&llcp_sock->sk);
mutex_unlock(&local->socket_lock);
@@ -482,7 +499,7 @@
}
static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct sock *new_sk, *parent;
struct nfc_llcp_sock *sock, *new_sock;
@@ -494,7 +511,7 @@
pr_debug("%d %d\n", dsap, ssap);
nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
- skb->len - LLCP_HEADER_SIZE);
+ skb->len - LLCP_HEADER_SIZE);
if (dsap != LLCP_SAP_SDP) {
bound_sap = dsap;
@@ -513,7 +530,7 @@
lock_sock(&sock->sk);
if (sock->dsap == LLCP_SAP_SDP &&
- sock->sk.sk_state == LLCP_LISTEN)
+ sock->sk.sk_state == LLCP_LISTEN)
goto enqueue;
} else {
u8 *sn;
@@ -529,23 +546,23 @@
mutex_lock(&local->socket_lock);
for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
- bound_sap++) {
+ bound_sap++) {
sock = local->sockets[bound_sap];
if (sock == NULL)
continue;
if (sock->service_name == NULL ||
- sock->service_name_len == 0)
+ sock->service_name_len == 0)
continue;
if (sock->service_name_len != sn_len)
continue;
if (sock->dsap == LLCP_SAP_SDP &&
- sock->sk.sk_state == LLCP_LISTEN &&
- !memcmp(sn, sock->service_name, sn_len)) {
+ sock->sk.sk_state == LLCP_LISTEN &&
+ !memcmp(sn, sock->service_name, sn_len)) {
pr_debug("Found service name at SAP %d\n",
- bound_sap);
+ bound_sap);
sock_hold(&sock->sk);
mutex_unlock(&local->socket_lock);
@@ -570,8 +587,7 @@
goto fail;
}
- new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type,
- GFP_ATOMIC);
+ new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
if (new_sk == NULL) {
reason = LLCP_DM_REJ;
release_sock(&sock->sk);
@@ -616,8 +632,39 @@
}
+int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
+{
+ int nr_frames = 0;
+ struct nfc_llcp_local *local = sock->local;
+
+ pr_debug("Remote ready %d tx queue len %d remote rw %d\n",
+ sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
+ local->remote_rw);
+
+ /* Try to queue some I frames for transmission */
+ while (sock->remote_ready &&
+ skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) {
+ struct sk_buff *pdu, *pending_pdu;
+
+ pdu = skb_dequeue(&sock->tx_queue);
+ if (pdu == NULL)
+ break;
+
+ /* Update N(S)/N(R) */
+ nfc_llcp_set_nrns(sock, pdu);
+
+ pending_pdu = skb_clone(pdu, GFP_KERNEL);
+
+ skb_queue_tail(&local->tx_queue, pdu);
+ skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
+ nr_frames++;
+ }
+
+ return nr_frames;
+}
+
static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
@@ -644,15 +691,15 @@
nfc_llcp_sock_put(llcp_sock);
}
- if (ns == llcp_sock->recv_n)
- llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
- else
- pr_err("Received out of sequence I PDU\n");
-
/* Pass the payload upstream */
if (ptype == LLCP_PDU_I) {
pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
+ if (ns == llcp_sock->recv_n)
+ llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
+ else
+ pr_err("Received out of sequence I PDU\n");
+
skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
pr_err("receive queue is full\n");
@@ -673,30 +720,20 @@
}
}
- /* Queue some I frames for transmission */
- while (llcp_sock->remote_ready &&
- skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) {
- struct sk_buff *pdu, *pending_pdu;
+ if (ptype == LLCP_PDU_RR)
+ llcp_sock->remote_ready = true;
+ else if (ptype == LLCP_PDU_RNR)
+ llcp_sock->remote_ready = false;
- pdu = skb_dequeue(&llcp_sock->tx_queue);
- if (pdu == NULL)
- break;
-
- /* Update N(S)/N(R) */
- nfc_llcp_set_nrns(llcp_sock, pdu);
-
- pending_pdu = skb_clone(pdu, GFP_KERNEL);
-
- skb_queue_tail(&local->tx_queue, pdu);
- skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu);
- }
+ if (nfc_llcp_queue_i_frames(llcp_sock) == 0)
+ nfc_llcp_send_rr(llcp_sock);
release_sock(sk);
nfc_llcp_sock_put(llcp_sock);
}
static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
@@ -718,7 +755,6 @@
nfc_llcp_sock_put(llcp_sock);
}
-
if (sk->sk_state == LLCP_CONNECTED) {
nfc_put_device(local->dev);
sk->sk_state = LLCP_CLOSED;
@@ -731,13 +767,11 @@
nfc_llcp_sock_put(llcp_sock);
}
-static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
u8 dsap, ssap;
-
dsap = nfc_llcp_dsap(skb);
ssap = nfc_llcp_ssap(skb);
@@ -756,7 +790,7 @@
llcp_sock->dsap = ssap;
nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
- skb->len - LLCP_HEADER_SIZE);
+ skb->len - LLCP_HEADER_SIZE);
nfc_llcp_sock_put(llcp_sock);
}
@@ -764,7 +798,7 @@
static void nfc_llcp_rx_work(struct work_struct *work)
{
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
- rx_work);
+ rx_work);
u8 dsap, ssap, ptype;
struct sk_buff *skb;
@@ -802,6 +836,7 @@
case LLCP_PDU_I:
case LLCP_PDU_RR:
+ case LLCP_PDU_RNR:
pr_debug("I frame\n");
nfc_llcp_recv_hdlc(local, skb);
break;
@@ -821,7 +856,7 @@
pr_debug("Received an LLCP PDU\n");
if (err < 0) {
- pr_err("err %d", err);
+ pr_err("err %d\n", err);
return;
}
@@ -840,6 +875,8 @@
if (local == NULL)
return;
+ nfc_llcp_clear_sdp(local);
+
/* Close and purge all existing sockets */
nfc_llcp_socket_release(local);
}
@@ -865,7 +902,7 @@
queue_work(local->tx_wq, &local->tx_work);
} else {
mod_timer(&local->link_timer,
- jiffies + msecs_to_jiffies(local->remote_lto));
+ jiffies + msecs_to_jiffies(local->remote_lto));
}
}
@@ -891,8 +928,10 @@
skb_queue_head_init(&local->tx_queue);
INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
- local->tx_wq = alloc_workqueue(name,
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ local->tx_wq =
+ alloc_workqueue(name,
+ WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1);
if (local->tx_wq == NULL) {
err = -ENOMEM;
goto err_local;
@@ -901,8 +940,10 @@
local->rx_pending = NULL;
INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
- local->rx_wq = alloc_workqueue(name,
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ local->rx_wq =
+ alloc_workqueue(name,
+ WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1);
if (local->rx_wq == NULL) {
err = -ENOMEM;
goto err_tx_wq;
@@ -910,8 +951,10 @@
INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
- local->timeout_wq = alloc_workqueue(name,
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ local->timeout_wq =
+ alloc_workqueue(name,
+ WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1);
if (local->timeout_wq == NULL) {
err = -ENOMEM;
goto err_rx_wq;
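Several hunks in llcp.c above replace set_bit(BIT(ssap), ...) with
set_bit(ssap, ...): the kernel bitop helpers take a bit index, not a mask, so
passing BIT(ssap) used to flip bit number 2^ssap instead of bit ssap. A short
user-space sketch of the difference, with BIT() and a simple set-bit helper
defined locally for illustration:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* takes a bit *index*, like the kernel's set_bit() */
static void set_bit_idx(unsigned int nr, unsigned long *word)
{
	*word |= 1UL << nr;
}

int main(void)
{
	unsigned long right = 0, wrong = 0;
	unsigned int ssap = 3;

	set_bit_idx(ssap, &right);	/* sets bit 3 -> 0x8 */
	set_bit_idx(BIT(ssap), &wrong);	/* sets bit 8 -> 0x100, not bit 3 */

	printf("set_bit(ssap)      -> 0x%lx\n", right);
	printf("set_bit(BIT(ssap)) -> 0x%lx\n", wrong);
	return 0;
}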
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 0ad2e33..50680ce 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -28,6 +28,10 @@
#define LLCP_DEFAULT_RW 1
#define LLCP_DEFAULT_MIU 128
+#define LLCP_MAX_LTO 0xff
+#define LLCP_MAX_RW 15
+#define LLCP_MAX_MIUX 0x7ff
+
#define LLCP_WKS_NUM_SAP 16
#define LLCP_SDP_NUM_SAP 16
#define LLCP_LOCAL_NUM_SAP 32
@@ -162,9 +166,10 @@
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
- struct nfc_llcp_sock *sock);
+ struct nfc_llcp_sock *sock);
u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
+int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock);
/* Sock API */
struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
@@ -175,7 +180,7 @@
/* TLV API */
int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len);
+ u8 *tlv_array, u16 tlv_array_len);
/* Commands API */
void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
@@ -187,6 +192,9 @@
int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
+ struct msghdr *msg, size_t len);
+int nfc_llcp_send_rr(struct nfc_llcp_sock *sock);
/* Socket API */
int __init nfc_llcp_sock_init(void);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index f738ccd..c13e02e 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -78,9 +78,11 @@
llcp_sock->local = local;
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
llcp_sock->service_name_len = min_t(unsigned int,
- llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+ llcp_addr.service_name_len,
+ NFC_LLCP_MAX_SERVICE_NAME);
llcp_sock->service_name = kmemdup(llcp_addr.service_name,
- llcp_sock->service_name_len, GFP_KERNEL);
+ llcp_sock->service_name_len,
+ GFP_KERNEL);
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
if (llcp_sock->ssap == LLCP_MAX_SAP)
@@ -110,7 +112,7 @@
lock_sock(sk);
if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != LLCP_BOUND) {
+ || sk->sk_state != LLCP_BOUND) {
ret = -EBADFD;
goto error;
}
@@ -149,13 +151,13 @@
sock_hold(sk);
list_add_tail(&llcp_sock->accept_queue,
- &llcp_sock_parent->accept_queue);
+ &llcp_sock_parent->accept_queue);
llcp_sock->parent = parent;
sk_acceptq_added(parent);
}
struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
- struct socket *newsock)
+ struct socket *newsock)
{
struct nfc_llcp_sock *lsk, *n, *llcp_parent;
struct sock *sk;
@@ -163,7 +165,7 @@
llcp_parent = nfc_llcp_sock(parent);
list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
- accept_queue) {
+ accept_queue) {
sk = &lsk->sk;
lock_sock(sk);
@@ -192,7 +194,7 @@
}
static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *new_sk;
@@ -248,7 +250,7 @@
static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
int *len, int peer)
{
- struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr;
+ struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *)addr;
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -262,7 +264,7 @@
llcp_addr->ssap = llcp_sock->ssap;
llcp_addr->service_name_len = llcp_sock->service_name_len;
memcpy(llcp_addr->service_name, llcp_sock->service_name,
- llcp_addr->service_name_len);
+ llcp_addr->service_name_len);
return 0;
}
@@ -275,7 +277,7 @@
parent_sock = nfc_llcp_sock(parent);
list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
- accept_queue) {
+ accept_queue) {
sk = &llcp_sock->sk;
if (sk->sk_state == LLCP_CONNECTED)
@@ -286,7 +288,7 @@
}
static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+ poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
@@ -315,6 +317,7 @@
struct sock *sk = sock->sk;
struct nfc_llcp_local *local;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ int err = 0;
if (!sk)
return 0;
@@ -322,25 +325,17 @@
pr_debug("%p\n", sk);
local = llcp_sock->local;
- if (local == NULL)
- return -ENODEV;
+ if (local == NULL) {
+ err = -ENODEV;
+ goto out;
+ }
mutex_lock(&local->socket_lock);
- if (llcp_sock == local->sockets[llcp_sock->ssap]) {
+ if (llcp_sock == local->sockets[llcp_sock->ssap])
local->sockets[llcp_sock->ssap] = NULL;
- } else {
- struct nfc_llcp_sock *parent, *s, *n;
-
- parent = local->sockets[llcp_sock->ssap];
-
- list_for_each_entry_safe(s, n, &parent->list, list)
- if (llcp_sock == s) {
- list_del(&s->list);
- break;
- }
-
- }
+ else
+ list_del_init(&llcp_sock->list);
mutex_unlock(&local->socket_lock);
@@ -355,7 +350,7 @@
struct sock *accept_sk;
list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
- accept_queue) {
+ accept_queue) {
accept_sk = &lsk->sk;
lock_sock(accept_sk);
@@ -364,31 +359,27 @@
release_sock(accept_sk);
- sock_set_flag(sk, SOCK_DEAD);
sock_orphan(accept_sk);
- sock_put(accept_sk);
}
}
/* Freeing the SAP */
if ((sk->sk_state == LLCP_CONNECTED
- && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
- sk->sk_state == LLCP_BOUND ||
- sk->sk_state == LLCP_LISTEN)
+ && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
+ sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
- sock_set_flag(sk, SOCK_DEAD);
-
release_sock(sk);
+out:
sock_orphan(sk);
sock_put(sk);
- return 0;
+ return err;
}
static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
- int len, int flags)
+ int len, int flags)
{
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -400,7 +391,7 @@
pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
if (!addr || len < sizeof(struct sockaddr_nfc) ||
- addr->sa_family != AF_NFC) {
+ addr->sa_family != AF_NFC) {
pr_err("Invalid socket\n");
return -EINVAL;
}
@@ -411,7 +402,7 @@
}
pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
- addr->target_idx, addr->nfc_protocol);
+ addr->target_idx, addr->nfc_protocol);
lock_sock(sk);
@@ -441,7 +432,7 @@
device_unlock(&dev->dev);
if (local->rf_mode == NFC_RF_INITIATOR &&
- addr->target_idx != local->target_idx) {
+ addr->target_idx != local->target_idx) {
ret = -ENOLINK;
goto put_dev;
}
@@ -459,9 +450,11 @@
llcp_sock->dsap = LLCP_SAP_SDP;
llcp_sock->nfc_protocol = addr->nfc_protocol;
llcp_sock->service_name_len = min_t(unsigned int,
- addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+ addr->service_name_len,
+ NFC_LLCP_MAX_SERVICE_NAME);
llcp_sock->service_name = kmemdup(addr->service_name,
- llcp_sock->service_name_len, GFP_KERNEL);
+ llcp_sock->service_name_len,
+ GFP_KERNEL);
local->sockets[llcp_sock->ssap] = llcp_sock;
@@ -482,6 +475,34 @@
return ret;
}
+static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ int ret;
+
+ pr_debug("sock %p sk %p\n", sock, sk);
+
+ ret = sock_error(sk);
+ if (ret)
+ return ret;
+
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != LLCP_CONNECTED) {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
+
+ release_sock(sk);
+
+ return nfc_llcp_send_i_frame(llcp_sock, msg, len);
+}
+
static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
@@ -496,7 +517,7 @@
lock_sock(sk);
if (sk->sk_state == LLCP_CLOSED &&
- skb_queue_empty(&sk->sk_receive_queue)) {
+ skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
return 0;
}
@@ -509,7 +530,7 @@
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
pr_err("Recv datagram failed state %d %d %d",
- sk->sk_state, err, sock_error(sk));
+ sk->sk_state, err, sock_error(sk));
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
@@ -517,7 +538,7 @@
return err;
}
- rlen = skb->len; /* real length of skb */
+ rlen = skb->len; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
cskb = skb;
@@ -567,7 +588,7 @@
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
- .sendmsg = sock_no_sendmsg,
+ .sendmsg = llcp_sock_sendmsg,
.recvmsg = llcp_sock_recvmsg,
.mmap = sock_no_mmap,
};
@@ -627,6 +648,8 @@
void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
{
+ struct nfc_llcp_local *local = sock->local;
+
kfree(sock->service_name);
skb_queue_purge(&sock->tx_queue);
@@ -635,11 +658,16 @@
list_del_init(&sock->accept_queue);
+ if (local != NULL && sock == local->sockets[sock->ssap])
+ local->sockets[sock->ssap] = NULL;
+ else
+ list_del_init(&sock->list);
+
sock->parent = NULL;
}
static int llcp_sock_create(struct net *net, struct socket *sock,
- const struct nfc_protocol *nfc_proto)
+ const struct nfc_protocol *nfc_proto)
{
struct sock *sk;
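The new llcp_sock_sendmsg() path above ends in nfc_llcp_send_i_frame(), where
nfc_llcp_set_nrns() packs the send sequence N(S) into the upper nibble and the
receive sequence N(R) into the lower nibble of the I-frame sequence byte
(nfc_llcp_send_rr() fills in only N(R)). A small stand-alone sketch of that
packing, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* upper nibble N(S), lower nibble N(R), both modulo 16 */
static uint8_t llcp_seq_byte(unsigned int send_n, unsigned int recv_n)
{
	return ((send_n % 16) << 4) | (recv_n % 16);
}

int main(void)
{
	unsigned int send_n = 3, recv_n = 7;
	uint8_t seq = llcp_seq_byte(send_n, recv_n);

	printf("seq byte 0x%02x -> N(S)=%u N(R)=%u\n",
	       seq, seq >> 4, seq & 0x0f);
	return 0;
}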
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index a47e90c..9ec065b 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -66,9 +66,8 @@
/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
- void (*req)(struct nci_dev *ndev, unsigned long opt),
- unsigned long opt,
- __u32 timeout)
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int rc = 0;
long completion_rc;
@@ -77,9 +76,9 @@
init_completion(&ndev->req_completion);
req(ndev, opt);
- completion_rc = wait_for_completion_interruptible_timeout(
- &ndev->req_completion,
- timeout);
+ completion_rc =
+ wait_for_completion_interruptible_timeout(&ndev->req_completion,
+ timeout);
pr_debug("wait_for_completion return %ld\n", completion_rc);
@@ -110,8 +109,9 @@
}
static inline int nci_request(struct nci_dev *ndev,
- void (*req)(struct nci_dev *ndev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+ void (*req)(struct nci_dev *ndev,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int rc;
@@ -152,14 +152,14 @@
/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
if (ndev->supported_rf_interfaces[i] ==
- NCI_RF_INTERFACE_ISO_DEP) {
+ NCI_RF_INTERFACE_ISO_DEP) {
cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
NCI_DISC_MAP_MODE_LISTEN;
cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
(*num)++;
} else if (ndev->supported_rf_interfaces[i] ==
- NCI_RF_INTERFACE_NFC_DEP) {
+ NCI_RF_INTERFACE_NFC_DEP) {
cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
NCI_DISC_MAP_MODE_LISTEN;
@@ -172,8 +172,7 @@
}
nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
- (1 + ((*num)*sizeof(struct disc_map_config))),
- &cmd);
+ (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
@@ -184,36 +183,36 @@
cmd.num_disc_configs = 0;
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_JEWEL_MASK
- || protocols & NFC_PROTO_MIFARE_MASK
- || protocols & NFC_PROTO_ISO14443_MASK
- || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ (protocols & NFC_PROTO_JEWEL_MASK
+ || protocols & NFC_PROTO_MIFARE_MASK
+ || protocols & NFC_PROTO_ISO14443_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
- NCI_NFC_A_PASSIVE_POLL_MODE;
+ NCI_NFC_A_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_ISO14443_MASK)) {
+ (protocols & NFC_PROTO_ISO14443_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
- NCI_NFC_B_PASSIVE_POLL_MODE;
+ NCI_NFC_B_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_FELICA_MASK
- || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ (protocols & NFC_PROTO_FELICA_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
- NCI_NFC_F_PASSIVE_POLL_MODE;
+ NCI_NFC_F_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
- (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
- &cmd);
+ (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
+ &cmd);
}
struct nci_rf_discover_select_param {
@@ -224,7 +223,7 @@
static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
{
struct nci_rf_discover_select_param *param =
- (struct nci_rf_discover_select_param *)opt;
+ (struct nci_rf_discover_select_param *)opt;
struct nci_rf_discover_select_cmd cmd;
cmd.rf_discovery_id = param->rf_discovery_id;
@@ -245,8 +244,7 @@
}
nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
- sizeof(struct nci_rf_discover_select_cmd),
- &cmd);
+ sizeof(struct nci_rf_discover_select_cmd), &cmd);
}
static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
@@ -256,8 +254,7 @@
cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
- sizeof(struct nci_rf_deactivate_cmd),
- &cmd);
+ sizeof(struct nci_rf_deactivate_cmd), &cmd);
}
static int nci_open_device(struct nci_dev *ndev)
@@ -281,16 +278,16 @@
set_bit(NCI_INIT, &ndev->flags);
rc = __nci_request(ndev, nci_reset_req, 0,
- msecs_to_jiffies(NCI_RESET_TIMEOUT));
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
if (!rc) {
rc = __nci_request(ndev, nci_init_req, 0,
- msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
}
if (!rc) {
rc = __nci_request(ndev, nci_init_complete_req, 0,
- msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
}
clear_bit(NCI_INIT, &ndev->flags);
@@ -340,7 +337,7 @@
set_bit(NCI_INIT, &ndev->flags);
__nci_request(ndev, nci_reset_req, 0,
- msecs_to_jiffies(NCI_RESET_TIMEOUT));
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
clear_bit(NCI_INIT, &ndev->flags);
/* Flush cmd wq */
@@ -396,7 +393,7 @@
int rc;
if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
- (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
+ (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
pr_err("unable to start poll, since poll is already active\n");
return -EBUSY;
}
@@ -407,17 +404,17 @@
}
if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
- (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
+ (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
pr_debug("target active or w4 select, implicitly deactivate\n");
rc = nci_request(ndev, nci_rf_deactivate_req, 0,
- msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
if (rc)
return -EBUSY;
}
rc = nci_request(ndev, nci_rf_discover_req, protocols,
- msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
+ msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
if (!rc)
ndev->poll_prots = protocols;
@@ -430,17 +427,17 @@
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
- (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
+ (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
pr_err("unable to stop poll, since poll is not active\n");
return;
}
nci_request(ndev, nci_rf_deactivate_req, 0,
- msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
- __u32 protocol)
+ __u32 protocol)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
struct nci_rf_discover_select_param param;
@@ -451,7 +448,7 @@
pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
- (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
+ (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
pr_err("there is no available target to activate\n");
return -EINVAL;
}
@@ -494,8 +491,8 @@
param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
rc = nci_request(ndev, nci_rf_discover_select_req,
- (unsigned long)&param,
- msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
+ (unsigned long)&param,
+ msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
}
if (!rc)
@@ -519,14 +516,13 @@
if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
nci_request(ndev, nci_rf_deactivate_req, 0,
- msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
}
static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
- struct sk_buff *skb,
- data_exchange_cb_t cb,
- void *cb_context)
+ struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -571,9 +567,8 @@
* @supported_protocols: NFC protocols supported by the device
*/
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
- __u32 supported_protocols,
- int tx_headroom,
- int tx_tailroom)
+ __u32 supported_protocols,
+ int tx_headroom, int tx_tailroom)
{
struct nci_dev *ndev;
@@ -594,9 +589,9 @@
ndev->tx_tailroom = tx_tailroom;
ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
- supported_protocols,
- tx_headroom + NCI_DATA_HDR_SIZE,
- tx_tailroom);
+ supported_protocols,
+ tx_headroom + NCI_DATA_HDR_SIZE,
+ tx_tailroom);
if (!ndev->nfc_dev)
goto free_exit;
@@ -668,9 +663,9 @@
skb_queue_head_init(&ndev->tx_q);
setup_timer(&ndev->cmd_timer, nci_cmd_timer,
- (unsigned long) ndev);
+ (unsigned long) ndev);
setup_timer(&ndev->data_timer, nci_data_timer,
- (unsigned long) ndev);
+ (unsigned long) ndev);
mutex_init(&ndev->req_lock);
@@ -719,7 +714,7 @@
pr_debug("len %d\n", skb->len);
if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
- && !test_bit(NCI_INIT, &ndev->flags))) {
+ && !test_bit(NCI_INIT, &ndev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
@@ -799,7 +794,7 @@
/* Check if data flow control is used */
if (atomic_read(&ndev->credits_cnt) !=
- NCI_DATA_FLOW_CONTROL_NOT_USED)
+ NCI_DATA_FLOW_CONTROL_NOT_USED)
atomic_dec(&ndev->credits_cnt);
pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
@@ -810,7 +805,7 @@
nci_send_frame(skb);
mod_timer(&ndev->data_timer,
- jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
}
}
@@ -879,6 +874,6 @@
nci_send_frame(skb);
mod_timer(&ndev->cmd_timer,
- jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
}
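The discover-map and discover requests above build commands whose payload is a
one-byte entry count followed by a variable number of fixed-size config
entries, which is why the length passed to nci_send_cmd() is
(1 + num * sizeof(entry)). A stand-alone sketch of that sizing; the struct
layout here is illustrative and not the actual NCI wire format:

#include <stdio.h>
#include <stdint.h>

/* illustrative entry, stand-in for struct disc_config */
struct disc_entry {
	uint8_t rf_tech_and_mode;
	uint8_t frequency;
};

struct disc_cmd {
	uint8_t num_entries;
	struct disc_entry entries[3];
};

int main(void)
{
	struct disc_cmd cmd = { 0 };

	cmd.entries[cmd.num_entries].rf_tech_and_mode = 0x00;	/* e.g. NFC-A poll */
	cmd.entries[cmd.num_entries].frequency = 1;
	cmd.num_entries++;

	/* payload = count byte + only the entries actually filled in */
	size_t payload_len = 1 + cmd.num_entries * sizeof(struct disc_entry);
	printf("payload length: %zu\n", payload_len);
	return 0;
}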
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index 7880ae9..a0bc326 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -35,8 +35,7 @@
#include <linux/nfc.h>
/* Complete data exchange transaction and forward skb to nfc core */
-void nci_data_exchange_complete(struct nci_dev *ndev,
- struct sk_buff *skb,
+void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
int err)
{
data_exchange_cb_t cb = ndev->data_exchange_cb;
@@ -67,9 +66,9 @@
/* ----------------- NCI TX Data ----------------- */
static inline void nci_push_data_hdr(struct nci_dev *ndev,
- __u8 conn_id,
- struct sk_buff *skb,
- __u8 pbf)
+ __u8 conn_id,
+ struct sk_buff *skb,
+ __u8 pbf)
{
struct nci_data_hdr *hdr;
int plen = skb->len;
@@ -86,8 +85,8 @@
}
static int nci_queue_tx_data_frags(struct nci_dev *ndev,
- __u8 conn_id,
- struct sk_buff *skb) {
+ __u8 conn_id,
+ struct sk_buff *skb) {
int total_len = skb->len;
unsigned char *data = skb->data;
unsigned long flags;
@@ -105,8 +104,8 @@
min_t(int, total_len, ndev->max_data_pkt_payload_size);
skb_frag = nci_skb_alloc(ndev,
- (NCI_DATA_HDR_SIZE + frag_len),
- GFP_KERNEL);
+ (NCI_DATA_HDR_SIZE + frag_len),
+ GFP_KERNEL);
if (skb_frag == NULL) {
rc = -ENOMEM;
goto free_exit;
@@ -118,7 +117,8 @@
/* second, set the header */
nci_push_data_hdr(ndev, conn_id, skb_frag,
- ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT)));
+ ((total_len == frag_len) ?
+ (NCI_PBF_LAST) : (NCI_PBF_CONT)));
__skb_queue_tail(&frags_q, skb_frag);
@@ -186,8 +186,8 @@
/* ----------------- NCI RX Data ----------------- */
static void nci_add_rx_data_frag(struct nci_dev *ndev,
- struct sk_buff *skb,
- __u8 pbf)
+ struct sk_buff *skb,
+ __u8 pbf)
{
int reassembly_len;
int err = 0;
@@ -211,8 +211,8 @@
/* second, combine the two fragments */
memcpy(skb_push(skb, reassembly_len),
- ndev->rx_data_reassembly->data,
- reassembly_len);
+ ndev->rx_data_reassembly->data,
+ reassembly_len);
/* third, free old reassembly */
kfree_skb(ndev->rx_data_reassembly);
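The data.c hunks above restyle nci_queue_tx_data_frags(), which splits an outgoing payload into fragments of at most ndev->max_data_pkt_payload_size and marks every fragment except the final one with the continuation packet-boundary flag (NCI_PBF_CONT vs. NCI_PBF_LAST). The following is a minimal stand-alone sketch of that splitting loop in plain C; queue_fragments, PBF_CONT/PBF_LAST and the 251-byte limit are hypothetical stand-ins, not the kernel's own names.

#include <stdio.h>
#include <string.h>

#define PBF_CONT 1	/* more fragments follow (stand-in for NCI_PBF_CONT) */
#define PBF_LAST 0	/* final fragment (stand-in for NCI_PBF_LAST) */

/* Split total_len bytes into chunks of at most max_payload bytes,
 * flagging every chunk except the last one as a continuation. */
static void queue_fragments(const unsigned char *data, int total_len, int max_payload)
{
	while (total_len) {
		int frag_len = total_len < max_payload ? total_len : max_payload;
		int pbf = (total_len == frag_len) ? PBF_LAST : PBF_CONT;

		printf("fragment of %d bytes, pbf=%d\n", frag_len, pbf);
		/* a real implementation would copy frag_len bytes into a
		 * freshly allocated buffer and queue it here */

		data += frag_len;
		total_len -= frag_len;
	}
}

int main(void)
{
	unsigned char payload[700];

	memset(payload, 0xab, sizeof(payload));
	queue_fragments(payload, sizeof(payload), 251);	/* 251 is an arbitrary example limit */
	return 0;
}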
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 03e7b46..2e3dee4 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -40,7 +40,7 @@
/* Handle NCI Notification packets */
static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
int i;
@@ -62,7 +62,7 @@
if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
/* found static rf connection */
atomic_add(ntf->conn_entries[i].credits,
- &ndev->credits_cnt);
+ &ndev->credits_cnt);
}
}
@@ -72,7 +72,7 @@
}
static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = skb->data[0];
@@ -80,7 +80,7 @@
if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
/* Activation failed, so complete the request
- (the state remains the same) */
+ (the state remains the same) */
nci_req_complete(ndev, status);
}
}
@@ -101,7 +101,7 @@
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfca_poll *nfca_poll,
- __u8 *data)
+ __u8 *data)
{
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
@@ -128,7 +128,7 @@
static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
- __u8 *data)
+ __u8 *data)
{
nfcb_poll->sensb_res_len = *data++;
@@ -142,13 +142,13 @@
static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfcf_poll *nfcf_poll,
- __u8 *data)
+ __u8 *data)
{
nfcf_poll->bit_rate = *data++;
nfcf_poll->sensf_res_len = *data++;
pr_debug("bit_rate %d, sensf_res_len %d\n",
- nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
+ nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len);
data += nfcf_poll->sensf_res_len;
@@ -189,7 +189,7 @@
target->nfcid1_len = nfca_poll->nfcid1_len;
if (target->nfcid1_len > 0) {
memcpy(target->nfcid1, nfca_poll->nfcid1,
- target->nfcid1_len);
+ target->nfcid1_len);
}
} else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) {
nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
@@ -197,7 +197,7 @@
target->sensb_res_len = nfcb_poll->sensb_res_len;
if (target->sensb_res_len > 0) {
memcpy(target->sensb_res, nfcb_poll->sensb_res,
- target->sensb_res_len);
+ target->sensb_res_len);
}
} else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) {
nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
@@ -205,7 +205,7 @@
target->sensf_res_len = nfcf_poll->sensf_res_len;
if (target->sensf_res_len > 0) {
memcpy(target->sensf_res, nfcf_poll->sensf_res,
- target->sensf_res_len);
+ target->sensf_res_len);
}
} else {
pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode);
@@ -220,7 +220,7 @@
}
static void nci_add_new_target(struct nci_dev *ndev,
- struct nci_rf_discover_ntf *ntf)
+ struct nci_rf_discover_ntf *ntf)
{
struct nfc_target *target;
int i, rc;
@@ -230,8 +230,8 @@
if (target->idx == ntf->rf_discovery_id) {
/* This target already exists, add the new protocol */
nci_add_new_protocol(ndev, target, ntf->rf_protocol,
- ntf->rf_tech_and_mode,
- &ntf->rf_tech_specific_params);
+ ntf->rf_tech_and_mode,
+ &ntf->rf_tech_specific_params);
return;
}
}
@@ -245,27 +245,27 @@
target = &ndev->targets[ndev->n_targets];
rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
- ntf->rf_tech_and_mode,
- &ntf->rf_tech_specific_params);
+ ntf->rf_tech_and_mode,
+ &ntf->rf_tech_specific_params);
if (!rc) {
target->idx = ntf->rf_discovery_id;
ndev->n_targets++;
pr_debug("target_idx %d, n_targets %d\n", target->idx,
- ndev->n_targets);
+ ndev->n_targets);
}
}
void nci_clear_target_list(struct nci_dev *ndev)
{
memset(ndev->targets, 0,
- (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS));
+ (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS));
ndev->n_targets = 0;
}
static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nci_rf_discover_ntf ntf;
__u8 *data = skb->data;
@@ -280,7 +280,7 @@
pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode);
pr_debug("rf_tech_specific_params_len %d\n",
- ntf.rf_tech_specific_params_len);
+ ntf.rf_tech_specific_params_len);
if (ntf.rf_tech_specific_params_len > 0) {
switch (ntf.rf_tech_and_mode) {
@@ -318,7 +318,7 @@
} else {
atomic_set(&ndev->state, NCI_W4_HOST_SELECT);
nfc_targets_found(ndev->nfc_dev, ndev->targets,
- ndev->n_targets);
+ ndev->n_targets);
}
}
@@ -335,20 +335,17 @@
pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
if (nfca_poll->rats_res_len > 0) {
memcpy(nfca_poll->rats_res,
- data,
- nfca_poll->rats_res_len);
+ data, nfca_poll->rats_res_len);
}
break;
case NCI_NFC_B_PASSIVE_POLL_MODE:
nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
nfcb_poll->attrib_res_len = *data++;
- pr_debug("attrib_res_len %d\n",
- nfcb_poll->attrib_res_len);
+ pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
if (nfcb_poll->attrib_res_len > 0) {
memcpy(nfcb_poll->attrib_res,
- data,
- nfcb_poll->attrib_res_len);
+ data, nfcb_poll->attrib_res_len);
}
break;
@@ -362,7 +359,7 @@
}
static void nci_target_auto_activated(struct nci_dev *ndev,
- struct nci_rf_intf_activated_ntf *ntf)
+ struct nci_rf_intf_activated_ntf *ntf)
{
struct nfc_target *target;
int rc;
@@ -370,8 +367,8 @@
target = &ndev->targets[ndev->n_targets];
rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
- ntf->activation_rf_tech_and_mode,
- &ntf->rf_tech_specific_params);
+ ntf->activation_rf_tech_and_mode,
+ &ntf->rf_tech_specific_params);
if (rc)
return;
@@ -384,7 +381,7 @@
}
static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nci_rf_intf_activated_ntf ntf;
__u8 *data = skb->data;
@@ -405,7 +402,8 @@
ntf.activation_rf_tech_and_mode);
pr_debug("max_data_pkt_payload_size 0x%x\n",
ntf.max_data_pkt_payload_size);
- pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits);
+ pr_debug("initial_num_credits 0x%x\n",
+ ntf.initial_num_credits);
pr_debug("rf_tech_specific_params_len %d\n",
ntf.rf_tech_specific_params_len);
@@ -441,18 +439,15 @@
pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
ntf.data_exch_rf_tech_and_mode);
- pr_debug("data_exch_tx_bit_rate 0x%x\n",
- ntf.data_exch_tx_bit_rate);
- pr_debug("data_exch_rx_bit_rate 0x%x\n",
- ntf.data_exch_rx_bit_rate);
- pr_debug("activation_params_len %d\n",
- ntf.activation_params_len);
+ pr_debug("data_exch_tx_bit_rate 0x%x\n", ntf.data_exch_tx_bit_rate);
+ pr_debug("data_exch_rx_bit_rate 0x%x\n", ntf.data_exch_rx_bit_rate);
+ pr_debug("activation_params_len %d\n", ntf.activation_params_len);
if (ntf.activation_params_len > 0) {
switch (ntf.rf_interface) {
case NCI_RF_INTERFACE_ISO_DEP:
err = nci_extract_activation_params_iso_dep(ndev,
- &ntf, data);
+ &ntf, data);
break;
case NCI_RF_INTERFACE_FRAME:
@@ -489,7 +484,7 @@
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index aa63b1e..3003c33 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -67,19 +67,18 @@
ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
if (ndev->num_supported_rf_interfaces >
- NCI_MAX_SUPPORTED_RF_INTERFACES) {
+ NCI_MAX_SUPPORTED_RF_INTERFACES) {
ndev->num_supported_rf_interfaces =
NCI_MAX_SUPPORTED_RF_INTERFACES;
}
memcpy(ndev->supported_rf_interfaces,
- rsp_1->supported_rf_interfaces,
- ndev->num_supported_rf_interfaces);
+ rsp_1->supported_rf_interfaces,
+ ndev->num_supported_rf_interfaces);
rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
- ndev->max_logical_connections =
- rsp_2->max_logical_connections;
+ ndev->max_logical_connections = rsp_2->max_logical_connections;
ndev->max_routing_table_size =
__le16_to_cpu(rsp_2->max_routing_table_size);
ndev->max_ctrl_pkt_payload_len =
@@ -121,7 +120,7 @@
}
static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = skb->data[0];
@@ -143,7 +142,7 @@
}
static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = skb->data[0];
@@ -155,7 +154,7 @@
}
static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = skb->data[0];
@@ -163,7 +162,7 @@
/* If target was active, complete the request only in deactivate_ntf */
if ((status != NCI_STATUS_OK) ||
- (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
+ (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
nci_clear_target_list(ndev);
atomic_set(&ndev->state, NCI_IDLE);
nci_req_complete(ndev, status);
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 07f0348..6404052 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -48,34 +48,34 @@
[NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
[NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
[NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
+ [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
};
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
- struct netlink_callback *cb, int flags)
+ struct netlink_callback *cb, int flags)
{
void *hdr;
hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
- &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
+ &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
if (!hdr)
return -EMSGSIZE;
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
- NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS,
- target->supported_protocols);
+ NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
if (target->nfcid1_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
- target->nfcid1);
+ target->nfcid1);
if (target->sensb_res_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
- target->sensb_res);
+ target->sensb_res);
if (target->sensf_res_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
- target->sensf_res);
+ target->sensf_res);
return genlmsg_end(msg, hdr);
@@ -91,9 +91,9 @@
u32 idx;
rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize,
- nfc_genl_family.attrbuf,
- nfc_genl_family.maxattr,
- nfc_genl_policy);
+ nfc_genl_family.attrbuf,
+ nfc_genl_family.maxattr,
+ nfc_genl_policy);
if (rc < 0)
return ERR_PTR(rc);
@@ -110,7 +110,7 @@
}
static int nfc_genl_dump_targets(struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb)
{
int i = cb->args[0];
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
@@ -130,7 +130,7 @@
while (i < dev->n_targets) {
rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
- NLM_F_MULTI);
+ NLM_F_MULTI);
if (rc < 0)
break;
@@ -166,7 +166,7 @@
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_EVENT_TARGETS_FOUND);
+ NFC_EVENT_TARGETS_FOUND);
if (!hdr)
goto free_msg;
@@ -193,13 +193,14 @@
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_EVENT_DEVICE_ADDED);
+ NFC_EVENT_DEVICE_ADDED);
if (!hdr)
goto free_msg;
NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
+ NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
genlmsg_end(msg, hdr);
@@ -224,7 +225,7 @@
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_EVENT_DEVICE_REMOVED);
+ NFC_EVENT_DEVICE_REMOVED);
if (!hdr)
goto free_msg;
@@ -244,14 +245,14 @@
}
static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
- u32 pid, u32 seq,
- struct netlink_callback *cb,
- int flags)
+ u32 pid, u32 seq,
+ struct netlink_callback *cb,
+ int flags)
{
void *hdr;
hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
- NFC_CMD_GET_DEVICE);
+ NFC_CMD_GET_DEVICE);
if (!hdr)
return -EMSGSIZE;
@@ -261,6 +262,7 @@
NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
+ NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
return genlmsg_end(msg, hdr);
@@ -270,7 +272,7 @@
}
static int nfc_genl_dump_devices(struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
@@ -297,8 +299,7 @@
int rc;
rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- cb, NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
if (rc < 0)
break;
@@ -323,7 +324,7 @@
}
int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
- u8 comm_mode, u8 rf_mode)
+ u8 comm_mode, u8 rf_mode)
{
struct sk_buff *msg;
void *hdr;
@@ -334,8 +335,7 @@
if (!msg)
return -ENOMEM;
- hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_CMD_DEP_LINK_UP);
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP);
if (!hdr)
goto free_msg;
@@ -372,7 +372,7 @@
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_CMD_DEP_LINK_DOWN);
+ NFC_CMD_DEP_LINK_DOWN);
if (!hdr)
goto free_msg;
@@ -414,7 +414,7 @@
}
rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
- NULL, 0);
+ NULL, 0);
if (rc < 0)
goto out_free;
@@ -481,7 +481,7 @@
pr_debug("Poll start\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
- !info->attrs[NFC_ATTR_PROTOCOLS])
+ !info->attrs[NFC_ATTR_PROTOCOLS])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -539,13 +539,12 @@
struct nfc_dev *dev;
int rc, tgt_idx;
u32 idx;
- u8 comm, rf;
+ u8 comm;
pr_debug("DEP link up\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
- !info->attrs[NFC_ATTR_COMM_MODE] ||
- !info->attrs[NFC_ATTR_RF_MODE])
+ !info->attrs[NFC_ATTR_COMM_MODE])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -555,19 +554,15 @@
tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
- rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]);
if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
return -EINVAL;
- if (rf != NFC_RF_INITIATOR && comm != NFC_RF_TARGET)
- return -EINVAL;
-
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
- rc = nfc_dep_link_up(dev, tgt_idx, comm, rf);
+ rc = nfc_dep_link_up(dev, tgt_idx, comm);
nfc_put_device(dev);
@@ -642,7 +637,7 @@
};
static int nfc_genl_rcv_nl_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+ unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct class_dev_iter iter;
@@ -695,7 +690,7 @@
int rc;
rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops,
- ARRAY_SIZE(nfc_genl_ops));
+ ARRAY_SIZE(nfc_genl_ops));
if (rc)
return rc;
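The netlink.c hunks add NFC_ATTR_DEVICE_POWERED as an NLA_U8 attribute to nfc_genl_policy and report dev->dev_up through it both in the NFC_EVENT_DEVICE_ADDED event and in the device get/dump path. As a hedged, kernel-context sketch (not compilable on its own), declaring and emitting such an attribute generally looks like the snippet below; example_policy and example_fill_device are hypothetical helpers, while the policy entry and the u8 put mirror what the hunk itself uses.

/* hypothetical sketch of the policy entry and the attribute emission */
static const struct nla_policy example_policy[NFC_ATTR_MAX + 1] = {
	[NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
};

static int example_fill_device(struct sk_buff *msg, struct nfc_dev *dev)
{
	if (nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
		return -EMSGSIZE;
	return 0;
}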
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 6d28d75..ec8794c 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -32,7 +32,7 @@
struct proto *proto;
struct module *owner;
int (*create)(struct net *net, struct socket *sock,
- const struct nfc_protocol *nfc_proto);
+ const struct nfc_protocol *nfc_proto);
};
struct nfc_rawsock {
@@ -54,7 +54,7 @@
int nfc_llcp_register_device(struct nfc_dev *dev);
void nfc_llcp_unregister_device(struct nfc_dev *dev);
int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
-u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len);
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
int __init nfc_llcp_init(void);
void nfc_llcp_exit(void);
@@ -65,7 +65,7 @@
}
static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
- u8 comm_mode, u8 rf_mode)
+ u8 comm_mode, u8 rf_mode)
{
}
@@ -78,7 +78,8 @@
{
}
-static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
+ u8 *gb, u8 gb_len)
{
return 0;
}
@@ -160,8 +161,7 @@
int nfc_stop_poll(struct nfc_dev *dev);
-int nfc_dep_link_up(struct nfc_dev *dev, int target_idx,
- u8 comm_mode, u8 rf_mode);
+int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, u8 comm_mode);
int nfc_dep_link_down(struct nfc_dev *dev);
@@ -169,9 +169,7 @@
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
-int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
- struct sk_buff *skb,
- data_exchange_cb_t cb,
- void *cb_context);
+int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context);
#endif /* __LOCAL_NFC_H */
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 5325439..5a839ce 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -63,7 +63,7 @@
}
static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
- int len, int flags)
+ int len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
@@ -73,7 +73,7 @@
pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
if (!addr || len < sizeof(struct sockaddr_nfc) ||
- addr->sa_family != AF_NFC)
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
@@ -120,7 +120,7 @@
}
static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
- int err)
+ int err)
{
struct sock *sk = (struct sock *) context;
@@ -173,7 +173,7 @@
sock_hold(sk);
rc = nfc_data_exchange(dev, target_idx, skb,
- rawsock_data_exchange_complete, sk);
+ rawsock_data_exchange_complete, sk);
if (rc) {
rawsock_report_error(sk, rc);
sock_put(sk);
@@ -181,7 +181,7 @@
}
static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+ struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nfc_dev *dev = nfc_rawsock(sk)->dev;
@@ -218,7 +218,7 @@
}
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+ struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -274,7 +274,7 @@
if (sk->sk_state == TCP_ESTABLISHED) {
nfc_deactivate_target(nfc_rawsock(sk)->dev,
- nfc_rawsock(sk)->target_idx);
+ nfc_rawsock(sk)->target_idx);
nfc_put_device(nfc_rawsock(sk)->dev);
}
@@ -287,7 +287,7 @@
}
static int rawsock_create(struct net *net, struct socket *sock,
- const struct nfc_protocol *nfc_proto)
+ const struct nfc_protocol *nfc_proto)
{
struct sock *sk;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2725d1b..48badff 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -145,9 +145,16 @@
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
*addr, new_addr, 1);
} else if (nh->protocol == IPPROTO_UDP) {
- if (likely(transport_len >= sizeof(struct udphdr)))
- inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
- *addr, new_addr, 1);
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&uh->check, skb,
+ *addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
}
csum_replace4(&nh->check, *addr, new_addr);
@@ -197,8 +204,22 @@
skb->rxhash = 0;
}
-static int set_udp_port(struct sk_buff *skb,
- const struct ovs_key_udp *udp_port_key)
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+ set_tp_port(skb, port, new_port, &uh->check);
+
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ *port = new_port;
+ skb->rxhash = 0;
+ }
+}
+
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
struct udphdr *uh;
int err;
@@ -210,16 +231,15 @@
uh = udp_hdr(skb);
if (udp_port_key->udp_src != uh->source)
- set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+ set_udp_port(skb, &uh->source, udp_port_key->udp_src);
if (udp_port_key->udp_dst != uh->dest)
- set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+ set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
return 0;
}
-static int set_tcp_port(struct sk_buff *skb,
- const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
struct tcphdr *th;
int err;
@@ -328,11 +348,11 @@
break;
case OVS_KEY_ATTR_TCP:
- err = set_tcp_port(skb, nla_data(nested_attr));
+ err = set_tcp(skb, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_UDP:
- err = set_udp_port(skb, nla_data(nested_attr));
+ err = set_udp(skb, nla_data(nested_attr));
break;
}
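The actions.c changes above make the OVS datapath honour the special meaning of a zero UDP checksum: a transmitted value of 0 means "no checksum", so the checksum is only rewritten when it is already in use (or the skb still carries CHECKSUM_PARTIAL), and a recomputed value of 0 is stored as CSUM_MANGLED_0 (0xffff). A stand-alone sketch of that substitution rule, with a hypothetical fold-and-finish helper:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement sum into a final UDP checksum.
 * A result of 0 is sent as 0xffff, because 0 on the wire means
 * "checksum not computed", the same rule CSUM_MANGLED_0 encodes. */
static uint16_t udp_finish_csum(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	uint16_t csum = (uint16_t)~sum;
	return csum ? csum : 0xffff;
}

int main(void)
{
	printf("0x%04x\n", udp_finish_csum(0xffff));	/* complement is 0 -> mangled to 0xffff */
	printf("0x%04x\n", udp_finish_csum(0x1234));	/* ordinary case -> 0xedcb */
	return 0;
}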
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ce64c18..2c03050 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1521,6 +1521,9 @@
vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
+ if (ovs_header->dp_ifindex &&
+ ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+ return ERR_PTR(-ENODEV);
return vport;
} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 408ebd0..06b42b7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4170,14 +4170,16 @@
}
/* Helper routine to branch off an association to a new socket. */
-SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
- struct socket **sockp)
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
- struct sock *sk = asoc->base.sk;
+ struct sctp_association *asoc = sctp_id2assoc(sk, id);
struct socket *sock;
struct sctp_af *af;
int err = 0;
+ if (!asoc)
+ return -EINVAL;
+
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
@@ -4206,13 +4208,13 @@
return err;
}
+EXPORT_SYMBOL(sctp_do_peeloff);
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
sctp_peeloff_arg_t peeloff;
struct socket *newsock;
int retval = 0;
- struct sctp_association *asoc;
if (len < sizeof(sctp_peeloff_arg_t))
return -EINVAL;
@@ -4220,15 +4222,7 @@
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
- asoc = sctp_id2assoc(sk, peeloff.associd);
- if (!asoc) {
- retval = -EINVAL;
- goto out;
- }
-
- SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__, sk, asoc);
-
- retval = sctp_do_peeloff(asoc, &newsock);
+ retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
if (retval < 0)
goto out;
@@ -4239,8 +4233,8 @@
goto out;
}
- SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n",
- __func__, sk, asoc, newsock->sk, retval);
+ SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n",
+ __func__, sk, newsock->sk, retval);
/* Return the fd mapped to the new socket. */
peeloff.sd = retval;
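The sctp/socket.c hunks rework sctp_do_peeloff() so it takes the owning sock plus an association id, performs the sctp_id2assoc() lookup itself (returning -EINVAL when the id resolves to nothing), and exports the symbol for other in-kernel users. A hedged sketch of a caller, assuming only the new prototype shown in the hunk; example_peel_off is hypothetical and this is kernel-context code, not compilable stand-alone:

/* hypothetical sketch: peel association `id` off socket `sk` */
static int example_peel_off(struct sock *sk, sctp_assoc_t id)
{
	struct socket *newsock;
	int err;

	err = sctp_do_peeloff(sk, id, &newsock);
	if (err < 0)
		return err;	/* e.g. -EINVAL when no association matches id */

	/* ... hand newsock to whatever needs the peeled-off association ... */
	sock_release(newsock);	/* released again here only to keep the sketch balanced */
	return 0;
}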
diff --git a/net/socket.c b/net/socket.c
index 28a96af..12a48d8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -181,7 +181,7 @@
* invalid addresses -EFAULT is returned. On a success 0 is returned.
*/
-int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
+int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
return -EINVAL;
@@ -209,7 +209,7 @@
* specified. Zero is returned for a success.
*/
-static int move_addr_to_user(struct sockaddr *kaddr, int klen,
+static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
void __user *uaddr, int __user *ulen)
{
int err;
@@ -1449,7 +1449,7 @@
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
- err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
+ err = move_addr_to_kernel(umyaddr, addrlen, &address);
if (err >= 0) {
err = security_socket_bind(sock,
(struct sockaddr *)&address,
@@ -1556,7 +1556,7 @@
err = -ECONNABORTED;
goto out_fd;
}
- err = move_addr_to_user((struct sockaddr *)&address,
+ err = move_addr_to_user(&address,
len, upeer_sockaddr, upeer_addrlen);
if (err < 0)
goto out_fd;
@@ -1605,7 +1605,7 @@
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address);
+ err = move_addr_to_kernel(uservaddr, addrlen, &address);
if (err < 0)
goto out_put;
@@ -1645,7 +1645,7 @@
err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
if (err)
goto out_put;
- err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);
+ err = move_addr_to_user(&address, len, usockaddr, usockaddr_len);
out_put:
fput_light(sock->file, fput_needed);
@@ -1677,7 +1677,7 @@
sock->ops->getname(sock, (struct sockaddr *)&address, &len,
1);
if (!err)
- err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
+ err = move_addr_to_user(&address, len, usockaddr,
usockaddr_len);
fput_light(sock->file, fput_needed);
}
@@ -1716,7 +1716,7 @@
msg.msg_controllen = 0;
msg.msg_namelen = 0;
if (addr) {
- err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
+ err = move_addr_to_kernel(addr, addr_len, &address);
if (err < 0)
goto out_put;
msg.msg_name = (struct sockaddr *)&address;
@@ -1779,7 +1779,7 @@
err = sock_recvmsg(sock, &msg, size, flags);
if (err >= 0 && addr != NULL) {
- err2 = move_addr_to_user((struct sockaddr *)&address,
+ err2 = move_addr_to_user(&address,
msg.msg_namelen, addr, addr_len);
if (err2 < 0)
err = err2;
@@ -1933,13 +1933,9 @@
/* This will also move the address data into kernel space */
if (MSG_CMSG_COMPAT & flags) {
- err = verify_compat_iovec(msg_sys, iov,
- (struct sockaddr *)&address,
- VERIFY_READ);
+ err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ);
} else
- err = verify_iovec(msg_sys, iov,
- (struct sockaddr *)&address,
- VERIFY_READ);
+ err = verify_iovec(msg_sys, iov, &address, VERIFY_READ);
if (err < 0)
goto out_freeiov;
total_len = err;
@@ -2143,13 +2139,9 @@
uaddr = (__force void __user *)msg_sys->msg_name;
uaddr_len = COMPAT_NAMELEN(msg);
if (MSG_CMSG_COMPAT & flags) {
- err = verify_compat_iovec(msg_sys, iov,
- (struct sockaddr *)&addr,
- VERIFY_WRITE);
+ err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
} else
- err = verify_iovec(msg_sys, iov,
- (struct sockaddr *)&addr,
- VERIFY_WRITE);
+ err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
if (err < 0)
goto out_freeiov;
total_len = err;
@@ -2166,7 +2158,7 @@
len = err;
if (uaddr != NULL) {
- err = move_addr_to_user((struct sockaddr *)&addr,
+ err = move_addr_to_user(&addr,
msg_sys->msg_namelen, uaddr,
uaddr_len);
if (err < 0)
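The net/socket.c changes switch move_addr_to_kernel() and move_addr_to_user() over to struct sockaddr_storage *, the type that actually backs the on-stack address buffers, so the repeated (struct sockaddr *) casts disappear from the call sites and only reappear at the protocol-API boundary. The same pattern is common in portable userspace code; a small stand-alone sketch follows (fill_ipv4 is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

/* Build an IPv4 address inside a sockaddr_storage; the cast to
 * struct sockaddr * happens only where the socket API requires it. */
static socklen_t fill_ipv4(struct sockaddr_storage *ss, const char *ip, uint16_t port)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)ss;

	memset(ss, 0, sizeof(*ss));
	sin->sin_family = AF_INET;
	sin->sin_port = htons(port);
	inet_pton(AF_INET, ip, &sin->sin_addr);
	return sizeof(*sin);
}

int main(void)
{
	struct sockaddr_storage ss;
	socklen_t len = fill_ipv4(&ss, "192.0.2.1", 4242);

	/* e.g. bind(fd, (struct sockaddr *)&ss, len); */
	printf("family=%d len=%u\n", ss.ss_family, (unsigned)len);
	return 0;
}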
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 9d3e3b6..ba21ab2 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -23,6 +23,8 @@
#define MESH_PERR_MIN_INT 100
#define MESH_DIAM_TRAVERSAL_TIME 50
+#define MESH_RSSI_THRESHOLD 0
+
/*
* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds
* before timing out. This way it will remain ACTIVE and no data frames
@@ -56,6 +58,7 @@
.dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
.dot11MeshGateAnnouncementProtocol = false,
.dot11MeshForwarding = true,
+ .rssi_threshold = MESH_RSSI_THRESHOLD,
};
const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index fb1e721..f5a7ac3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -814,8 +814,8 @@
cookie);
}
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
- size_t len, gfp_t gfp)
+bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
+ const u8 *buf, size_t len, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -854,7 +854,8 @@
/* found match! */
/* Indicate the received Action frame to user space */
- if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq,
+ if (nl80211_send_mgmt(rdev, dev, reg->nlpid,
+ freq, sig_mbm,
buf, len, gfp))
continue;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1998c36..39dbdf2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -204,6 +204,7 @@
.len = NL80211_HT_CAPABILITY_LEN
},
[NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
+ [NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
};
/* policy for the key attributes */
@@ -2214,6 +2215,13 @@
if (err)
return err;
+ if (info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]) {
+ if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER))
+ return -EOPNOTSUPP;
+ params.inactivity_timeout = nla_get_u16(
+ info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
+ }
+
err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
if (!err)
wdev->beacon_interval = params.beacon_interval;
@@ -3290,6 +3298,8 @@
cur_params.dot11MeshGateAnnouncementProtocol);
NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
cur_params.dot11MeshForwarding);
+ NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
+ cur_params.rssi_threshold);
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -3322,6 +3332,7 @@
[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
[NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
+ [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
};
static const struct nla_policy
@@ -3413,6 +3424,8 @@
nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
+ mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
if (mask_out)
*mask_out = mask;
@@ -7673,7 +7686,8 @@
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 nlpid,
- int freq, const u8 *buf, size_t len, gfp_t gfp)
+ int freq, int sig_dbm,
+ const u8 *buf, size_t len, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
@@ -7691,6 +7705,8 @@
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+ if (sig_dbm)
+ NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
genlmsg_end(msg, hdr);
@@ -7952,7 +7968,7 @@
void cfg80211_report_obss_beacon(struct wiphy *wiphy,
const u8 *frame, size_t len,
- int freq, gfp_t gfp)
+ int freq, int sig_dbm, gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
struct sk_buff *msg;
@@ -7975,6 +7991,8 @@
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
if (freq)
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+ if (sig_dbm)
+ NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
genlmsg_end(msg, hdr);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 12bf4d1..4ffe50d 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -92,7 +92,8 @@
gfp_t gfp);
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid, int freq,
+ struct net_device *netdev, u32 nlpid,
+ int freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp);
void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u64 cookie,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 9aa9db6..1b7a08df 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -904,6 +904,7 @@
/* do NOT round down here */
return (bitrate + 50000) / 100000;
}
+EXPORT_SYMBOL(cfg80211_calculate_bitrate);
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
u32 beacon_int)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f286bb8f..22c73b7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2068,12 +2068,16 @@
*/
static void alc_init_special_input_src(struct hda_codec *codec);
+static int alc269_fill_coef(struct hda_codec *codec);
static int alc_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
unsigned int i;
+ if (codec->vendor_id == 0x10ec0269)
+ alc269_fill_coef(codec);
+
alc_fix_pll(codec);
alc_auto_init_amp(codec, spec->init_amp);
@@ -4367,6 +4371,7 @@
ALC882_FIXUP_PB_M5210,
ALC882_FIXUP_ACER_ASPIRE_7736,
ALC882_FIXUP_ASUS_W90V,
+ ALC889_FIXUP_CD,
ALC889_FIXUP_VAIO_TT,
ALC888_FIXUP_EEE1601,
ALC882_FIXUP_EAPD,
@@ -4494,6 +4499,13 @@
{ }
}
},
+ [ALC889_FIXUP_CD] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1c, 0x993301f0 }, /* CD */
+ { }
+ }
+ },
[ALC889_FIXUP_VAIO_TT] = {
.type = ALC_FIXUP_PINS,
.v.pins = (const struct alc_pincfg[]) {
@@ -4650,6 +4662,7 @@
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -5467,8 +5480,12 @@
static int alc269_fill_coef(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
int val;
+ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+ return 0;
+
if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
alc_write_coef_idx(codec, 0xf, 0x960b);
alc_write_coef_idx(codec, 0xe, 0x8817);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index cc9f6c8..bc030a2 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6333,6 +6333,7 @@
hw->ops.open = snd_hdspm_hwdep_dummy_op;
hw->ops.ioctl = snd_hdspm_hwdep_ioctl;
+ hw->ops.ioctl_compat = snd_hdspm_hwdep_ioctl;
hw->ops.release = snd_hdspm_hwdep_dummy_op;
return 0;
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index c6012ff..d23b19a 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -367,7 +367,7 @@
.platform_name = "samsung-audio",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "wm8753-hifi",
- .codec_name = "wm8753-codec.0-001a",
+ .codec_name = "wm8753.0-001a",
.init = neo1973_wm8753_init,
.ops = &neo1973_hifi_ops,
},
@@ -376,7 +376,7 @@
.stream_name = "Voice",
.cpu_dai_name = "dfbmcs320-pcm",
.codec_dai_name = "wm8753-voice",
- .codec_name = "wm8753-codec.0-001a",
+ .codec_name = "wm8753.0-001a",
.ops = &neo1973_voice_ops,
},
};