/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Tomas Winkler <tomas.winkler@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/mac80211.h>

struct iwl_priv; /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-eeprom.h"
#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-rfkill.h"
#include "iwl-power.h"


MODULE_DESCRIPTION("iwl core");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
        [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
                                    IWL_RATE_SISO_##s##M_PLCP, \
                                    IWL_RATE_MIMO2_##s##M_PLCP,\
                                    IWL_RATE_MIMO3_##s##M_PLCP,\
                                    IWL_RATE_##r##M_IEEE,      \
                                    IWL_RATE_##ip##M_INDEX,    \
                                    IWL_RATE_##in##M_INDEX,    \
                                    IWL_RATE_##rp##M_INDEX,    \
                                    IWL_RATE_##rn##M_INDEX,    \
                                    IWL_RATE_##pp##M_INDEX,    \
                                    IWL_RATE_##np##M_INDEX }

/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
        IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /* 1mbps */
        IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /* 2mbps */
        IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /* 5.5mbps */
        IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
        IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),          /* 6mbps */
        IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),         /* 9mbps */
        IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),    /* 12mbps */
        IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),    /* 18mbps */
        IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),    /* 24mbps */
        IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),    /* 36mbps */
        IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),    /* 48mbps */
        IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV), /* 54mbps */
        IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV), /* 60mbps */
        /* FIXME:RS: ^^ should be INV (legacy) */
};
EXPORT_SYMBOL(iwl_rates);
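
/*
 * Expansion sketch (written out by hand for illustration): the 6 Mbps
 * entry above, IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), expands
 * roughly to
 *
 *	[IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,
 *				IWL_RATE_SISO_6M_PLCP,
 *				IWL_RATE_MIMO2_6M_PLCP,
 *				IWL_RATE_MIMO3_6M_PLCP,
 *				IWL_RATE_6M_IEEE,
 *				IWL_RATE_5M_INDEX,	(prev)
 *				IWL_RATE_9M_INDEX,	(next)
 *				IWL_RATE_5M_INDEX,
 *				IWL_RATE_11M_INDEX,
 *				IWL_RATE_5M_INDEX,
 *				IWL_RATE_11M_INDEX },
 *
 * so each row ties a rate to its PLCP codes and to its neighbours for
 * rate-scaling walks up and down the table.
 */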

/**
 * iwl_hwrate_to_tx_control - translate ucode response to mac80211 tx status control values
 */
void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
                              struct ieee80211_tx_info *control)
{
        int rate_index;

        control->antenna_sel_tx =
                ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
        if (rate_n_flags & RATE_MCS_HT_MSK)
                control->flags |= IEEE80211_TX_CTL_OFDM_HT;
        if (rate_n_flags & RATE_MCS_GF_MSK)
                control->flags |= IEEE80211_TX_CTL_GREEN_FIELD;
        if (rate_n_flags & RATE_MCS_FAT_MSK)
                control->flags |= IEEE80211_TX_CTL_40_MHZ_WIDTH;
        if (rate_n_flags & RATE_MCS_DUP_MSK)
                control->flags |= IEEE80211_TX_CTL_DUP_DATA;
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                control->flags |= IEEE80211_TX_CTL_SHORT_GI;
        rate_index = iwl_hwrate_to_plcp_idx(rate_n_flags);
        if (control->band == IEEE80211_BAND_5GHZ)
                rate_index -= IWL_FIRST_OFDM_RATE;
        control->tx_rate_idx = rate_index;
}
EXPORT_SYMBOL(iwl_hwrate_to_tx_control);

int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
        int idx = 0;

        /* HT rate format */
        if (rate_n_flags & RATE_MCS_HT_MSK) {
                idx = (rate_n_flags & 0xff);

                if (idx >= IWL_RATE_MIMO2_6M_PLCP)
                        idx = idx - IWL_RATE_MIMO2_6M_PLCP;

                idx += IWL_FIRST_OFDM_RATE;
                /* skip 9M not supported in ht */
                if (idx >= IWL_RATE_9M_INDEX)
                        idx += 1;
                if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
                        return idx;

        /* legacy rate format, search for match in table */
        } else {
                for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
                        if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
                                return idx;
        }

        return -1;
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
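
/*
 * Caller sketch (hypothetical, for illustration only; no such caller in
 * this file): the return value must be checked for -1 before use, since
 * an unrecognized PLCP value yields no index.
 *
 *	int idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
 *	if (idx < 0)
 *		return;		(unknown hw rate: drop the frame)
 */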


const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwl_bcast_addr);


/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
                                   struct ieee80211_ops *hw_ops)
{
        struct iwl_priv *priv;

        /* mac80211 allocates memory for this device instance, including
         * space for this driver's private structure */
        struct ieee80211_hw *hw =
                ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
        if (hw == NULL) {
                IWL_ERROR("Can not allocate network device\n");
                goto out;
        }

        priv = hw->priv;
        priv->hw = hw;

out:
        return hw;
}
EXPORT_SYMBOL(iwl_alloc_all);
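
/*
 * Probe-time usage sketch (hypothetical; iwl_hw_ops and pdev are assumed
 * names, not defined in this file): allocate hw+priv first, then touch
 * the private area.
 *
 *	hw = iwl_alloc_all(cfg, &iwl_hw_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	priv = hw->priv;
 *	SET_IEEE80211_DEV(hw, &pdev->dev);
 */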

void iwl_hw_detect(struct iwl_priv *priv)
{
        priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
        priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
        pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
}
EXPORT_SYMBOL(iwl_hw_detect);

/* Tell nic where to find the "keep warm" buffer */
int iwl_kw_init(struct iwl_priv *priv)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);
        ret = iwl_grab_nic_access(priv);
        if (ret)
                goto out;

        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
                           priv->kw.dma_addr >> 4);
        iwl_release_nic_access(priv);
out:
        spin_unlock_irqrestore(&priv->lock, flags);
        return ret;
}

int iwl_kw_alloc(struct iwl_priv *priv)
{
        struct pci_dev *dev = priv->pci_dev;
        struct iwl_kw *kw = &priv->kw;

        kw->size = IWL_KW_SIZE;
        kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
        if (!kw->v_addr)
                return -ENOMEM;

        return 0;
}

/**
 * iwl_kw_free - Free the "keep warm" buffer
 */
void iwl_kw_free(struct iwl_priv *priv)
{
        struct pci_dev *dev = priv->pci_dev;
        struct iwl_kw *kw = &priv->kw;

        if (kw->v_addr) {
                pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
                memset(kw, 0, sizeof(*kw));
        }
}
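
/*
 * Keep-warm buffer lifecycle sketch (a hypothetical ordering assembled
 * from the three helpers above): allocate the DMA-coherent buffer once,
 * point the NIC at it during bring-up, release it on teardown.
 *
 *	if (iwl_kw_alloc(priv))
 *		return -ENOMEM;
 *	if (iwl_kw_init(priv))
 *		IWL_ERROR("kw_init failed\n");
 *	...
 *	iwl_kw_free(priv);
 */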

int iwl_hw_nic_init(struct iwl_priv *priv)
{
        unsigned long flags;
        struct iwl_rx_queue *rxq = &priv->rxq;
        int ret;

        /* nic_init */
        spin_lock_irqsave(&priv->lock, flags);
        priv->cfg->ops->lib->apm_ops.init(priv);
        iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
        spin_unlock_irqrestore(&priv->lock, flags);

        ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

        priv->cfg->ops->lib->apm_ops.config(priv);

        /* Allocate the RX queue, or reset if it is already allocated */
        if (!rxq->bd) {
                ret = iwl_rx_queue_alloc(priv);
                if (ret) {
                        IWL_ERROR("Unable to initialize Rx queue\n");
                        return -ENOMEM;
                }
        } else
                iwl_rx_queue_reset(priv, rxq);

        iwl_rx_replenish(priv);

        iwl_rx_init(priv, rxq);

        spin_lock_irqsave(&priv->lock, flags);

        rxq->need_update = 1;
        iwl_rx_queue_update_write_ptr(priv, rxq);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Allocate and init all Tx and Command queues */
        ret = iwl_txq_ctx_reset(priv);
        if (ret)
                return ret;

        set_bit(STATUS_INIT, &priv->status);

        return 0;
}
EXPORT_SYMBOL(iwl_hw_nic_init);
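
/*
 * Bring-up ordering summary (restating the sequence above): APM init ->
 * power source -> device config -> RX queue alloc/reset and replenish ->
 * TX/command queue reset -> STATUS_INIT. Note that the set_pwr_src()
 * return value is overwritten by the later iwl_txq_ctx_reset()
 * assignment, so it is effectively unchecked here.
 */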

/**
 * iwl_clear_stations_table - Clear the driver's station table
 *
 * NOTE: This does not clear or otherwise alter the device's station table.
 */
void iwl_clear_stations_table(struct iwl_priv *priv)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->sta_lock, flags);

        if (iwl_is_alive(priv) &&
            !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
            iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
                IWL_ERROR("Couldn't clear the station table\n");

        priv->num_stations = 0;
        memset(priv->stations, 0, sizeof(priv->stations));

        /* clean ucode key table bit map */
        priv->ucode_key_table = 0;

        spin_unlock_irqrestore(&priv->sta_lock, flags);
}
EXPORT_SYMBOL(iwl_clear_stations_table);
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 299 | |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 300 | void iwl_reset_qos(struct iwl_priv *priv) |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 301 | { |
| 302 | u16 cw_min = 15; |
| 303 | u16 cw_max = 1023; |
| 304 | u8 aifs = 2; |
| 305 | u8 is_legacy = 0; |
| 306 | unsigned long flags; |
| 307 | int i; |
| 308 | |
| 309 | spin_lock_irqsave(&priv->lock, flags); |
| 310 | priv->qos_data.qos_active = 0; |
| 311 | |
Johannes Berg | 05c914f | 2008-09-11 00:01:58 +0200 | [diff] [blame] | 312 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC) { |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 313 | if (priv->qos_data.qos_enable) |
| 314 | priv->qos_data.qos_active = 1; |
| 315 | if (!(priv->active_rate & 0xfff0)) { |
| 316 | cw_min = 31; |
| 317 | is_legacy = 1; |
| 318 | } |
Johannes Berg | 05c914f | 2008-09-11 00:01:58 +0200 | [diff] [blame] | 319 | } else if (priv->iw_mode == NL80211_IFTYPE_AP) { |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 320 | if (priv->qos_data.qos_enable) |
| 321 | priv->qos_data.qos_active = 1; |
| 322 | } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { |
| 323 | cw_min = 31; |
| 324 | is_legacy = 1; |
| 325 | } |
| 326 | |
| 327 | if (priv->qos_data.qos_active) |
| 328 | aifs = 3; |
| 329 | |
| 330 | priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); |
| 331 | priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); |
| 332 | priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; |
| 333 | priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; |
| 334 | priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; |
| 335 | |
| 336 | if (priv->qos_data.qos_active) { |
| 337 | i = 1; |
| 338 | priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); |
| 339 | priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); |
| 340 | priv->qos_data.def_qos_parm.ac[i].aifsn = 7; |
| 341 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; |
| 342 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 343 | |
| 344 | i = 2; |
| 345 | priv->qos_data.def_qos_parm.ac[i].cw_min = |
| 346 | cpu_to_le16((cw_min + 1) / 2 - 1); |
| 347 | priv->qos_data.def_qos_parm.ac[i].cw_max = |
| 348 | cpu_to_le16(cw_max); |
| 349 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; |
| 350 | if (is_legacy) |
| 351 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 352 | cpu_to_le16(6016); |
| 353 | else |
| 354 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 355 | cpu_to_le16(3008); |
| 356 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 357 | |
| 358 | i = 3; |
| 359 | priv->qos_data.def_qos_parm.ac[i].cw_min = |
| 360 | cpu_to_le16((cw_min + 1) / 4 - 1); |
| 361 | priv->qos_data.def_qos_parm.ac[i].cw_max = |
| 362 | cpu_to_le16((cw_max + 1) / 2 - 1); |
| 363 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; |
| 364 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 365 | if (is_legacy) |
| 366 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 367 | cpu_to_le16(3264); |
| 368 | else |
| 369 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 370 | cpu_to_le16(1504); |
| 371 | } else { |
| 372 | for (i = 1; i < 4; i++) { |
| 373 | priv->qos_data.def_qos_parm.ac[i].cw_min = |
| 374 | cpu_to_le16(cw_min); |
| 375 | priv->qos_data.def_qos_parm.ac[i].cw_max = |
| 376 | cpu_to_le16(cw_max); |
| 377 | priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; |
| 378 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; |
| 379 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 380 | } |
| 381 | } |
| 382 | IWL_DEBUG_QOS("set QoS to default \n"); |
| 383 | |
| 384 | spin_unlock_irqrestore(&priv->lock, flags); |
| 385 | } |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 386 | EXPORT_SYMBOL(iwl_reset_qos); |
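
/*
 * Worked numbers for the arithmetic above (assuming the non-legacy
 * defaults cw_min = 15, cw_max = 1023): ac[2] gets
 * cw_min = (15 + 1) / 2 - 1 = 7, and ac[3] gets
 * cw_min = (15 + 1) / 4 - 1 = 3 with cw_max = (1023 + 1) / 2 - 1 = 511,
 * so the higher-priority queues contend with shorter backoff windows.
 */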

#define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */
#define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
                                     struct ieee80211_ht_info *ht_info,
                                     enum ieee80211_band band)
{
        u16 max_bit_rate = 0;
        u8 rx_chains_num = priv->hw_params.rx_chains_num;
        u8 tx_chains_num = priv->hw_params.tx_chains_num;

        ht_info->cap = 0;
        memset(ht_info->supp_mcs_set, 0, 16);

        ht_info->ht_supported = 1;

        ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
        ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
        ht_info->cap |= (u16)(IEEE80211_HT_CAP_SM_PS &
                              (WLAN_HT_CAP_SM_PS_DISABLED << 2));

        max_bit_rate = MAX_BIT_RATE_20_MHZ;
        if (priv->hw_params.fat_channel & BIT(band)) {
                ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
                ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
                ht_info->supp_mcs_set[4] = 0x01;
                max_bit_rate = MAX_BIT_RATE_40_MHZ;
        }

        if (priv->cfg->mod_params->amsdu_size_8K)
                ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;

        ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
        ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

        ht_info->supp_mcs_set[0] = 0xFF;
        if (rx_chains_num >= 2)
                ht_info->supp_mcs_set[1] = 0xFF;
        if (rx_chains_num >= 3)
                ht_info->supp_mcs_set[2] = 0xFF;

        /* Highest supported Rx data rate */
        max_bit_rate *= rx_chains_num;
        ht_info->supp_mcs_set[10] = (u8)(max_bit_rate & 0x00FF);
        ht_info->supp_mcs_set[11] = (u8)((max_bit_rate & 0xFF00) >> 8);

        /* Tx MCS capabilities */
        ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
        if (tx_chains_num != rx_chains_num) {
                ht_info->supp_mcs_set[12] |= IEEE80211_HT_CAP_MCS_TX_RX_DIFF;
                ht_info->supp_mcs_set[12] |= ((tx_chains_num - 1) << 2);
        }
}

static void iwlcore_init_hw_rates(struct iwl_priv *priv,
                                  struct ieee80211_rate *rates)
{
        int i;

        for (i = 0; i < IWL_RATE_COUNT; i++) {
                rates[i].bitrate = iwl_rates[i].ieee * 5;
                rates[i].hw_value = i; /* Rate scaling will work on indexes */
                rates[i].hw_value_short = i;
                rates[i].flags = 0;
                if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
                        /*
                         * If CCK != 1M then set short preamble rate flag.
                         */
                        rates[i].flags |=
                                (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
                                0 : IEEE80211_RATE_SHORT_PREAMBLE;
                }
        }
}
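
/*
 * Unit note with a worked example: iwl_rates[].ieee holds the 802.11
 * supported-rates value in 500 kbps units, while mac80211 bitrates are
 * in 100 kbps units, hence the "* 5" above. For 54 Mbps, ieee = 108 and
 * bitrate = 108 * 5 = 540.
 */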

/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
static int iwlcore_init_geos(struct iwl_priv *priv)
{
        struct iwl_channel_info *ch;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *channels;
        struct ieee80211_channel *geo_ch;
        struct ieee80211_rate *rates;
        int i = 0;

        if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
            priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
                IWL_DEBUG_INFO("Geography modes already initialized.\n");
                set_bit(STATUS_GEO_CONFIGURED, &priv->status);
                return 0;
        }

        channels = kzalloc(sizeof(struct ieee80211_channel) *
                           priv->channel_count, GFP_KERNEL);
        if (!channels)
                return -ENOMEM;

        rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
                        GFP_KERNEL);
        if (!rates) {
                kfree(channels);
                return -ENOMEM;
        }

        /* 5.2GHz channels start after the 2.4GHz channels */
        sband = &priv->bands[IEEE80211_BAND_5GHZ];
        sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
        /* just OFDM */
        sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
        sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;

        if (priv->cfg->sku & IWL_SKU_N)
                iwlcore_init_ht_hw_capab(priv, &sband->ht_info,
                                         IEEE80211_BAND_5GHZ);

        sband = &priv->bands[IEEE80211_BAND_2GHZ];
        sband->channels = channels;
        /* OFDM & CCK */
        sband->bitrates = rates;
        sband->n_bitrates = IWL_RATE_COUNT;

        if (priv->cfg->sku & IWL_SKU_N)
                iwlcore_init_ht_hw_capab(priv, &sband->ht_info,
                                         IEEE80211_BAND_2GHZ);

        priv->ieee_channels = channels;
        priv->ieee_rates = rates;

        iwlcore_init_hw_rates(priv, rates);

        for (i = 0; i < priv->channel_count; i++) {
                ch = &priv->channel_info[i];

                /* FIXME: might be removed if scan is OK */
                if (!is_channel_valid(ch))
                        continue;

                if (is_channel_a_band(ch))
                        sband = &priv->bands[IEEE80211_BAND_5GHZ];
                else
                        sband = &priv->bands[IEEE80211_BAND_2GHZ];

                geo_ch = &sband->channels[sband->n_channels++];

                geo_ch->center_freq =
                        ieee80211_channel_to_frequency(ch->channel);
                geo_ch->max_power = ch->max_power_avg;
                geo_ch->max_antenna_gain = 0xff;
                geo_ch->hw_value = ch->channel;

                if (is_channel_valid(ch)) {
                        if (!(ch->flags & EEPROM_CHANNEL_IBSS))
                                geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

                        if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
                                geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

                        if (ch->flags & EEPROM_CHANNEL_RADAR)
                                geo_ch->flags |= IEEE80211_CHAN_RADAR;

                        geo_ch->flags |= ch->fat_extension_channel;

                        if (ch->max_power_avg > priv->tx_power_channel_lmt)
                                priv->tx_power_channel_lmt = ch->max_power_avg;
                } else {
                        geo_ch->flags |= IEEE80211_CHAN_DISABLED;
                }

                /* Save flags for reg domain usage */
                geo_ch->orig_flags = geo_ch->flags;

                IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
                               ch->channel, geo_ch->center_freq,
                               is_channel_a_band(ch) ? "5.2" : "2.4",
                               geo_ch->flags & IEEE80211_CHAN_DISABLED ?
                               "restricted" : "valid",
                               geo_ch->flags);
        }

        if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
            priv->cfg->sku & IWL_SKU_A) {
                printk(KERN_INFO DRV_NAME
                       ": Incorrectly detected BG card as ABG. Please send "
                       "your PCI ID 0x%04X:0x%04X to maintainer.\n",
                       priv->pci_dev->device, priv->pci_dev->subsystem_device);
                priv->cfg->sku &= ~IWL_SKU_A;
        }

        printk(KERN_INFO DRV_NAME
               ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
               priv->bands[IEEE80211_BAND_2GHZ].n_channels,
               priv->bands[IEEE80211_BAND_5GHZ].n_channels);

        set_bit(STATUS_GEO_CONFIGURED, &priv->status);

        return 0;
}

/*
 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
 */
static void iwlcore_free_geos(struct iwl_priv *priv)
{
        kfree(priv->ieee_channels);
        kfree(priv->ieee_rates);
        clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 597 | |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 598 | static bool is_single_rx_stream(struct iwl_priv *priv) |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 599 | { |
| 600 | return !priv->current_ht_config.is_ht || |
| 601 | ((priv->current_ht_config.supp_mcs_set[1] == 0) && |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 602 | (priv->current_ht_config.supp_mcs_set[2] == 0)); |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 603 | } |
Emmanuel Grumbach | 963f551 | 2008-06-12 09:47:00 +0800 | [diff] [blame] | 604 | |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 605 | static u8 iwl_is_channel_extension(struct iwl_priv *priv, |
| 606 | enum ieee80211_band band, |
| 607 | u16 channel, u8 extension_chan_offset) |
| 608 | { |
| 609 | const struct iwl_channel_info *ch_info; |
| 610 | |
| 611 | ch_info = iwl_get_channel_info(priv, band, channel); |
| 612 | if (!is_channel_valid(ch_info)) |
| 613 | return 0; |
| 614 | |
Emmanuel Grumbach | 963f551 | 2008-06-12 09:47:00 +0800 | [diff] [blame] | 615 | if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) |
| 616 | return !(ch_info->fat_extension_channel & |
| 617 | IEEE80211_CHAN_NO_FAT_ABOVE); |
| 618 | else if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) |
| 619 | return !(ch_info->fat_extension_channel & |
| 620 | IEEE80211_CHAN_NO_FAT_BELOW); |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 621 | |
| 622 | return 0; |
| 623 | } |
| 624 | |
| 625 | u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, |
| 626 | struct ieee80211_ht_info *sta_ht_inf) |
| 627 | { |
| 628 | struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; |
| 629 | |
| 630 | if ((!iwl_ht_conf->is_ht) || |
| 631 | (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) || |
Emmanuel Grumbach | 963f551 | 2008-06-12 09:47:00 +0800 | [diff] [blame] | 632 | (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE)) |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 633 | return 0; |
| 634 | |
| 635 | if (sta_ht_inf) { |
| 636 | if ((!sta_ht_inf->ht_supported) || |
| 637 | (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH))) |
| 638 | return 0; |
| 639 | } |
| 640 | |
| 641 | return iwl_is_channel_extension(priv, priv->band, |
| 642 | iwl_ht_conf->control_channel, |
| 643 | iwl_ht_conf->extension_chan_offset); |
| 644 | } |
| 645 | EXPORT_SYMBOL(iwl_is_fat_tx_allowed); |
| 646 | |
| 647 | void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) |
| 648 | { |
Gregory Greenman | c1adf9f | 2008-05-15 13:53:59 +0800 | [diff] [blame] | 649 | struct iwl_rxon_cmd *rxon = &priv->staging_rxon; |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 650 | u32 val; |
| 651 | |
Emmanuel Grumbach | 42eb7c6 | 2008-09-17 10:10:05 +0800 | [diff] [blame] | 652 | if (!ht_info->is_ht) { |
| 653 | rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | |
| 654 | RXON_FLG_CHANNEL_MODE_PURE_40_MSK | |
| 655 | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | |
| 656 | RXON_FLG_FAT_PROT_MSK | |
| 657 | RXON_FLG_HT_PROT_MSK); |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 658 | return; |
Emmanuel Grumbach | 42eb7c6 | 2008-09-17 10:10:05 +0800 | [diff] [blame] | 659 | } |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 660 | |
| 661 | /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */ |
| 662 | if (iwl_is_fat_tx_allowed(priv, NULL)) |
| 663 | rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK; |
| 664 | else |
| 665 | rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | |
| 666 | RXON_FLG_CHANNEL_MODE_PURE_40_MSK); |
| 667 | |
| 668 | if (le16_to_cpu(rxon->channel) != ht_info->control_channel) { |
| 669 | IWL_DEBUG_ASSOC("control diff than current %d %d\n", |
| 670 | le16_to_cpu(rxon->channel), |
| 671 | ht_info->control_channel); |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 672 | return; |
| 673 | } |
| 674 | |
| 675 | /* Note: control channel is opposite of extension channel */ |
| 676 | switch (ht_info->extension_chan_offset) { |
Emmanuel Grumbach | 963f551 | 2008-06-12 09:47:00 +0800 | [diff] [blame] | 677 | case IEEE80211_HT_IE_CHA_SEC_ABOVE: |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 678 | rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); |
| 679 | break; |
Emmanuel Grumbach | 963f551 | 2008-06-12 09:47:00 +0800 | [diff] [blame] | 680 | case IEEE80211_HT_IE_CHA_SEC_BELOW: |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 681 | rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; |
| 682 | break; |
Emmanuel Grumbach | 963f551 | 2008-06-12 09:47:00 +0800 | [diff] [blame] | 683 | case IEEE80211_HT_IE_CHA_SEC_NONE: |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 684 | default: |
| 685 | rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; |
| 686 | break; |
| 687 | } |
| 688 | |
| 689 | val = ht_info->ht_protection; |
| 690 | |
| 691 | rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS); |
| 692 | |
| 693 | iwl_set_rxon_chain(priv); |
| 694 | |
| 695 | IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X " |
| 696 | "rxon flags 0x%X operation mode :0x%X " |
| 697 | "extension channel offset 0x%x " |
| 698 | "control chan %d\n", |
| 699 | ht_info->supp_mcs_set[0], |
| 700 | ht_info->supp_mcs_set[1], |
| 701 | ht_info->supp_mcs_set[2], |
| 702 | le32_to_cpu(rxon->flags), ht_info->ht_protection, |
| 703 | ht_info->extension_chan_offset, |
| 704 | ht_info->control_channel); |
| 705 | return; |
| 706 | } |
| 707 | EXPORT_SYMBOL(iwl_set_rxon_ht); |

#define IWL_NUM_RX_CHAINS_MULTIPLE 3
#define IWL_NUM_RX_CHAINS_SINGLE 2
#define IWL_NUM_IDLE_CHAINS_DUAL 2
#define IWL_NUM_IDLE_CHAINS_SINGLE 1

/* Determine how many receiver/antenna chains to use.
 * More provides better reception via diversity. Fewer saves power.
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
        bool is_single = is_single_rx_stream(priv);
        bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);

        /* # of Rx chains to use when expecting MIMO. */
        if (is_single || (!is_cam && (priv->current_ht_config.sm_ps ==
                                      WLAN_HT_CAP_SM_PS_STATIC)))
                return IWL_NUM_RX_CHAINS_SINGLE;
        else
                return IWL_NUM_RX_CHAINS_MULTIPLE;
}

static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
        int idle_cnt;
        bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
        /* # Rx chains when idling and maybe trying to save power */
        switch (priv->current_ht_config.sm_ps) {
        case WLAN_HT_CAP_SM_PS_STATIC:
        case WLAN_HT_CAP_SM_PS_DYNAMIC:
                idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
                                      IWL_NUM_IDLE_CHAINS_SINGLE;
                break;
        case WLAN_HT_CAP_SM_PS_DISABLED:
                idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
                break;
        case WLAN_HT_CAP_SM_PS_INVALID:
        default:
                IWL_ERROR("invalid mimo ps mode %d\n",
                          priv->current_ht_config.sm_ps);
                WARN_ON(1);
                idle_cnt = -1;
                break;
        }
        return idle_cnt;
}

/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
        u8 res;
        res = (chain_bitmap & BIT(0)) >> 0;
        res += (chain_bitmap & BIT(1)) >> 1;
        res += (chain_bitmap & BIT(2)) >> 2;
        res += (chain_bitmap & BIT(3)) >> 3; /* was BIT(4); the fourth chain is bit 3 */
        return res;
}
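
/*
 * Example (illustrative): a device with antennas A and B connected has
 * chain_bitmap = BIT(0) | BIT(1) = 0x3, so iwl_count_chain_bitmap()
 * returns 2; a 3x3 device with all chains valid (0x7) yields 3.
 */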
| 767 | |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 768 | /** |
| 769 | * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image |
| 770 | * |
| 771 | * Selects how many and which Rx receivers/antennas/chains to use. |
| 772 | * This should not be used for scan command ... it puts data in wrong place. |
| 773 | */ |
| 774 | void iwl_set_rxon_chain(struct iwl_priv *priv) |
| 775 | { |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 776 | bool is_single = is_single_rx_stream(priv); |
| 777 | bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); |
Grumbach, Emmanuel | 0481644 | 2008-09-03 11:26:53 +0800 | [diff] [blame] | 778 | u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; |
| 779 | u32 active_chains; |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 780 | u16 rx_chain; |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 781 | |
| 782 | /* Tell uCode which antennas are actually connected. |
| 783 | * Before first association, we assume all antennas are connected. |
| 784 | * Just after first association, iwl_chain_noise_calibration() |
| 785 | * checks which antennas actually *are* connected. */ |
Grumbach, Emmanuel | 0481644 | 2008-09-03 11:26:53 +0800 | [diff] [blame] | 786 | if (priv->chain_noise_data.active_chains) |
| 787 | active_chains = priv->chain_noise_data.active_chains; |
| 788 | else |
| 789 | active_chains = priv->hw_params.valid_rx_ant; |
| 790 | |
| 791 | rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 792 | |
| 793 | /* How many receivers should we use? */ |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 794 | active_rx_cnt = iwl_get_active_rx_chain_count(priv); |
| 795 | idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt); |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 796 | |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 797 | |
Grumbach, Emmanuel | 0481644 | 2008-09-03 11:26:53 +0800 | [diff] [blame] | 798 | /* correct rx chain count according hw settings |
| 799 | * and chain noise calibration |
| 800 | */ |
| 801 | valid_rx_cnt = iwl_count_chain_bitmap(active_chains); |
| 802 | if (valid_rx_cnt < active_rx_cnt) |
| 803 | active_rx_cnt = valid_rx_cnt; |
| 804 | |
| 805 | if (valid_rx_cnt < idle_rx_cnt) |
| 806 | idle_rx_cnt = valid_rx_cnt; |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 807 | |
| 808 | rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; |
| 809 | rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; |
| 810 | |
| 811 | priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain); |
| 812 | |
Tomas Winkler | 9e5e6c3 | 2008-09-16 14:01:04 +0800 | [diff] [blame] | 813 | if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 814 | priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; |
| 815 | else |
| 816 | priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; |
| 817 | |
Emmanuel Grumbach | a33c2f4 | 2008-09-03 11:26:56 +0800 | [diff] [blame] | 818 | IWL_DEBUG_ASSOC("rx_chain=0x%X active=%d idle=%d\n", |
Tomas Winkler | 28a6b07 | 2008-09-03 11:18:47 +0800 | [diff] [blame] | 819 | priv->staging_rxon.rx_chain, |
| 820 | active_rx_cnt, idle_rx_cnt); |
| 821 | |
| 822 | WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || |
| 823 | active_rx_cnt < idle_rx_cnt); |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 824 | } |
| 825 | EXPORT_SYMBOL(iwl_set_rxon_chain); |
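
/*
 * Layout sketch of the resulting rx_chain field (bit positions come from
 * the RXON_RX_CHAIN_* masks used above; a restatement, not new behavior):
 *
 *	rx_chain = valid_antennas << RXON_RX_CHAIN_VALID_POS
 *		 | active_rx_cnt  << RXON_RX_CHAIN_MIMO_CNT_POS
 *		 | idle_rx_cnt    << RXON_RX_CHAIN_CNT_POS;
 */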

/**
 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: the channel to switch to; must be valid for its band
 *
 * In addition to setting the staging RXON, priv->band is also set.
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the channel's band
 */
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
{
        enum ieee80211_band band = ch->band;
        u16 channel = ieee80211_frequency_to_channel(ch->center_freq);

        if (!iwl_get_channel_info(priv, band, channel)) {
                IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
                               channel, band);
                return -EINVAL;
        }

        if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
            (priv->band == band))
                return 0;

        priv->staging_rxon.channel = cpu_to_le16(channel);
        if (band == IEEE80211_BAND_5GHZ)
                priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
        else
                priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;

        priv->band = band;

        IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);

        return 0;
}
EXPORT_SYMBOL(iwl_set_rxon_channel);

int iwl_setup_mac(struct iwl_priv *priv)
{
        int ret;
        struct ieee80211_hw *hw = priv->hw;
        hw->rate_control_algorithm = "iwl-agn-rs";

        /* Tell mac80211 our characteristics */
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
                    IEEE80211_HW_NOISE_DBM;
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC);
        /* Default value; 4 EDCA QOS priorities */
        hw->queues = 4;
        /* queues to support 11n aggregation */
        if (priv->cfg->sku & IWL_SKU_N)
                hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;

        hw->conf.beacon_int = 100;
        hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

        if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
                priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &priv->bands[IEEE80211_BAND_2GHZ];
        if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
                priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &priv->bands[IEEE80211_BAND_5GHZ];

        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERROR("Failed to register hw (error %d)\n", ret);
                return ret;
        }
        priv->mac80211_registered = 1;

        return 0;
}
EXPORT_SYMBOL(iwl_setup_mac);
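
/*
 * Ordering note (inferred from the n_channels checks above): priv->bands
 * must already be filled by iwlcore_init_geos() when iwl_setup_mac()
 * runs, otherwise no band is handed to the wiphy before registration.
 */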

int iwl_set_hw_params(struct iwl_priv *priv)
{
        priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
        priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
        priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
        if (priv->cfg->mod_params->amsdu_size_8K)
                priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
        else
                priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
        priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;

        if (priv->cfg->mod_params->disable_11n)
                priv->cfg->sku &= ~IWL_SKU_N;

        /* Device-specific setup */
        return priv->cfg->ops->lib->set_hw_params(priv);
}
EXPORT_SYMBOL(iwl_set_hw_params);

int iwl_init_drv(struct iwl_priv *priv)
{
        int ret;

        priv->retry_rate = 1;
        priv->ibss_beacon = NULL;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->power_data.lock);
        spin_lock_init(&priv->sta_lock);
        spin_lock_init(&priv->hcmd_lock);

        INIT_LIST_HEAD(&priv->free_frames);

        mutex_init(&priv->mutex);

        /* Clear the driver's (not device's) station table */
        iwl_clear_stations_table(priv);

        priv->data_retry_limit = -1;
        priv->ieee_channels = NULL;
        priv->ieee_rates = NULL;
        priv->band = IEEE80211_BAND_2GHZ;

        priv->iw_mode = NL80211_IFTYPE_STATION;

        priv->use_ant_b_for_management_frame = 1; /* start with ant B */
        priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;

        /* Choose which receivers/antennas to use */
        iwl_set_rxon_chain(priv);
        iwl_init_scan_params(priv);

        if (priv->cfg->mod_params->enable_qos)
                priv->qos_data.qos_enable = 1;

        iwl_reset_qos(priv);

        priv->qos_data.qos_active = 0;
        priv->qos_data.qos_cap.val = 0;

        priv->rates_mask = IWL_RATES_MASK;
        /* If power management is turned on, default to AC mode */
        priv->power_mode = IWL_POWER_AC;
        priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MAX;

        ret = iwl_init_channel_map(priv);
        if (ret) {
                IWL_ERROR("initializing regulatory failed: %d\n", ret);
                goto err;
        }

        ret = iwlcore_init_geos(priv);
        if (ret) {
                IWL_ERROR("initializing geos failed: %d\n", ret);
                goto err_free_channel_map;
        }

        return 0;

err_free_channel_map:
        iwl_free_channel_map(priv);
err:
        return ret;
}
EXPORT_SYMBOL(iwl_init_drv);

int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
        int ret = 0;
        if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
                IWL_WARNING("Requested user TXPOWER %d below limit.\n",
                            tx_power);
                return -EINVAL;
        }

        if (tx_power > IWL_TX_POWER_TARGET_POWER_MAX) {
                IWL_WARNING("Requested user TXPOWER %d above limit.\n",
                            tx_power);
                return -EINVAL;
        }

        if (priv->tx_power_user_lmt != tx_power)
                force = true;

        priv->tx_power_user_lmt = tx_power;

        if (force && priv->cfg->ops->lib->send_tx_power)
                ret = priv->cfg->ops->lib->send_tx_power(priv);

        return ret;
}
EXPORT_SYMBOL(iwl_set_tx_power);

void iwl_uninit_drv(struct iwl_priv *priv)
{
        iwl_calib_free_results(priv);
        iwlcore_free_geos(priv);
        iwl_free_channel_map(priv);
        kfree(priv->scan);
}
EXPORT_SYMBOL(iwl_uninit_drv);

int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
{
        u32 stat_flags = 0;
        struct iwl_host_cmd cmd = {
                .id = REPLY_STATISTICS_CMD,
                .meta.flags = flags,
                .len = sizeof(stat_flags),
                .data = (u8 *) &stat_flags,
        };
        return iwl_send_cmd(priv, &cmd);
}
EXPORT_SYMBOL(iwl_send_statistics_request);

/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
        u32 val;
        int ret = 0;
        u32 errcnt = 0;
        u32 i;

        IWL_DEBUG_INFO("ucode inst image size is %u\n", len);

        ret = iwl_grab_nic_access(priv);
        if (ret)
                return ret;

        for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
                iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
                                   i + RTC_INST_LOWER_BOUND);
                val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
                        ret = -EIO;
                        errcnt++;
                        if (errcnt >= 3)
                                break;
                }
        }

        iwl_release_nic_access(priv);

        return ret;
}
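
/*
 * Sampling note (worked from the loop above): i advances 100 bytes per
 * iteration while image advances 100/sizeof(u32) = 25 words, so one
 * 32-bit word in every 25 is compared; a 40 KB image takes ~410 reads.
 */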

/**
 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
                                u32 len)
{
        u32 val;
        u32 save_len = len;
        int ret = 0;
        u32 errcnt;

        IWL_DEBUG_INFO("ucode inst image size is %u\n", len);

        ret = iwl_grab_nic_access(priv);
        if (ret)
                return ret;

        iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);

        errcnt = 0;
        for (; len > 0; len -= sizeof(u32), image++) {
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
                val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
                        IWL_ERROR("uCode INST section is invalid at "
                                  "offset 0x%x, is 0x%x, s/b 0x%x\n",
                                  save_len - len, val, le32_to_cpu(*image));
                        ret = -EIO;
                        errcnt++;
                        if (errcnt >= 20)
                                break;
                }
        }

        iwl_release_nic_access(priv);

        if (!errcnt)
                IWL_DEBUG_INFO
                        ("ucode image in INSTRUCTION memory is good\n");

        return ret;
}

/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 *   and verify its contents
 */
int iwl_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl_verify_inst_full(priv, image, len);

	return ret;
}
EXPORT_SYMBOL(iwl_verify_ucode);
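
/*
 * Usage sketch (illustrative, not part of this file): verifying the
 * image after it has been loaded into instruction SRAM.
 * iwl_example_alive_verify() is a hypothetical name; real callers live
 * in the per-device "alive" handling.
 */
#if 0
static void iwl_example_alive_verify(struct iwl_priv *priv)
{
	/* Returns 0 if any of the boot/init/runtime images matches SRAM */
	if (iwl_verify_ucode(priv)) {
		/* No image matched; the full dump was already printed.
		 * A real driver would restart or fail the interface. */
		IWL_ERROR("uCode verification failed\n");
	}
}
#endif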

static const char *desc_lookup(int i)
{
	switch (i) {
	case 1:
		return "FAIL";
	case 2:
		return "BAD_PARAM";
	case 3:
		return "BAD_CHECKSUM";
	case 4:
		return "NMI_INTERRUPT";
	case 5:
		return "SYSASSERT";
	case 6:
		return "FATAL_ERROR";
	}

	return "UNKNOWN";
}

#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
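
/*
 * For reference only: the word offsets read in iwl_dump_nic_error_log()
 * below imply roughly the following SRAM layout at the error event
 * table base.  This struct is an illustrative sketch, not a definition
 * taken from the uCode interface headers.
 */
#if 0
struct iwl_example_error_event_table {
	u32 count;	/* base + 0: number of error entries */
	u32 desc;	/* base + 1: error description (see desc_lookup) */
	u32 unused1;	/* base + 2: not read by the dump code */
	u32 blink1;	/* base + 3: branch link registers */
	u32 blink2;	/* base + 4 */
	u32 ilink1;	/* base + 5: interrupt link registers */
	u32 ilink2;	/* base + 6 */
	u32 data1;	/* base + 7: error-specific data */
	u32 data2;	/* base + 8 */
	u32 line;	/* base + 9: source line of the assert */
	u32 unused2;	/* base + 10: not read by the dump code */
	u32 time;	/* base + 11: uCode timestamp */
};
#endif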

void iwl_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	int ret;

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERROR("Invalid error log pointer 0x%08X\n", base);
		return;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret) {
		IWL_WARNING("Cannot read from adapter at this time.\n");
		return;
	}

	count = iwl_read_targ_mem(priv, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERROR("Start IWL Error Log Dump:\n");
		IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
	}

	desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
	blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));

	IWL_ERROR("Desc               Time       "
		  "data1      data2      line\n");
	IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
		  desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERROR("blink1  blink2  ilink1  ilink2\n");
	IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
		  ilink1, ilink2);

	iwl_release_nic_access(priv);
}
EXPORT_SYMBOL(iwl_dump_nic_error_log);

#define EVENT_START_OFFSET  (4 * sizeof(u32))
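
/*
 * For reference only: the header words read in iwl_dump_nic_event_log()
 * below imply this layout at the event log base.  An illustrative
 * sketch, not a definition taken from the uCode interface headers.
 */
#if 0
struct iwl_example_event_log_header {
	u32 capacity;	/* base + 0: max # of entries */
	u32 mode;	/* base + 1: 0 = {event, data}, 1 = {event, time, data} */
	u32 num_wraps;	/* base + 2: # times write pointer wrapped */
	u32 next_entry;	/* base + 3: next entry uCode will write */
	/* entries follow at EVENT_START_OFFSET, each 2 or 3 u32s
	 * depending on mode */
};
#endif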

/**
 * iwl_print_event_log - Dump the uCode event log to syslog
 *
 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
 */
static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
				u32 num_events, u32 mode)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */

	if (num_events == 0)
		return;
	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read_targ_mem(priv, ptr);
		ptr += sizeof(u32);
		time = iwl_read_targ_mem(priv, ptr);
		ptr += sizeof(u32);
		if (mode == 0) {
			/* data, ev */
			IWL_ERROR("EVT_LOG:0x%08x:%04u\n", time, ev);
		} else {
			data = iwl_read_targ_mem(priv, ptr);
			ptr += sizeof(u32);
			IWL_ERROR("EVT_LOGT:%010u:0x%08x:%04u\n",
				  time, data, ev);
		}
	}
}

void iwl_dump_nic_event_log(struct iwl_priv *priv)
{
	int ret;
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
		return;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret) {
		IWL_WARNING("Cannot read from adapter at this time.\n");
		return;
	}

	/* event log header */
	capacity = iwl_read_targ_mem(priv, base);
	mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
		iwl_release_nic_access(priv);
		return;
	}

	IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
		  size, num_wraps);

	/* if uCode has wrapped back to top of log, start at the oldest entry,
	 * i.e. the next one that uCode would fill. */
	if (num_wraps)
		iwl_print_event_log(priv, next_entry,
				    capacity - next_entry, mode);
	/* (then/else) start at top of log */
	iwl_print_event_log(priv, 0, next_entry, mode);

	iwl_release_nic_access(priv);
}
EXPORT_SYMBOL(iwl_dump_nic_event_log);
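
/*
 * Usage sketch (illustrative, not part of this file): on a firmware
 * error interrupt a driver typically dumps both logs before
 * restarting.  iwl_example_irq_handle_error() is a hypothetical name
 * for the per-device error path.
 */
#if 0
static void iwl_example_irq_handle_error(struct iwl_priv *priv)
{
	/* Error table first (what asserted), then the event trail
	 * leading up to it. */
	iwl_dump_nic_error_log(priv);
	iwl_dump_nic_event_log(priv);
}
#endif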

void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	/* don't send uninitialized stack data in the unused fields */
	memset(&cmd, 0, sizeof(cmd));

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	cmd.critical_temperature_R =
		cpu_to_le32(priv->hw_params.ct_kill_threshold);

	ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
			       sizeof(cmd), &cmd);
	if (ret)
		IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
	else
		IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
			       "critical temperature is %d\n",
			       le32_to_cpu(cmd.critical_temperature_R));
}
EXPORT_SYMBOL(iwl_rf_kill_ct_config);

/*
 * CARD_STATE_CMD
 *
 * Use: Sets the device's internal card state to enable, disable, or halt
 *
 * When in the 'enable' state the card operates as normal.
 * When in the 'disable' state, the card enters into a low power mode.
 * When in the 'halt' state, the card is shut down and must be fully
 * restarted to come back on.
 */
static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_CARD_STATE_CMD,
		.len = sizeof(u32),
		.data = &flags,
		.meta.flags = meta_flag,
	};

	return iwl_send_cmd(priv, &cmd);
}
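
/*
 * For reference only: a sketch of the three transitions described
 * above, assuming the CARD_STATE_CMD_* flag values declared in
 * iwl-commands.h.  Only the 'disable' transition is actually exercised
 * in this file (see iwl_radio_kill_sw_disable_radio below).
 */
#if 0
static void iwl_example_card_states(struct iwl_priv *priv)
{
	iwl_send_card_state(priv, CARD_STATE_CMD_ENABLE, 0);  /* normal op */
	iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); /* low power */
	iwl_send_card_state(priv, CARD_STATE_CMD_HALT, 0);    /* full stop */
}
#endif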

void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
{
	unsigned long flags;

	if (test_bit(STATUS_RF_KILL_SW, &priv->status))
		return;

	IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO OFF\n");

	iwl_scan_cancel(priv);
	/* FIXME: This is a workaround for AP */
	if (priv->iw_mode != NL80211_IFTYPE_AP) {
		spin_lock_irqsave(&priv->lock, flags);
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_SW_BIT_RFKILL);
		spin_unlock_irqrestore(&priv->lock, flags);
		/* call the host command only if no hw rf-kill is set */
		if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
		    iwl_is_ready(priv))
			iwl_send_card_state(priv,
					    CARD_STATE_CMD_DISABLE, 0);
		set_bit(STATUS_RF_KILL_SW, &priv->status);
		/* make sure mac80211 stops sending Tx frames */
		if (priv->mac80211_registered)
			ieee80211_stop_queues(priv->hw);
	}
}
EXPORT_SYMBOL(iwl_radio_kill_sw_disable_radio);

int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
{
	unsigned long flags;

	if (!test_bit(STATUS_RF_KILL_SW, &priv->status))
		return 0;

	IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO ON\n");

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* If the driver is up, it will receive a CARD_STATE_NOTIFICATION
	 * and its handler will clear the SW rfkill status; clearing it
	 * here as well would break that handler.  Only when the interface
	 * is down may we clear it here, since no further notification
	 * will arrive.
	 */
	if (!priv->is_open)
		clear_bit(STATUS_RF_KILL_SW, &priv->status);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* wake up ucode */
	msleep(10);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_read32(priv, CSR_UCODE_DRV_GP1);
	if (!iwl_grab_nic_access(priv))
		iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_DEBUG_RF_KILL("Can not turn radio back on - "
				  "disabled by HW switch\n");
		return 0;
	}

	/* If the driver is already loaded, it will receive
	 * CARD_STATE_NOTIFICATION notifications and the handler will
	 * call restart to reload the driver.
	 */
	return 1;
}
EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio);
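
/*
 * Usage sketch (illustrative, not part of this file): a sysfs or
 * debugfs rfkill toggle built on the pair above.
 * iwl_example_set_sw_rfkill() is a hypothetical wrapper name.
 */
#if 0
static void iwl_example_set_sw_rfkill(struct iwl_priv *priv, bool kill)
{
	if (kill)
		iwl_radio_kill_sw_disable_radio(priv);
	else if (iwl_radio_kill_sw_enable_radio(priv))
		/* radio back on; a running driver completes the
		 * transition from CARD_STATE_NOTIFICATION */
		IWL_DEBUG_RF_KILL("SW RF KILL lifted\n");
}
#endif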