1
/******************************************************************************
3
* Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
5
* This program is free software; you can redistribute it and/or modify it
6
* under the terms of version 2 of the GNU General Public License as
7
* published by the Free Software Foundation.
9
* This program is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14
* You should have received a copy of the GNU General Public License along with
15
* this program; if not, write to the Free Software Foundation, Inc.,
16
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18
* The full GNU General Public License is included in this distribution in the
19
* file called LICENSE.
21
* Contact Information:
22
* James P. Ketrenos <ipw2100-admin@linux.intel.com>
23
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
*****************************************************************************/
27
#include <linux/kernel.h>
28
#include <linux/module.h>
29
#include <linux/version.h>
30
#include <linux/init.h>
31
#include <linux/pci.h>
32
#include <linux/dma-mapping.h>
33
#include <linux/delay.h>
34
#include <linux/skbuff.h>
35
#include <linux/netdevice.h>
36
#include <linux/wireless.h>
37
#include <net/mac80211.h>
38
#include <linux/netdevice.h>
39
#include <linux/etherdevice.h>
40
#include <linux/delay.h>
43
#include "iwl-helpers.h"
45
static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv);
47
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
48
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
49
IWL_RATE_SISO_##s##M_PLCP, \
50
IWL_RATE_MIMO_##s##M_PLCP, \
51
IWL_RATE_##r##M_IEEE, \
52
IWL_RATE_##ip##M_INDEX, \
53
IWL_RATE_##in##M_INDEX, \
54
IWL_RATE_##rp##M_INDEX, \
55
IWL_RATE_##rn##M_INDEX, \
56
IWL_RATE_##pp##M_INDEX, \
57
IWL_RATE_##np##M_INDEX }
61
/*
 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 */
67
const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
68
IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
69
IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
70
IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
71
IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
72
IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
73
IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
74
IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
75
IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
76
IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
77
IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
78
IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
79
IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
80
IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
83
static int is_fat_channel(__le32 rxon_flags)
85
return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
86
(rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
89
static u8 is_single_stream(struct iwl4965_priv *priv)
91
#ifdef CONFIG_IWL4965_HT
92
if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht ||
93
(priv->active_rate_ht[1] == 0) ||
94
(priv->ps_mode == IWL_MIMO_PS_STATIC))
98
#endif /*CONFIG_IWL4965_HT */
103
/*
 * Determine how many receiver/antenna chains to use.
 * More provides better reception via diversity.  Fewer saves power.
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
108
static int iwl4965_get_rx_chain_counter(struct iwl4965_priv *priv,
109
u8 *idle_state, u8 *rx_state)
111
u8 is_single = is_single_stream(priv);
112
u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
114
/* # of Rx chains to use when expecting MIMO. */
115
if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
120
/* # Rx chains when idling and maybe trying to save power */
121
switch (priv->ps_mode) {
122
case IWL_MIMO_PS_STATIC:
123
case IWL_MIMO_PS_DYNAMIC:
124
*idle_state = (is_cam) ? 2 : 1;
126
case IWL_MIMO_PS_NONE:
127
*idle_state = (is_cam) ? *rx_state : 1;
137
int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv)
142
spin_lock_irqsave(&priv->lock, flags);
143
rc = iwl4965_grab_nic_access(priv);
145
spin_unlock_irqrestore(&priv->lock, flags);
150
iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
151
rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
154
IWL_ERROR("Can't stop Rx DMA.\n");
156
iwl4965_release_nic_access(priv);
157
spin_unlock_irqrestore(&priv->lock, flags);
162
u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
166
int ret = IWL_INVALID_STATION;
169
if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
170
(priv->iw_mode == IEEE80211_IF_TYPE_AP))
173
if (is_broadcast_ether_addr(addr))
174
return IWL4965_BROADCAST_ID;
176
spin_lock_irqsave(&priv->sta_lock, flags);
177
for (i = start; i < priv->hw_setting.max_stations; i++)
178
if ((priv->stations[i].used) &&
180
(priv->stations[i].sta.sta.addr, addr))) {
185
IWL_DEBUG_ASSOC_LIMIT("can not find STA " MAC_FMT " total %d\n",
186
MAC_ARG(addr), priv->num_stations);
189
spin_unlock_irqrestore(&priv->sta_lock, flags);
193
static int iwl4965_nic_set_pwr_src(struct iwl4965_priv *priv, int pwr_max)
198
spin_lock_irqsave(&priv->lock, flags);
199
ret = iwl4965_grab_nic_access(priv);
201
spin_unlock_irqrestore(&priv->lock, flags);
208
ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
211
if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
212
iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
213
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
214
~APMG_PS_CTRL_MSK_PWR_SRC);
216
iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
217
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
218
~APMG_PS_CTRL_MSK_PWR_SRC);
220
iwl4965_release_nic_access(priv);
221
spin_unlock_irqrestore(&priv->lock, flags);
226
static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
231
spin_lock_irqsave(&priv->lock, flags);
232
rc = iwl4965_grab_nic_access(priv);
234
spin_unlock_irqrestore(&priv->lock, flags);
239
iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
241
/* Reset driver's Rx queue write index */
242
iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
244
/* Tell device where to find RBD circular buffer in DRAM */
245
iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
248
/* Tell device where in DRAM to update its Rx status */
249
iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
250
(priv->hw_setting.shared_phys +
251
offsetof(struct iwl4965_shared, val0)) >> 4);
253
/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
254
iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
255
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
256
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
257
IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
259
(RX_QUEUE_SIZE_LOG <<
260
FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
263
/* iwl4965_write32(priv, CSR_INT_COAL_REG, 0); */
266
iwl4965_release_nic_access(priv);
267
spin_unlock_irqrestore(&priv->lock, flags);
272
/* Tell 4965 where to find the "keep warm" buffer */
273
static int iwl4965_kw_init(struct iwl4965_priv *priv)
278
spin_lock_irqsave(&priv->lock, flags);
279
rc = iwl4965_grab_nic_access(priv);
283
iwl4965_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
284
priv->kw.dma_addr >> 4);
285
iwl4965_release_nic_access(priv);
287
spin_unlock_irqrestore(&priv->lock, flags);
291
static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
293
struct pci_dev *dev = priv->pci_dev;
294
struct iwl4965_kw *kw = &priv->kw;
296
kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
297
kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
304
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
308
/**
 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
 *
 * Does not set up a command, or touch hardware.
 */
312
int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv, int phymode, u16 channel,
313
const struct iwl4965_eeprom_channel *eeprom_ch,
314
u8 fat_extension_channel)
316
struct iwl4965_channel_info *ch_info;
318
ch_info = (struct iwl4965_channel_info *)
319
iwl4965_get_channel_info(priv, phymode, channel);
321
if (!is_channel_valid(ch_info))
324
IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
325
" %ddBm): Ad-Hoc %ssupported\n",
327
is_channel_a_band(ch_info) ?
329
CHECK_AND_PRINT(IBSS),
330
CHECK_AND_PRINT(ACTIVE),
331
CHECK_AND_PRINT(RADAR),
332
CHECK_AND_PRINT(WIDE),
333
CHECK_AND_PRINT(NARROW),
334
CHECK_AND_PRINT(DFS),
336
eeprom_ch->max_power_avg,
337
((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
338
&& !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
341
ch_info->fat_eeprom = *eeprom_ch;
342
ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
343
ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
344
ch_info->fat_min_power = 0;
345
ch_info->fat_scan_power = eeprom_ch->max_power_avg;
346
ch_info->fat_flags = eeprom_ch->flags;
347
ch_info->fat_extension_channel = fat_extension_channel;
353
/**
 * iwl4965_kw_free - Free the "keep warm" buffer
 */
355
static void iwl4965_kw_free(struct iwl4965_priv *priv)
357
struct pci_dev *dev = priv->pci_dev;
358
struct iwl4965_kw *kw = &priv->kw;
361
pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
362
memset(kw, 0, sizeof(*kw));
367
/**
 * iwl4965_txq_ctx_reset - Reset TX queue context
 *
 * Destroys all DMA structures and initialise them again
 */
373
static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
376
int txq_id, slots_num;
379
iwl4965_kw_free(priv);
381
/* Free all tx/cmd queues and keep-warm buffer */
382
iwl4965_hw_txq_ctx_free(priv);
384
/* Alloc keep-warm buffer */
385
rc = iwl4965_kw_alloc(priv);
387
IWL_ERROR("Keep Warm allocation failed");
391
spin_lock_irqsave(&priv->lock, flags);
393
rc = iwl4965_grab_nic_access(priv);
395
IWL_ERROR("TX reset failed");
396
spin_unlock_irqrestore(&priv->lock, flags);
400
/* Turn off all Tx DMA channels */
401
iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0);
402
iwl4965_release_nic_access(priv);
403
spin_unlock_irqrestore(&priv->lock, flags);
405
/* Tell 4965 where to find the keep-warm buffer */
406
rc = iwl4965_kw_init(priv);
408
IWL_ERROR("kw_init failed\n");
412
/* Alloc and init all (default 16) Tx queues,
413
* including the command queue (#4) */
414
for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
415
slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
416
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
417
rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
420
IWL_ERROR("Tx %d queue init failed\n", txq_id);
428
iwl4965_hw_txq_ctx_free(priv);
430
iwl4965_kw_free(priv);
435
int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
439
struct iwl4965_rx_queue *rxq = &priv->rxq;
444
iwl4965_power_init_handle(priv);
447
spin_lock_irqsave(&priv->lock, flags);
449
iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
450
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
452
iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
453
rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
454
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
455
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
457
spin_unlock_irqrestore(&priv->lock, flags);
458
IWL_DEBUG_INFO("Failed to init the card\n");
462
rc = iwl4965_grab_nic_access(priv);
464
spin_unlock_irqrestore(&priv->lock, flags);
468
iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
470
iwl4965_write_prph(priv, APMG_CLK_CTRL_REG,
471
APMG_CLK_VAL_DMA_CLK_RQT |
472
APMG_CLK_VAL_BSM_CLK_RQT);
473
iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
477
iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
478
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
480
iwl4965_release_nic_access(priv);
481
iwl4965_write32(priv, CSR_INT_COALESCING, 512 / 32);
482
spin_unlock_irqrestore(&priv->lock, flags);
484
/* Determine HW type */
485
rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
489
IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);
491
iwl4965_nic_set_pwr_src(priv, 1);
492
spin_lock_irqsave(&priv->lock, flags);
494
if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
495
pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
496
/* Enable No Snoop field */
497
pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
501
spin_unlock_irqrestore(&priv->lock, flags);
503
if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
504
IWL_ERROR("Older EEPROM detected! Aborting.\n");
508
pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
510
/* disable L1 entry -- workaround for pre-B1 */
511
pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);
513
spin_lock_irqsave(&priv->lock, flags);
515
/* set CSR_HW_CONFIG_REG for uCode use */
517
iwl4965_set_bit(priv, CSR_SW_VER, CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R |
518
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
519
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
521
rc = iwl4965_grab_nic_access(priv);
523
spin_unlock_irqrestore(&priv->lock, flags);
524
IWL_DEBUG_INFO("Failed to init the card\n");
528
iwl4965_read_prph(priv, APMG_PS_CTRL_REG);
529
iwl4965_set_bits_prph(priv, APMG_PS_CTRL_REG,
530
APMG_PS_CTRL_VAL_RESET_REQ);
532
iwl4965_clear_bits_prph(priv, APMG_PS_CTRL_REG,
533
APMG_PS_CTRL_VAL_RESET_REQ);
535
iwl4965_release_nic_access(priv);
536
spin_unlock_irqrestore(&priv->lock, flags);
538
iwl4965_hw_card_show_info(priv);
542
/* Allocate the RX queue, or reset if it is already allocated */
544
rc = iwl4965_rx_queue_alloc(priv);
546
IWL_ERROR("Unable to initialize Rx queue\n");
550
iwl4965_rx_queue_reset(priv, rxq);
552
iwl4965_rx_replenish(priv);
554
iwl4965_rx_init(priv, rxq);
556
spin_lock_irqsave(&priv->lock, flags);
558
rxq->need_update = 1;
559
iwl4965_rx_queue_update_write_ptr(priv, rxq);
561
spin_unlock_irqrestore(&priv->lock, flags);
563
/* Allocate and init all Tx and Command queues */
564
rc = iwl4965_txq_ctx_reset(priv);
568
if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
569
IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
571
if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
572
IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
574
set_bit(STATUS_INIT, &priv->status);
579
int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
585
spin_lock_irqsave(&priv->lock, flags);
587
/* set stop master bit */
588
iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
590
reg_val = iwl4965_read32(priv, CSR_GP_CNTRL);
592
if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
593
(reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
594
IWL_DEBUG_INFO("Card in power save, master is already "
597
rc = iwl4965_poll_bit(priv, CSR_RESET,
598
CSR_RESET_REG_FLAG_MASTER_DISABLED,
599
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
601
spin_unlock_irqrestore(&priv->lock, flags);
606
spin_unlock_irqrestore(&priv->lock, flags);
607
IWL_DEBUG_INFO("stop master\n");
613
/**
 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
615
void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
621
/* Stop each Tx DMA channel, and wait for it to be idle */
622
for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
623
spin_lock_irqsave(&priv->lock, flags);
624
if (iwl4965_grab_nic_access(priv)) {
625
spin_unlock_irqrestore(&priv->lock, flags);
629
iwl4965_write_direct32(priv,
630
IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
632
iwl4965_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
633
IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
635
iwl4965_release_nic_access(priv);
636
spin_unlock_irqrestore(&priv->lock, flags);
639
/* Deallocate memory for all Tx queues */
640
iwl4965_hw_txq_ctx_free(priv);
643
int iwl4965_hw_nic_reset(struct iwl4965_priv *priv)
648
iwl4965_hw_nic_stop_master(priv);
650
spin_lock_irqsave(&priv->lock, flags);
652
iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
656
iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
657
rc = iwl4965_poll_bit(priv, CSR_RESET,
658
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
659
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
663
rc = iwl4965_grab_nic_access(priv);
665
iwl4965_write_prph(priv, APMG_CLK_EN_REG,
666
APMG_CLK_VAL_DMA_CLK_RQT |
667
APMG_CLK_VAL_BSM_CLK_RQT);
671
iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
672
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
674
iwl4965_release_nic_access(priv);
677
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
678
wake_up_interruptible(&priv->wait_command_queue);
680
spin_unlock_irqrestore(&priv->lock, flags);
686
#define REG_RECALIB_PERIOD (60)
689
/**
 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to queue the statistics_work
 * in work_queue context (v. softirq)
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.  However,
 * we can't send the statistics command from softirq context (which
 * is the context which timers run at) so we have to queue off the
 * statistics_work to actually send the command to the hardware.
 */
702
static void iwl4965_bg_statistics_periodic(unsigned long data)
704
struct iwl4965_priv *priv = (struct iwl4965_priv *)data;
706
queue_work(priv->workqueue, &priv->statistics_work);
710
/**
 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
 *
 * This is queued by iwl4965_bg_statistics_periodic.
 */
714
static void iwl4965_bg_statistics_work(struct work_struct *work)
716
struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
719
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
722
mutex_lock(&priv->mutex);
723
iwl4965_send_statistics_request(priv);
724
mutex_unlock(&priv->mutex);
727
#define CT_LIMIT_CONST 259
728
#define TM_CT_KILL_THRESHOLD 110
730
void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv)
732
struct iwl4965_ct_kill_config cmd;
735
u32 crit_temperature;
739
spin_lock_irqsave(&priv->lock, flags);
740
iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
741
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
742
spin_unlock_irqrestore(&priv->lock, flags);
744
if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
745
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
746
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
747
R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
749
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
750
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
751
R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
754
temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
756
crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
757
cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
758
rc = iwl4965_send_cmd_pdu(priv,
759
REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd);
761
IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
763
IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
766
#ifdef CONFIG_IWL4965_SENSITIVITY
768
/* "false alarms" are signals that our DSP tries to lock onto,
769
* but then determines that they are either noise, or transmissions
770
* from a distant wireless network (also "noise", really) that get
771
* "stepped on" by stronger transmissions within our own network.
772
* This algorithm attempts to set a sensitivity level that is high
773
* enough to receive all of our own network traffic, but not so
774
* high that our DSP gets too busy trying to lock onto non-network
776
static int iwl4965_sens_energy_cck(struct iwl4965_priv *priv,
779
struct statistics_general_data *rx_info)
783
u8 max_silence_rssi = 0;
785
u8 silence_rssi_a = 0;
786
u8 silence_rssi_b = 0;
787
u8 silence_rssi_c = 0;
790
/* "false_alarms" values below are cross-multiplications to assess the
791
* numbers of false alarms within the measured period of actual Rx
792
* (Rx is off when we're txing), vs the min/max expected false alarms
793
* (some should be expected if rx is sensitive enough) in a
794
* hypothetical listening period of 200 time units (TU), 204.8 msec:
796
* MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
799
u32 false_alarms = norm_fa * 200 * 1024;
800
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
801
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
802
struct iwl4965_sensitivity_data *data = NULL;
804
data = &(priv->sensitivity_data);
806
data->nrg_auto_corr_silence_diff = 0;
808
/* Find max silence rssi among all 3 receivers.
809
* This is background noise, which may include transmissions from other
810
* networks, measured during silence before our network's beacon */
811
silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
812
ALL_BAND_FILTER)>>8);
813
silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
814
ALL_BAND_FILTER)>>8);
815
silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
816
ALL_BAND_FILTER)>>8);
818
val = max(silence_rssi_b, silence_rssi_c);
819
max_silence_rssi = max(silence_rssi_a, (u8) val);
821
/* Store silence rssi in 20-beacon history table */
822
data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
823
data->nrg_silence_idx++;
824
if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
825
data->nrg_silence_idx = 0;
827
/* Find max silence rssi across 20 beacon history */
828
for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
829
val = data->nrg_silence_rssi[i];
830
silence_ref = max(silence_ref, val);
832
IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
833
silence_rssi_a, silence_rssi_b, silence_rssi_c,
836
/* Find max rx energy (min value!) among all 3 receivers,
837
* measured during beacon frame.
838
* Save it in 10-beacon history table. */
839
i = data->nrg_energy_idx;
840
val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
841
data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
843
data->nrg_energy_idx++;
844
if (data->nrg_energy_idx >= 10)
845
data->nrg_energy_idx = 0;
847
/* Find min rx energy (max value) across 10 beacon history.
848
* This is the minimum signal level that we want to receive well.
849
* Add backoff (margin so we don't miss slightly lower energy frames).
850
* This establishes an upper bound (min value) for energy threshold. */
851
max_nrg_cck = data->nrg_value[0];
852
for (i = 1; i < 10; i++)
853
max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
856
IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
857
rx_info->beacon_energy_a, rx_info->beacon_energy_b,
858
rx_info->beacon_energy_c, max_nrg_cck - 6);
860
/* Count number of consecutive beacons with fewer-than-desired
 * false alarms. */
862
if (false_alarms < min_false_alarms)
863
data->num_in_cck_no_fa++;
865
data->num_in_cck_no_fa = 0;
866
IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
867
data->num_in_cck_no_fa);
869
/* If we got too many false alarms this time, reduce sensitivity */
870
if (false_alarms > max_false_alarms) {
871
IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
872
false_alarms, max_false_alarms);
873
IWL_DEBUG_CALIB("... reducing sensitivity\n");
874
data->nrg_curr_state = IWL_FA_TOO_MANY;
876
if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
877
/* Store for "fewer than desired" on later beacon */
878
data->nrg_silence_ref = silence_ref;
880
/* increase energy threshold (reduce nrg value)
881
* to decrease sensitivity */
882
if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
883
data->nrg_th_cck = data->nrg_th_cck
887
/* increase auto_corr values to decrease sensitivity */
888
if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
889
data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
891
val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
892
data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
894
val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
895
data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
897
/* Else if we got fewer than desired, increase sensitivity */
898
} else if (false_alarms < min_false_alarms) {
899
data->nrg_curr_state = IWL_FA_TOO_FEW;
901
/* Compare silence level with silence level for most recent
902
* healthy number or too many false alarms */
903
data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
906
IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
907
false_alarms, min_false_alarms,
908
data->nrg_auto_corr_silence_diff);
910
/* Increase value to increase sensitivity, but only if:
 * 1a) previous beacon did *not* have *too many* false alarms
 * 1b) AND there's a significant difference in Rx levels
 *     from a previous beacon with too many, or healthy # FAs
 * OR 2) We've seen a lot of beacons (100) with too few
 *       false alarms */
916
if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
917
((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
918
(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
920
IWL_DEBUG_CALIB("... increasing sensitivity\n");
921
/* Increase nrg value to increase sensitivity */
922
val = data->nrg_th_cck + NRG_STEP_CCK;
923
data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
925
/* Decrease auto_corr values to increase sensitivity */
926
val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
927
data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
929
val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
930
data->auto_corr_cck_mrc =
931
max((u32)AUTO_CORR_MIN_CCK_MRC, val);
934
IWL_DEBUG_CALIB("... but not changing sensitivity\n");
936
/* Else we got a healthy number of false alarms, keep status quo */
938
IWL_DEBUG_CALIB(" FA in safe zone\n");
939
data->nrg_curr_state = IWL_FA_GOOD_RANGE;
941
/* Store for use in "fewer than desired" with later beacon */
942
data->nrg_silence_ref = silence_ref;
944
/* If previous beacon had too many false alarms,
945
* give it some extra margin by reducing sensitivity again
946
* (but don't go below measured energy of desired Rx) */
947
if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
948
IWL_DEBUG_CALIB("... increasing margin\n");
949
data->nrg_th_cck -= NRG_MARGIN;
953
/* Make sure the energy threshold does not go above the measured
 * energy of the desired Rx signals (reduced by backoff margin),
 * or else we might start missing Rx frames.
 * Lower value is higher energy, so we use max()!
 */
958
data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
959
IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
961
data->nrg_prev_state = data->nrg_curr_state;
967
static int iwl4965_sens_auto_corr_ofdm(struct iwl4965_priv *priv,
972
u32 false_alarms = norm_fa * 200 * 1024;
973
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
974
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
975
struct iwl4965_sensitivity_data *data = NULL;
977
data = &(priv->sensitivity_data);
979
/* If we got too many false alarms this time, reduce sensitivity */
980
if (false_alarms > max_false_alarms) {
982
IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
983
false_alarms, max_false_alarms);
985
val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
986
data->auto_corr_ofdm =
987
min((u32)AUTO_CORR_MAX_OFDM, val);
989
val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
990
data->auto_corr_ofdm_mrc =
991
min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
993
val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
994
data->auto_corr_ofdm_x1 =
995
min((u32)AUTO_CORR_MAX_OFDM_X1, val);
997
val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
998
data->auto_corr_ofdm_mrc_x1 =
999
min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1002
/* Else if we got fewer than desired, increase sensitivity */
1003
else if (false_alarms < min_false_alarms) {
1005
IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1006
false_alarms, min_false_alarms);
1008
val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1009
data->auto_corr_ofdm =
1010
max((u32)AUTO_CORR_MIN_OFDM, val);
1012
val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1013
data->auto_corr_ofdm_mrc =
1014
max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1016
val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1017
data->auto_corr_ofdm_x1 =
1018
max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1020
val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1021
data->auto_corr_ofdm_mrc_x1 =
1022
max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1026
IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1027
min_false_alarms, false_alarms, max_false_alarms);
1032
static int iwl4965_sensitivity_callback(struct iwl4965_priv *priv,
1033
struct iwl4965_cmd *cmd, struct sk_buff *skb)
1035
/* We didn't cache the SKB; let the caller free it */
1039
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1040
static int iwl4965_sensitivity_write(struct iwl4965_priv *priv, u8 flags)
1043
struct iwl4965_sensitivity_cmd cmd ;
1044
struct iwl4965_sensitivity_data *data = NULL;
1045
struct iwl4965_host_cmd cmd_out = {
1046
.id = SENSITIVITY_CMD,
1047
.len = sizeof(struct iwl4965_sensitivity_cmd),
1048
.meta.flags = flags,
1052
data = &(priv->sensitivity_data);
1054
memset(&cmd, 0, sizeof(cmd));
1056
cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1057
cpu_to_le16((u16)data->auto_corr_ofdm);
1058
cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1059
cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1060
cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1061
cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1062
cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1063
cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1065
cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1066
cpu_to_le16((u16)data->auto_corr_cck);
1067
cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1068
cpu_to_le16((u16)data->auto_corr_cck_mrc);
1070
cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1071
cpu_to_le16((u16)data->nrg_th_cck);
1072
cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1073
cpu_to_le16((u16)data->nrg_th_ofdm);
1075
cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1076
__constant_cpu_to_le16(190);
1077
cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1078
__constant_cpu_to_le16(390);
1079
cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1080
__constant_cpu_to_le16(62);
1082
IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1083
data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1084
data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1087
IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1088
data->auto_corr_cck, data->auto_corr_cck_mrc,
1091
/* Update uCode's "work" table, and copy it to DSP */
1092
cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1094
if (flags & CMD_ASYNC)
1095
cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1097
/* Don't send command to uCode if nothing has changed */
1098
if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1099
sizeof(u16)*HD_TABLE_SIZE)) {
1100
IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1104
/* Copy table for comparison next time */
1105
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1106
sizeof(u16)*HD_TABLE_SIZE);
1108
rc = iwl4965_send_cmd(priv, &cmd_out);
1110
IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n");
1117
void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force)
1121
struct iwl4965_sensitivity_data *data = NULL;
1123
IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1126
memset(&(priv->sensitivity_tbl[0]), 0,
1127
sizeof(u16)*HD_TABLE_SIZE);
1129
/* Clear driver's sensitivity algo data */
1130
data = &(priv->sensitivity_data);
1131
memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1133
data->num_in_cck_no_fa = 0;
1134
data->nrg_curr_state = IWL_FA_TOO_MANY;
1135
data->nrg_prev_state = IWL_FA_TOO_MANY;
1136
data->nrg_silence_ref = 0;
1137
data->nrg_silence_idx = 0;
1138
data->nrg_energy_idx = 0;
1140
for (i = 0; i < 10; i++)
1141
data->nrg_value[i] = 0;
1143
for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1144
data->nrg_silence_rssi[i] = 0;
1146
data->auto_corr_ofdm = 90;
1147
data->auto_corr_ofdm_mrc = 170;
1148
data->auto_corr_ofdm_x1 = 105;
1149
data->auto_corr_ofdm_mrc_x1 = 220;
1150
data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1151
data->auto_corr_cck_mrc = 200;
1152
data->nrg_th_cck = 100;
1153
data->nrg_th_ofdm = 100;
1155
data->last_bad_plcp_cnt_ofdm = 0;
1156
data->last_fa_cnt_ofdm = 0;
1157
data->last_bad_plcp_cnt_cck = 0;
1158
data->last_fa_cnt_cck = 0;
1160
/* Clear prior Sensitivity command data to force send to uCode */
1162
memset(&(priv->sensitivity_tbl[0]), 0,
1163
sizeof(u16)*HD_TABLE_SIZE);
1165
rc |= iwl4965_sensitivity_write(priv, flags);
1166
IWL_DEBUG_CALIB("<<return 0x%X\n", rc);
1172
/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1173
* Called after every association, but this runs only once!
1174
* ... once chain noise is calibrated the first time, it's good forever. */
1175
void iwl4965_chain_noise_reset(struct iwl4965_priv *priv)
1177
struct iwl4965_chain_noise_data *data = NULL;
1180
data = &(priv->chain_noise_data);
1181
if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl4965_is_associated(priv)) {
1182
struct iwl4965_calibration_cmd cmd;
1184
memset(&cmd, 0, sizeof(cmd));
1185
cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1186
cmd.diff_gain_a = 0;
1187
cmd.diff_gain_b = 0;
1188
cmd.diff_gain_c = 0;
1189
rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1192
data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1193
IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1199
* Accumulate 20 beacons of signal and noise statistics for each of
1200
* 3 receivers/antennas/rx-chains, then figure out:
1201
* 1) Which antennas are connected.
1202
* 2) Differential rx gain settings to balance the 3 receivers.
1204
static void iwl4965_noise_calibration(struct iwl4965_priv *priv,
1205
struct iwl4965_notif_statistics *stat_resp)
1207
struct iwl4965_chain_noise_data *data = NULL;
1216
u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1217
u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1218
u32 max_average_sig;
1219
u16 max_average_sig_antenna_i;
1220
u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1221
u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1223
u16 chan_num = INITIALIZATION_VALUE;
1224
u32 band = INITIALIZATION_VALUE;
1225
u32 active_chains = 0;
1226
unsigned long flags;
1227
struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1229
data = &(priv->chain_noise_data);
1231
/* Accumulate just the first 20 beacons after the first association,
1232
* then we're done forever. */
1233
if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1234
if (data->state == IWL_CHAIN_NOISE_ALIVE)
1235
IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1239
spin_lock_irqsave(&priv->lock, flags);
1240
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1241
IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1242
spin_unlock_irqrestore(&priv->lock, flags);
1246
band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1247
chan_num = le16_to_cpu(priv->staging_rxon.channel);
1249
/* Make sure we accumulate data for just the associated channel
1250
* (even if scanning). */
1251
if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1252
((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1253
(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1254
IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1256
spin_unlock_irqrestore(&priv->lock, flags);
1260
/* Accumulate beacon statistics values across 20 beacons */
1261
chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1263
chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1265
chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1268
chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1269
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1270
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1272
spin_unlock_irqrestore(&priv->lock, flags);
1274
data->beacon_count++;
1276
data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1277
data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1278
data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1280
data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1281
data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1282
data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1284
IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1285
data->beacon_count);
1286
IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1287
chain_sig_a, chain_sig_b, chain_sig_c);
1288
IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1289
chain_noise_a, chain_noise_b, chain_noise_c);
1291
/* If this is the 20th beacon, determine:
1292
* 1) Disconnected antennas (using signal strengths)
1293
* 2) Differential gain (using silence noise) to balance receivers */
1294
if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1296
/* Analyze signal for disconnected antenna */
1297
average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1298
average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1299
average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1301
if (average_sig[0] >= average_sig[1]) {
1302
max_average_sig = average_sig[0];
1303
max_average_sig_antenna_i = 0;
1304
active_chains = (1 << max_average_sig_antenna_i);
1306
max_average_sig = average_sig[1];
1307
max_average_sig_antenna_i = 1;
1308
active_chains = (1 << max_average_sig_antenna_i);
1311
if (average_sig[2] >= max_average_sig) {
1312
max_average_sig = average_sig[2];
1313
max_average_sig_antenna_i = 2;
1314
active_chains = (1 << max_average_sig_antenna_i);
1317
IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1318
average_sig[0], average_sig[1], average_sig[2]);
1319
IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1320
max_average_sig, max_average_sig_antenna_i);
1322
/* Compare signal strengths for all 3 receivers. */
1323
for (i = 0; i < NUM_RX_CHAINS; i++) {
1324
if (i != max_average_sig_antenna_i) {
1325
s32 rssi_delta = (max_average_sig -
1328
/* If signal is very weak, compared with
1329
* strongest, mark it as disconnected. */
1330
if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1331
data->disconn_array[i] = 1;
1333
active_chains |= (1 << i);
1334
IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1335
"disconn_array[i] = %d\n",
1336
i, rssi_delta, data->disconn_array[i]);
1340
/*If both chains A & B are disconnected -
1341
* connect B and leave A as is */
1342
if (data->disconn_array[CHAIN_A] &&
1343
data->disconn_array[CHAIN_B]) {
1344
data->disconn_array[CHAIN_B] = 0;
1345
active_chains |= (1 << CHAIN_B);
1346
IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1347
"W/A - declare B as connected\n");
1350
IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1353
/* Save for use within RXON, TX, SCAN commands, etc. */
1354
priv->valid_antenna = active_chains;
1356
/* Analyze noise for rx balance */
1357
average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1358
average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1359
average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1361
for (i = 0; i < NUM_RX_CHAINS; i++) {
1362
if (!(data->disconn_array[i]) &&
1363
(average_noise[i] <= min_average_noise)) {
1364
/* This means that chain i is active and has
1365
* lower noise values so far: */
1366
min_average_noise = average_noise[i];
1367
min_average_noise_antenna_i = i;
1371
data->delta_gain_code[min_average_noise_antenna_i] = 0;
1373
IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1374
average_noise[0], average_noise[1],
1377
IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1378
min_average_noise, min_average_noise_antenna_i);
1380
for (i = 0; i < NUM_RX_CHAINS; i++) {
1383
if (!(data->disconn_array[i]) &&
1384
(data->delta_gain_code[i] ==
1385
CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1386
delta_g = average_noise[i] - min_average_noise;
1387
data->delta_gain_code[i] = (u8)((delta_g *
1389
if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1390
data->delta_gain_code[i])
1391
data->delta_gain_code[i] =
1392
CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1394
data->delta_gain_code[i] =
1395
(data->delta_gain_code[i] | (1 << 2));
1397
data->delta_gain_code[i] = 0;
1399
IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1400
data->delta_gain_code[0],
1401
data->delta_gain_code[1],
1402
data->delta_gain_code[2]);
1404
/* Differential gain gets sent to uCode only once */
1405
if (!data->radio_write) {
1406
struct iwl4965_calibration_cmd cmd;
1407
data->radio_write = 1;
1409
memset(&cmd, 0, sizeof(cmd));
1410
cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1411
cmd.diff_gain_a = data->delta_gain_code[0];
1412
cmd.diff_gain_b = data->delta_gain_code[1];
1413
cmd.diff_gain_c = data->delta_gain_code[2];
1414
rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1417
IWL_DEBUG_CALIB("fail sending cmd "
1418
"REPLY_PHY_CALIBRATION_CMD \n");
1420
/* TODO we might want recalculate
1421
* rx_chain in rxon cmd */
1423
/* Mark so we run this algo only once! */
1424
data->state = IWL_CHAIN_NOISE_CALIBRATED;
1426
data->chain_noise_a = 0;
1427
data->chain_noise_b = 0;
1428
data->chain_noise_c = 0;
1429
data->chain_signal_a = 0;
1430
data->chain_signal_b = 0;
1431
data->chain_signal_c = 0;
1432
data->beacon_count = 0;
1437
static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv,
1438
struct iwl4965_notif_statistics *resp)
1448
struct iwl4965_sensitivity_data *data = NULL;
1449
struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1450
struct statistics_rx *statistics = &(resp->rx);
1451
unsigned long flags;
1452
struct statistics_general_data statis;
1454
data = &(priv->sensitivity_data);
1456
if (!iwl4965_is_associated(priv)) {
1457
IWL_DEBUG_CALIB("<< - not associated\n");
1461
spin_lock_irqsave(&priv->lock, flags);
1462
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1463
IWL_DEBUG_CALIB("<< invalid data.\n");
1464
spin_unlock_irqrestore(&priv->lock, flags);
1468
/* Extract Statistics: */
1469
rx_enable_time = le32_to_cpu(rx_info->channel_load);
1470
fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1471
fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1472
bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1473
bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1475
statis.beacon_silence_rssi_a =
1476
le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1477
statis.beacon_silence_rssi_b =
1478
le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1479
statis.beacon_silence_rssi_c =
1480
le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1481
statis.beacon_energy_a =
1482
le32_to_cpu(statistics->general.beacon_energy_a);
1483
statis.beacon_energy_b =
1484
le32_to_cpu(statistics->general.beacon_energy_b);
1485
statis.beacon_energy_c =
1486
le32_to_cpu(statistics->general.beacon_energy_c);
1488
spin_unlock_irqrestore(&priv->lock, flags);
1490
IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1492
if (!rx_enable_time) {
1493
IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1497
/* These statistics increase monotonically, and do not reset
1498
* at each beacon. Calculate difference from last value, or just
1499
* use the new statistics value if it has reset or wrapped around. */
1500
if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1501
data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1503
bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1504
data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1507
if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
1508
data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
1510
bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
1511
data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1514
if (data->last_fa_cnt_ofdm > fa_ofdm)
1515
data->last_fa_cnt_ofdm = fa_ofdm;
1517
fa_ofdm -= data->last_fa_cnt_ofdm;
1518
data->last_fa_cnt_ofdm += fa_ofdm;
1521
if (data->last_fa_cnt_cck > fa_cck)
1522
data->last_fa_cnt_cck = fa_cck;
1524
fa_cck -= data->last_fa_cnt_cck;
1525
data->last_fa_cnt_cck += fa_cck;
1528
/* Total aborted signal locks */
1529
norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1530
norm_fa_cck = fa_cck + bad_plcp_cck;
1532
IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1533
bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1535
iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1536
iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1537
rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC);
1542
static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1544
struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1547
mutex_lock(&priv->mutex);
1549
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1550
test_bit(STATUS_SCANNING, &priv->status)) {
1551
mutex_unlock(&priv->mutex);
1555
if (priv->start_calib) {
1556
iwl4965_noise_calibration(priv, &priv->statistics);
1558
if (priv->sensitivity_data.state ==
1559
IWL_SENS_CALIB_NEED_REINIT) {
1560
iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1561
priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1563
iwl4965_sensitivity_calibration(priv,
1567
mutex_unlock(&priv->mutex);
1570
#endif /*CONFIG_IWL4965_SENSITIVITY*/
1572
static void iwl4965_bg_txpower_work(struct work_struct *work)
1574
struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1577
/* If a scan happened to start before we got here
1578
* then just return; the statistics notification will
1579
* kick off another scheduled work to compensate for
1580
* any temperature delta we missed here. */
1581
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1582
test_bit(STATUS_SCANNING, &priv->status))
1585
mutex_lock(&priv->mutex);
1587
/* Regardless of if we are assocaited, we must reconfigure the
1588
* TX power since frames can be sent on non-radar channels while
1590
iwl4965_hw_reg_send_txpower(priv);
1592
/* Update last_temperature to keep is_calib_needed from running
1593
* when it isn't needed... */
1594
priv->last_temperature = priv->temperature;
1596
mutex_unlock(&priv->mutex);
1600
* Acquire priv->lock before calling this function !
1602
/**
 * iwl4965_set_wr_ptrs - Point both hardware and uCode scheduler at a queue index
 * @txq_id: Tx queue number (encoded into the upper byte of the HBUS write)
 * @index:  TFD circular-buffer index to set as the current position
 *
 * Writes the index into the host-bus target write-pointer register
 * (low byte = index, next byte = queue id) and mirrors it into the
 * scheduler's per-queue read pointer so hardware and uCode agree.
 *
 * NOTE: caller must hold priv->lock (see comment above in original file).
 */
static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index)
{
	iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
}
1610
* iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1611
* @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1612
* @scd_retry: (1) Indicates queue will be used in aggregation mode
1614
* NOTE: Acquire priv->lock before calling this function !
1616
static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1617
struct iwl4965_tx_queue *txq,
1618
int tx_fifo_id, int scd_retry)
1620
int txq_id = txq->q.id;
1622
/* Find out whether to activate Tx queue */
1623
int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1625
/* Set up and activate */
1626
iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
1627
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1628
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1629
(scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1630
(scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1631
SCD_QUEUE_STTS_REG_MSK);
1633
txq->sched_retry = scd_retry;
1635
IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
1636
active ? "Activate" : "Deactivate",
1637
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1640
static const u16 default_queue_to_tx_fifo[] = {
1650
static inline void iwl4965_txq_ctx_activate(struct iwl4965_priv *priv, int txq_id)
1652
set_bit(txq_id, &priv->txq_ctx_active_msk);
1655
static inline void iwl4965_txq_ctx_deactivate(struct iwl4965_priv *priv, int txq_id)
1657
clear_bit(txq_id, &priv->txq_ctx_active_msk);
1660
int iwl4965_alive_notify(struct iwl4965_priv *priv)
1664
unsigned long flags;
1667
spin_lock_irqsave(&priv->lock, flags);
1669
#ifdef CONFIG_IWL4965_SENSITIVITY
1670
memset(&(priv->sensitivity_data), 0,
1671
sizeof(struct iwl4965_sensitivity_data));
1672
memset(&(priv->chain_noise_data), 0,
1673
sizeof(struct iwl4965_chain_noise_data));
1674
for (i = 0; i < NUM_RX_CHAINS; i++)
1675
priv->chain_noise_data.delta_gain_code[i] =
1676
CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1677
#endif /* CONFIG_IWL4965_SENSITIVITY*/
1678
rc = iwl4965_grab_nic_access(priv);
1680
spin_unlock_irqrestore(&priv->lock, flags);
1684
/* Clear 4965's internal Tx Scheduler data base */
1685
priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
1686
a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1687
for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1688
iwl4965_write_targ_mem(priv, a, 0);
1689
for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
1690
iwl4965_write_targ_mem(priv, a, 0);
1691
for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
1692
iwl4965_write_targ_mem(priv, a, 0);
1694
/* Tel 4965 where to find Tx byte count tables */
1695
iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
1696
(priv->hw_setting.shared_phys +
1697
offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1699
/* Disable chain mode for all queues */
1700
iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
1702
/* Initialize each Tx queue (including the command queue) */
1703
for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
1705
/* TFD circular buffer read/write indexes */
1706
iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
1707
iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1709
/* Max Tx Window size for Scheduler-ACK mode */
1710
iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1711
SCD_CONTEXT_QUEUE_OFFSET(i),
1713
SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1714
SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1717
iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1718
SCD_CONTEXT_QUEUE_OFFSET(i) +
1721
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1722
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1725
iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
1726
(1 << priv->hw_setting.max_txq_num) - 1);
1728
/* Activate all Tx DMA/FIFO channels */
1729
iwl4965_write_prph(priv, KDR_SCD_TXFACT,
1730
SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1732
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1734
/* Map each Tx/cmd queue to its corresponding fifo */
1735
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1736
int ac = default_queue_to_tx_fifo[i];
1737
iwl4965_txq_ctx_activate(priv, i);
1738
iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1741
iwl4965_release_nic_access(priv);
1742
spin_unlock_irqrestore(&priv->lock, flags);
1748
* iwl4965_hw_set_hw_setting
1750
* Called when initializing driver
1752
int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv)
1754
/* Allocate area for Tx byte count tables and Rx queue status */
1755
priv->hw_setting.shared_virt =
1756
pci_alloc_consistent(priv->pci_dev,
1757
sizeof(struct iwl4965_shared),
1758
&priv->hw_setting.shared_phys);
1760
if (!priv->hw_setting.shared_virt)
1763
memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl4965_shared));
1765
priv->hw_setting.max_txq_num = iwl4965_param_queues_num;
1766
priv->hw_setting.ac_queue_count = AC_NUM;
1767
priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
1768
priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
1769
priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
1771
priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
1772
priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
1777
* iwl4965_hw_txq_ctx_free - Free TXQ Context
1779
* Destroy all TX DMA queues and structures
1781
void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv)
1786
for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
1787
iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
1789
/* Keep-warm buffer */
1790
iwl4965_kw_free(priv);
1794
* iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1796
* Does NOT advance any TFD circular buffer read/write indexes
1797
* Does NOT free the TFD itself (which is within circular buffer)
1799
int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
1801
struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
1802
struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
1803
struct pci_dev *dev = priv->pci_dev;
1808
/* Host command buffers stay mapped in memory, nothing to clean */
1809
if (txq->q.id == IWL_CMD_QUEUE_NUM)
1812
/* Sanity check on number of chunks */
1813
counter = IWL_GET_BITS(*bd, num_tbs);
1814
if (counter > MAX_NUM_OF_TBS) {
1815
IWL_ERROR("Too many chunks: %i\n", counter);
1816
/* @todo issue fatal error, it is quite serious situation */
1820
/* Unmap chunks, if any.
1821
* TFD info for odd chunks is different format than for even chunks. */
1822
for (i = 0; i < counter; i++) {
1829
IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1830
(IWL_GET_BITS(bd->pa[index],
1831
tb2_addr_hi20) << 16),
1832
IWL_GET_BITS(bd->pa[index], tb2_len),
1836
pci_unmap_single(dev,
1837
le32_to_cpu(bd->pa[index].tb1_addr),
1838
IWL_GET_BITS(bd->pa[index], tb1_len),
1841
/* Free SKB, if any, for this chunk */
1842
if (txq->txb[txq->q.read_ptr].skb[i]) {
1843
struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
1846
txq->txb[txq->q.read_ptr].skb[i] = NULL;
1852
int iwl4965_hw_reg_set_txpower(struct iwl4965_priv *priv, s8 power)
1854
IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
1858
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1871
*res = ((num * 2 + denom) / (denom * 2)) * sign;
1877
* iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
1879
* Determines power supply voltage compensation for txpower calculations.
1880
* Returns number of 1/2-dB steps to subtract from gain table index,
1881
* to compensate for difference between power supply voltage during
1882
* factory measurements, vs. current power supply voltage.
1884
* Voltage indication is higher for lower voltage.
1885
* Lower voltage requires more gain (lower gain table index).
1887
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1888
s32 current_voltage)
1892
if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
1893
(TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
1896
iwl4965_math_div_round(current_voltage - eeprom_voltage,
1897
TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
1899
if (current_voltage > eeprom_voltage)
1901
if ((comp < -2) || (comp > 2))
1907
static const struct iwl4965_channel_info *
1908
iwl4965_get_channel_txpower_info(struct iwl4965_priv *priv, u8 phymode, u16 channel)
1910
const struct iwl4965_channel_info *ch_info;
1912
ch_info = iwl4965_get_channel_info(priv, phymode, channel);
1914
if (!is_channel_valid(ch_info))
1920
static s32 iwl4965_get_tx_atten_grp(u16 channel)
1922
if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
1923
channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
1924
return CALIB_CH_GROUP_5;
1926
if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
1927
channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
1928
return CALIB_CH_GROUP_1;
1930
if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
1931
channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
1932
return CALIB_CH_GROUP_2;
1934
if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
1935
channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
1936
return CALIB_CH_GROUP_3;
1938
if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
1939
channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
1940
return CALIB_CH_GROUP_4;
1942
IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
1946
static u32 iwl4965_get_sub_band(const struct iwl4965_priv *priv, u32 channel)
1950
for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
1951
if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
1954
if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
1955
&& (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
1962
static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1969
iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
1975
* iwl4965_interpolate_chan - Interpolate factory measurements for one channel
1977
* Interpolates factory measurements from the two sample channels within a
1978
* sub-band, to apply to channel of interest. Interpolation is proportional to
1979
* differences in channel frequencies, which is proportional to differences
1980
* in channel number.
1982
static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel,
1983
struct iwl4965_eeprom_calib_ch_info *chan_info)
1988
const struct iwl4965_eeprom_calib_measure *m1;
1989
const struct iwl4965_eeprom_calib_measure *m2;
1990
struct iwl4965_eeprom_calib_measure *omeas;
1994
s = iwl4965_get_sub_band(priv, channel);
1995
if (s >= EEPROM_TX_POWER_BANDS) {
1996
IWL_ERROR("Tx Power can not find channel %d ", channel);
2000
ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
2001
ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
2002
chan_info->ch_num = (u8) channel;
2004
IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
2005
channel, s, ch_i1, ch_i2);
2007
for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2008
for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2009
m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
2010
measurements[c][m]);
2011
m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
2012
measurements[c][m]);
2013
omeas = &(chan_info->measurements[c][m]);
2016
(u8) iwl4965_interpolate_value(channel, ch_i1,
2021
(u8) iwl4965_interpolate_value(channel, ch_i1,
2022
m1->gain_idx, ch_i2,
2024
omeas->temperature =
2025
(u8) iwl4965_interpolate_value(channel, ch_i1,
2030
(s8) iwl4965_interpolate_value(channel, ch_i1,
2035
("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
2036
m1->actual_pow, m2->actual_pow, omeas->actual_pow);
2038
("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
2039
m1->gain_idx, m2->gain_idx, omeas->gain_idx);
2041
("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
2042
m1->pa_det, m2->pa_det, omeas->pa_det);
2044
("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
2045
m1->temperature, m2->temperature,
2046
omeas->temperature);
2053
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
2054
* for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
2055
static s32 back_off_table[] = {
2056
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
2057
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
2058
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
2059
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
2063
/* Thermal compensation values for txpower for various frequency ranges ...
2064
* ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
2065
static struct iwl4965_txpower_comp_entry {
2066
s32 degrees_per_05db_a;
2067
s32 degrees_per_05db_a_denom;
2068
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
2069
{9, 2}, /* group 0 5.2, ch 34-43 */
2070
{4, 1}, /* group 1 5.2, ch 44-70 */
2071
{4, 1}, /* group 2 5.2, ch 71-124 */
2072
{4, 1}, /* group 3 5.2, ch 125-200 */
2073
{3, 1} /* group 4 2.4, ch all */
2076
static s32 get_min_power_index(s32 rate_power_index, u32 band)
2079
if ((rate_power_index & 7) <= 4)
2080
return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2082
return MIN_TX_GAIN_INDEX;
2090
static const struct gain_entry gain_table[2][108] = {
2091
/* 5.2GHz power gain index table */
2093
{123, 0x3F}, /* highest txpower */
2202
/* 2.4GHz power gain index table */
2204
{110, 0x3f}, /* highest txpower */
2315
static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 channel,
2316
u8 is_fat, u8 ctrl_chan_high,
2317
struct iwl4965_tx_power_db *tx_power_tbl)
2319
u8 saturation_power;
2321
s32 user_target_power;
2325
s32 current_regulatory;
2326
s32 txatten_grp = CALIB_CH_GROUP_MAX;
2329
const struct iwl4965_channel_info *ch_info = NULL;
2330
struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2331
const struct iwl4965_eeprom_calib_measure *measurement;
2334
s32 voltage_compensation;
2335
s32 degrees_per_05db_num;
2336
s32 degrees_per_05db_denom;
2338
s32 temperature_comp[2];
2339
s32 factory_gain_index[2];
2340
s32 factory_actual_pwr[2];
2343
/* Sanity check requested level (dBm) */
2344
if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2345
IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2346
priv->user_txpower_limit);
2349
if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2350
IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2351
priv->user_txpower_limit);
2355
/* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2356
* are used for indexing into txpower table) */
2357
user_target_power = 2 * priv->user_txpower_limit;
2359
/* Get current (RXON) channel, band, width */
2361
iwl4965_get_channel_txpower_info(priv, priv->phymode, channel);
2363
IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2369
/* get txatten group, used to select 1) thermal txpower adjustment
2370
* and 2) mimo txpower balance between Tx chains. */
2371
txatten_grp = iwl4965_get_tx_atten_grp(channel);
2372
if (txatten_grp < 0)
2375
IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2376
channel, txatten_grp);
2385
/* hardware txpower limits ...
2386
* saturation (clipping distortion) txpowers are in half-dBm */
2388
saturation_power = priv->eeprom.calib_info.saturation_power24;
2390
saturation_power = priv->eeprom.calib_info.saturation_power52;
2392
if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2393
saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2395
saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2397
saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2400
/* regulatory txpower limits ... reg_limit values are in half-dBm,
2401
* max_power_avg values are in dBm, convert * 2 */
2403
reg_limit = ch_info->fat_max_power_avg * 2;
2405
reg_limit = ch_info->max_power_avg * 2;
2407
if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2408
(reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2410
reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2412
reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2415
/* Interpolate txpower calibration values for this channel,
2416
* based on factory calibration tests on spaced channels. */
2417
iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2419
/* calculate tx gain adjustment based on power supply voltage */
2420
voltage = priv->eeprom.calib_info.voltage;
2421
init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2422
voltage_compensation =
2423
iwl4965_get_voltage_compensation(voltage, init_voltage);
2425
IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2427
voltage, voltage_compensation);
2429
/* get current temperature (Celsius) */
2430
current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2431
current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
2432
current_temp = KELVIN_TO_CELSIUS(current_temp);
2434
/* select thermal txpower adjustment params, based on channel group
2435
* (same frequency group used for mimo txatten adjustment) */
2436
degrees_per_05db_num =
2437
tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2438
degrees_per_05db_denom =
2439
tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2441
/* get per-chain txpower values from factory measurements */
2442
for (c = 0; c < 2; c++) {
2443
measurement = &ch_eeprom_info.measurements[c][1];
2445
/* txgain adjustment (in half-dB steps) based on difference
2446
* between factory and current temperature */
2447
factory_temp = measurement->temperature;
2448
iwl4965_math_div_round((current_temp - factory_temp) *
2449
degrees_per_05db_denom,
2450
degrees_per_05db_num,
2451
&temperature_comp[c]);
2453
factory_gain_index[c] = measurement->gain_idx;
2454
factory_actual_pwr[c] = measurement->actual_pow;
2456
IWL_DEBUG_TXPOWER("chain = %d\n", c);
2457
IWL_DEBUG_TXPOWER("fctry tmp %d, "
2458
"curr tmp %d, comp %d steps\n",
2459
factory_temp, current_temp,
2460
temperature_comp[c]);
2462
IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2463
factory_gain_index[c],
2464
factory_actual_pwr[c]);
2467
/* for each of 33 bit-rates (including 1 for CCK) */
2468
for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2470
union iwl4965_tx_power_dual_stream tx_power;
2472
/* for mimo, reduce each chain's txpower by half
2473
* (3dB, 6 steps), so total output power is regulatory
2476
current_regulatory = reg_limit -
2477
IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2480
current_regulatory = reg_limit;
2484
/* find txpower limit, either hardware or regulatory */
2485
power_limit = saturation_power - back_off_table[i];
2486
if (power_limit > current_regulatory)
2487
power_limit = current_regulatory;
2489
/* reduce user's txpower request if necessary
2490
* for this rate on this channel */
2491
target_power = user_target_power;
2492
if (target_power > power_limit)
2493
target_power = power_limit;
2495
IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2496
i, saturation_power - back_off_table[i],
2497
current_regulatory, user_target_power,
2500
/* for each of 2 Tx chains (radio transmitters) */
2501
for (c = 0; c < 2; c++) {
2506
(s32)le32_to_cpu(priv->card_alive_init.
2507
tx_atten[txatten_grp][c]);
2511
/* calculate index; higher index means lower txpower */
2512
power_index = (u8) (factory_gain_index[c] -
2514
factory_actual_pwr[c]) -
2515
temperature_comp[c] -
2516
voltage_compensation +
2519
/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2522
if (power_index < get_min_power_index(i, band))
2523
power_index = get_min_power_index(i, band);
2525
/* adjust 5 GHz index to support negative indexes */
2529
/* CCK, rate 32, reduce txpower for CCK */
2530
if (i == POWER_TABLE_CCK_ENTRY)
2532
IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2534
/* stay within the table! */
2535
if (power_index > 107) {
2536
IWL_WARNING("txpower index %d > 107\n",
2540
if (power_index < 0) {
2541
IWL_WARNING("txpower index %d < 0\n",
2546
/* fill txpower command for this rate/chain */
2547
tx_power.s.radio_tx_gain[c] =
2548
gain_table[band][power_index].radio;
2549
tx_power.s.dsp_predis_atten[c] =
2550
gain_table[band][power_index].dsp;
2552
IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2553
"gain 0x%02x dsp %d\n",
2554
c, atten_value, power_index,
2555
tx_power.s.radio_tx_gain[c],
2556
tx_power.s.dsp_predis_atten[c]);
2557
}/* for each chain */
2559
tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2561
}/* for each rate */
2567
* iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
2569
* Uses the active RXON for channel, band, and characteristics (fat, high)
2570
* The power limit is taken from priv->user_txpower_limit.
2572
int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv)
2574
struct iwl4965_txpowertable_cmd cmd = { 0 };
2578
u8 ctrl_chan_high = 0;
2580
if (test_bit(STATUS_SCANNING, &priv->status)) {
2581
/* If this gets hit a lot, switch it to a BUG() and catch
2582
* the stack trace to find out who is calling this during
2584
IWL_WARNING("TX Power requested while scanning!\n");
2588
band = ((priv->phymode == MODE_IEEE80211B) ||
2589
(priv->phymode == MODE_IEEE80211G));
2591
is_fat = is_fat_channel(priv->active_rxon.flags);
2594
(priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2598
cmd.channel = priv->active_rxon.channel;
2600
rc = iwl4965_fill_txpower_tbl(priv, band,
2601
le16_to_cpu(priv->active_rxon.channel),
2602
is_fat, ctrl_chan_high, &cmd.tx_power);
2606
rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2610
int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel)
2615
u8 ctrl_chan_high = 0;
2616
struct iwl4965_channel_switch_cmd cmd = { 0 };
2617
const struct iwl4965_channel_info *ch_info;
2619
band = ((priv->phymode == MODE_IEEE80211B) ||
2620
(priv->phymode == MODE_IEEE80211G));
2622
ch_info = iwl4965_get_channel_info(priv, priv->phymode, channel);
2624
is_fat = is_fat_channel(priv->staging_rxon.flags);
2627
(priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2631
cmd.expect_beacon = 0;
2632
cmd.channel = cpu_to_le16(channel);
2633
cmd.rxon_flags = priv->active_rxon.flags;
2634
cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2635
cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2637
cmd.expect_beacon = is_channel_radar(ch_info);
2639
cmd.expect_beacon = 1;
2641
rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2642
ctrl_chan_high, &cmd.tx_power);
2644
IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2648
rc = iwl4965_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2652
#define RTS_HCCA_RETRY_LIMIT 3
2653
#define RTS_DFAULT_RETRY_LIMIT 60
2655
void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv,
2656
struct iwl4965_cmd *cmd,
2657
struct ieee80211_tx_control *ctrl,
2658
struct ieee80211_hdr *hdr, int sta_id,
2661
struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2662
u8 rts_retry_limit = 0;
2663
u8 data_retry_limit = 0;
2664
u16 fc = le16_to_cpu(hdr->frame_control);
2667
int rate_idx = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1);
2669
rate_plcp = iwl4965_rates[rate_idx].plcp;
2671
rts_retry_limit = (is_hcca) ?
2672
RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2674
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2675
rate_flags |= RATE_MCS_CCK_MSK;
2678
if (ieee80211_is_probe_response(fc)) {
2679
data_retry_limit = 3;
2680
if (data_retry_limit < rts_retry_limit)
2681
rts_retry_limit = data_retry_limit;
2683
data_retry_limit = IWL_DEFAULT_TX_RETRY;
2685
if (priv->data_retry_limit != -1)
2686
data_retry_limit = priv->data_retry_limit;
2689
if (ieee80211_is_data(fc)) {
2690
tx->initial_rate_index = 0;
2691
tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
2693
switch (fc & IEEE80211_FCTL_STYPE) {
2694
case IEEE80211_STYPE_AUTH:
2695
case IEEE80211_STYPE_DEAUTH:
2696
case IEEE80211_STYPE_ASSOC_REQ:
2697
case IEEE80211_STYPE_REASSOC_REQ:
2698
if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
2699
tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2700
tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
2707
/* Alternate between antenna A and B for successive frames */
2708
if (priv->use_ant_b_for_management_frame) {
2709
priv->use_ant_b_for_management_frame = 0;
2710
rate_flags |= RATE_MCS_ANT_B_MSK;
2712
priv->use_ant_b_for_management_frame = 1;
2713
rate_flags |= RATE_MCS_ANT_A_MSK;
2717
tx->rts_retry_limit = rts_retry_limit;
2718
tx->data_retry_limit = data_retry_limit;
2719
tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
2722
int iwl4965_hw_get_rx_read(struct iwl4965_priv *priv)
2724
struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2726
return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
2729
int iwl4965_hw_get_temperature(struct iwl4965_priv *priv)
2731
return priv->temperature;
2734
unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2735
struct iwl4965_frame *frame, u8 rate)
2737
struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
2738
unsigned int frame_size;
2740
tx_beacon_cmd = &frame->u.beacon;
2741
memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2743
tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID;
2744
tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2746
frame_size = iwl4965_fill_beacon_frame(priv,
2747
tx_beacon_cmd->frame,
2748
iwl4965_broadcast_addr,
2749
sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2751
BUG_ON(frame_size > MAX_MPDU_SIZE);
2752
tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2754
if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
2755
tx_beacon_cmd->tx.rate_n_flags =
2756
iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
2758
tx_beacon_cmd->tx.rate_n_flags =
2759
iwl4965_hw_set_rate_n_flags(rate, 0);
2761
tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2762
TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
2763
return (sizeof(*tx_beacon_cmd) + frame_size);
2767
* Tell 4965 where to find circular buffer of Tx Frame Descriptors for
2768
* given Tx queue, and enable the DMA channel used for that queue.
2770
* 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2771
* channels supported in hardware.
2773
int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
2776
unsigned long flags;
2777
int txq_id = txq->q.id;
2779
spin_lock_irqsave(&priv->lock, flags);
2780
rc = iwl4965_grab_nic_access(priv);
2782
spin_unlock_irqrestore(&priv->lock, flags);
2786
/* Circular buffer (TFD queue in DRAM) physical base address */
2787
iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
2788
txq->q.dma_addr >> 8);
2790
/* Enable DMA channel, using same id as for TFD queue */
2791
iwl4965_write_direct32(
2792
priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2793
IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2794
IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
2795
iwl4965_release_nic_access(priv);
2796
spin_unlock_irqrestore(&priv->lock, flags);
2801
int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr,
2802
dma_addr_t addr, u16 len)
2805
struct iwl4965_tfd_frame *tfd = ptr;
2806
u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2808
/* Each TFD can point to a maximum 20 Tx buffers */
2809
if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2810
IWL_ERROR("Error can not send more than %d chunks\n",
2815
index = num_tbs / 2;
2816
is_odd = num_tbs & 0x1;
2819
tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2820
IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
2821
iwl_get_dma_hi_address(addr));
2822
IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2824
IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2825
(u32) (addr & 0xffff));
2826
IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2827
IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2830
IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
2835
static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv)
2837
u16 hw_version = priv->eeprom.board_revision_4965;
2839
IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
2840
((hw_version >> 8) & 0x0F),
2841
((hw_version >> 8) >> 4), (hw_version & 0x00FF));
2843
IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
2844
priv->eeprom.board_pba_number_4965);
2847
#define IWL_TX_CRC_SIZE 4
2848
#define IWL_TX_DELIMITER_SIZE 4
2851
* iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
2853
/**
 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
 * @priv: driver private data
 * @txq: Tx queue whose write pointer just advanced
 * @byte_cnt: MPDU byte count for the frame at the current write pointer
 *
 * Writes the frame length (plus CRC and delimiter overhead) into the
 * per-queue byte-count table in shared DRAM so the 4965 scheduler knows
 * each frame's size.  Entries within the first 64 slots are duplicated
 * past the end of the table, as the hardware expects for wrap-around.
 *
 * Returns 0 (also when no update was needed).
 */
int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
				   struct iwl4965_tx_queue *txq, u16 byte_cnt)
{
	int len;
	int txq_id = txq->q.id;
	struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;

	if (txq->need_update == 0)
		return 0;

	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Set up byte count within first 256 entries */
	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	/* If within first 64 entries, duplicate at end */
	if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);

	return 0;
}
2879
* iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2881
* Selects how many and which Rx receivers/antennas/chains to use.
2882
* This should not be used for scan command ... it puts data in wrong place.
2884
void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
2886
u8 is_single = is_single_stream(priv);
2887
u8 idle_state, rx_state;
2889
priv->staging_rxon.rx_chain = 0;
2890
rx_state = idle_state = 3;
2892
/* Tell uCode which antennas are actually connected.
2893
* Before first association, we assume all antennas are connected.
2894
* Just after first association, iwl4965_noise_calibration()
2895
* checks which antennas actually *are* connected. */
2896
priv->staging_rxon.rx_chain |=
2897
cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
2899
/* How many receivers should we use? */
2900
iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
2901
priv->staging_rxon.rx_chain |=
2902
cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
2903
priv->staging_rxon.rx_chain |=
2904
cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
2906
if (!is_single && (rx_state >= 2) &&
2907
!test_bit(STATUS_POWER_PMI, &priv->status))
2908
priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
2910
priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
2912
IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
2915
#ifdef CONFIG_IWL4965_HT
2916
#ifdef CONFIG_IWL4965_HT_AGG
2918
get the traffic load value for tid
2920
static u32 iwl4965_tl_get_load(struct iwl4965_priv *priv, u8 tid)
2923
u32 current_time = jiffies_to_msecs(jiffies);
2926
unsigned long flags;
2927
struct iwl4965_traffic_load *tid_ptr = NULL;
2929
if (tid >= TID_MAX_LOAD_COUNT)
2932
tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
2934
current_time -= current_time % TID_ROUND_VALUE;
2936
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2937
if (!(tid_ptr->queue_count))
2940
time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
2941
index = time_diff / TID_QUEUE_CELL_SPACING;
2943
if (index >= TID_QUEUE_MAX_SIZE) {
2944
u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
2946
while (tid_ptr->queue_count &&
2947
(tid_ptr->time_stamp < oldest_time)) {
2948
tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
2949
tid_ptr->packet_count[tid_ptr->head] = 0;
2950
tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
2951
tid_ptr->queue_count--;
2953
if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
2957
load = tid_ptr->total;
2960
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
2965
increment traffic load value for tid and also remove
2966
any old values if passed the certian time period
2968
static void iwl4965_tl_add_packet(struct iwl4965_priv *priv, u8 tid)
2970
u32 current_time = jiffies_to_msecs(jiffies);
2973
unsigned long flags;
2974
struct iwl4965_traffic_load *tid_ptr = NULL;
2976
if (tid >= TID_MAX_LOAD_COUNT)
2979
tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
2981
current_time -= current_time % TID_ROUND_VALUE;
2983
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2984
if (!(tid_ptr->queue_count)) {
2986
tid_ptr->time_stamp = current_time;
2987
tid_ptr->queue_count = 1;
2989
tid_ptr->packet_count[0] = 1;
2993
time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
2994
index = time_diff / TID_QUEUE_CELL_SPACING;
2996
if (index >= TID_QUEUE_MAX_SIZE) {
2997
u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
2999
while (tid_ptr->queue_count &&
3000
(tid_ptr->time_stamp < oldest_time)) {
3001
tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
3002
tid_ptr->packet_count[tid_ptr->head] = 0;
3003
tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
3004
tid_ptr->queue_count--;
3006
if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
3011
index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE;
3012
tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1;
3013
tid_ptr->total = tid_ptr->total + 1;
3015
if ((index + 1) > tid_ptr->queue_count)
3016
tid_ptr->queue_count = index + 1;
3018
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3022
#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7
3024
BA_STATUS_FAILURE = 0,
3025
BA_STATUS_INITIATOR_DELBA,
3026
BA_STATUS_RECIPIENT_DELBA,
3027
BA_STATUS_RENEW_ADDBA_REQUEST,
3032
* iwl4964_tl_ba_avail - Find out if an unused aggregation queue is available
3034
static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv)
3037
struct iwl4965_lq_mngr *lq;
3041
lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3043
/* Find out how many agg queues are in use */
3044
for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) {
3046
if ((lq->agg_ctrl.granted_ba & msk) ||
3047
(lq->agg_ctrl.wait_for_agg_status & msk))
3051
if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS)
3057
static void iwl4965_ba_status(struct iwl4965_priv *priv,
3058
u8 tid, enum HT_STATUS status);
3060
static int iwl4965_perform_addba(struct iwl4965_priv *priv, u8 tid, u32 length,
3065
rc = iwlwifi_ieee80211_start_BA_session(priv->hw, priv->bssid, tid);
3067
iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
3072
static int iwl4965_perform_delba(struct iwl4965_priv *priv, u8 tid)
3076
rc = iwlwifi_ieee80211_stop_BA_session(priv->hw, priv->bssid, tid);
3078
iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
3083
static void iwl4965_turn_on_agg_for_tid(struct iwl4965_priv *priv,
3084
struct iwl4965_lq_mngr *lq,
3085
u8 auto_agg, u8 tid)
3087
u32 tid_msk = (1 << tid);
3088
unsigned long flags;
3090
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3092
if ((auto_agg) && (!lq->enable_counter)){
3093
lq->agg_ctrl.next_retry = 0;
3094
lq->agg_ctrl.tid_retry = 0;
3095
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3099
if (!(lq->agg_ctrl.granted_ba & tid_msk) &&
3100
(lq->agg_ctrl.requested_ba & tid_msk)) {
3101
u8 available_queues;
3104
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3105
available_queues = iwl4964_tl_ba_avail(priv);
3106
load = iwl4965_tl_get_load(priv, tid);
3108
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3109
if (!available_queues) {
3111
lq->agg_ctrl.tid_retry |= tid_msk;
3113
lq->agg_ctrl.requested_ba &= ~tid_msk;
3114
lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
3116
} else if ((auto_agg) &&
3117
((load <= lq->agg_ctrl.tid_traffic_load_threshold) ||
3118
((lq->agg_ctrl.wait_for_agg_status & tid_msk))))
3119
lq->agg_ctrl.tid_retry |= tid_msk;
3121
lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3122
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3123
iwl4965_perform_addba(priv, tid, 0x40,
3124
lq->agg_ctrl.ba_timeout);
3125
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3128
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3131
static void iwl4965_turn_on_agg(struct iwl4965_priv *priv, u8 tid)
3133
struct iwl4965_lq_mngr *lq;
3134
unsigned long flags;
3136
lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3138
if ((tid < TID_MAX_LOAD_COUNT))
3139
iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg,
3141
else if (tid == TID_ALL_SPECIFIED) {
3142
if (lq->agg_ctrl.requested_ba) {
3143
for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
3144
iwl4965_turn_on_agg_for_tid(priv, lq,
3145
lq->agg_ctrl.auto_agg, tid);
3147
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3148
lq->agg_ctrl.tid_retry = 0;
3149
lq->agg_ctrl.next_retry = 0;
3150
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3156
void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid)
3159
struct iwl4965_lq_mngr *lq;
3160
unsigned long flags;
3162
lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3164
if ((tid < TID_MAX_LOAD_COUNT)) {
3166
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3167
lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3168
lq->agg_ctrl.requested_ba &= ~tid_msk;
3169
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3170
iwl4965_perform_delba(priv, tid);
3171
} else if (tid == TID_ALL_SPECIFIED) {
3172
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3173
for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
3175
lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3176
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3177
iwl4965_perform_delba(priv, tid);
3178
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3180
lq->agg_ctrl.requested_ba = 0;
3181
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3186
* iwl4965_ba_status - Update driver's link quality mgr with tid's HT status
3188
static void iwl4965_ba_status(struct iwl4965_priv *priv,
3189
u8 tid, enum HT_STATUS status)
3191
struct iwl4965_lq_mngr *lq;
3192
u32 tid_msk = (1 << tid);
3193
unsigned long flags;
3195
lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3197
if ((tid >= TID_MAX_LOAD_COUNT))
3200
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3202
case BA_STATUS_ACTIVE:
3203
if (!(lq->agg_ctrl.granted_ba & tid_msk))
3204
lq->agg_ctrl.granted_ba |= tid_msk;
3207
if ((lq->agg_ctrl.granted_ba & tid_msk))
3208
lq->agg_ctrl.granted_ba &= ~tid_msk;
3212
lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
3213
if (status != BA_STATUS_ACTIVE) {
3214
if (lq->agg_ctrl.auto_agg) {
3215
lq->agg_ctrl.tid_retry |= tid_msk;
3216
lq->agg_ctrl.next_retry =
3217
jiffies + msecs_to_jiffies(500);
3219
lq->agg_ctrl.requested_ba &= ~tid_msk;
3221
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3226
static void iwl4965_bg_agg_work(struct work_struct *work)
3228
struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
3234
unsigned long flags;
3235
struct iwl4965_lq_mngr *lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3237
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3238
retry_tid = lq->agg_ctrl.tid_retry;
3239
lq->agg_ctrl.tid_retry = 0;
3240
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3242
if (retry_tid == TID_ALL_SPECIFIED)
3243
iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED);
3245
for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
3246
tid_msk = (1 << tid);
3247
if (retry_tid & tid_msk)
3248
iwl4965_turn_on_agg(priv, tid);
3252
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3253
if (lq->agg_ctrl.tid_retry)
3254
lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500);
3255
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3259
/* TODO: move this functionality to rate scaling */
3260
void iwl4965_tl_get_stats(struct iwl4965_priv *priv,
3261
struct ieee80211_hdr *hdr)
3263
__le16 *qc = ieee80211_get_qos_ctrl(hdr);
3266
(priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) {
3268
tid = (u8) (le16_to_cpu(*qc) & 0xF);
3269
if (tid < TID_MAX_LOAD_COUNT)
3270
iwl4965_tl_add_packet(priv, tid);
3273
if (priv->lq_mngr.agg_ctrl.next_retry &&
3274
(time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) {
3275
unsigned long flags;
3277
spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3278
priv->lq_mngr.agg_ctrl.next_retry = 0;
3279
spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3280
schedule_work(&priv->agg_work);
3284
#endif /*CONFIG_IWL4965_HT_AGG */
3285
#endif /* CONFIG_IWL4965_HT */
3288
* sign_extend - Sign extend a value using specified bit as sign-bit
3290
* Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
3291
* and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
3293
* @param oper value to sign extend
3294
* @param index 0 based bit index (0<=index<32) to sign bit
3296
/**
 * sign_extend - Sign extend a value using specified bit as sign-bit
 * @oper: value to sign extend
 * @index: 0-based bit index (0 <= index < 32) of the sign bit
 *
 * Example: sign_extend(9, 3) returns -7, as bit 3 of 1001b is set and
 * bits 0..2 (001b) sign-extended to 32 bits give -7.
 *
 * Shifts the sign bit up to bit 31, then relies on the arithmetic
 * right-shift of the signed type to replicate it back down.
 */
static s32 sign_extend(u32 oper, int index)
{
	u8 shift = 31 - index;

	return (s32)(oper << shift) >> shift;
}
3304
* iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
3305
* @statistics: Provides the temperature reading from the uCode
3307
* A return of <0 indicates bogus data in the statistics
3309
int iwl4965_get_temperature(const struct iwl4965_priv *priv)
3316
if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
3317
(priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
3318
IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
3319
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
3320
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
3321
R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
3322
R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
3324
IWL_DEBUG_TEMP("Running temperature calibration\n");
3325
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
3326
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
3327
R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
3328
R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
3332
* Temperature is only 23 bits, so sign extend out to 32.
3334
* NOTE If we haven't received a statistics notification yet
3335
* with an updated temperature, use R4 provided to us in the
3336
* "initialize" ALIVE response.
3338
if (!test_bit(STATUS_TEMPERATURE, &priv->status))
3339
vt = sign_extend(R4, 23);
3342
le32_to_cpu(priv->statistics.general.temperature), 23);
3344
IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
3348
IWL_ERROR("Calibration conflict R1 == R3\n");
3352
/* Calculate temperature in degrees Kelvin, adjust by 97%.
3353
* Add offset to center the adjustment around 0 degrees Centigrade. */
3354
temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3355
temperature /= (R3 - R1);
3356
temperature = (temperature * 97) / 100 +
3357
TEMPERATURE_CALIB_KELVIN_OFFSET;
3359
IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
3360
KELVIN_TO_CELSIUS(temperature));
3365
/* Adjust Txpower only if temperature variance is greater than threshold. */
3366
#define IWL_TEMPERATURE_THRESHOLD 3
3369
* iwl4965_is_temp_calib_needed - determines if new calibration is needed
3371
* If the temperature changed has changed sufficiently, then a recalibration
3374
* Assumes caller will replace priv->last_temperature once calibration
3377
static int iwl4965_is_temp_calib_needed(struct iwl4965_priv *priv)
3381
if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3382
IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3386
temp_diff = priv->temperature - priv->last_temperature;
3388
/* get absolute value */
3389
if (temp_diff < 0) {
3390
IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
3391
temp_diff = -temp_diff;
3392
} else if (temp_diff == 0)
3393
IWL_DEBUG_POWER("Same temp, \n");
3395
IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
3397
if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3398
IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3402
IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3407
/* Calculate noise level, based on measurements during network silence just
3408
* before arriving beacon. This measurement can be done only if we know
3409
* exactly when to expect beacons, therefore only when we're associated. */
3410
static void iwl4965_rx_calc_noise(struct iwl4965_priv *priv)
3412
struct statistics_rx_non_phy *rx_info
3413
= &(priv->statistics.rx.general);
3414
int num_active_rx = 0;
3415
int total_silence = 0;
3417
le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3419
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3421
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3423
if (bcn_silence_a) {
3424
total_silence += bcn_silence_a;
3427
if (bcn_silence_b) {
3428
total_silence += bcn_silence_b;
3431
if (bcn_silence_c) {
3432
total_silence += bcn_silence_c;
3436
/* Average among active antennas */
3438
priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3440
priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3442
IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3443
bcn_silence_a, bcn_silence_b, bcn_silence_c,
3444
priv->last_rx_noise);
3447
void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3449
struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3453
IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
3454
(int)sizeof(priv->statistics), pkt->len);
3456
change = ((priv->statistics.general.temperature !=
3457
pkt->u.stats.general.temperature) ||
3458
((priv->statistics.flag &
3459
STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3460
(pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3462
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3464
set_bit(STATUS_STATISTICS, &priv->status);
3466
/* Reschedule the statistics timer to occur in
3467
* REG_RECALIB_PERIOD seconds to ensure we get a
3468
* thermal update even if the uCode doesn't give
3470
mod_timer(&priv->statistics_periodic, jiffies +
3471
msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3473
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3474
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3475
iwl4965_rx_calc_noise(priv);
3476
#ifdef CONFIG_IWL4965_SENSITIVITY
3477
queue_work(priv->workqueue, &priv->sensitivity_work);
3481
iwl4965_led_background(priv);
3483
/* If the hardware hasn't reported a change in
3484
* temperature then don't bother computing a
3485
* calibrated temperature value */
3489
temp = iwl4965_get_temperature(priv);
3493
if (priv->temperature != temp) {
3494
if (priv->temperature)
3495
IWL_DEBUG_TEMP("Temperature changed "
3496
"from %dC to %dC\n",
3497
KELVIN_TO_CELSIUS(priv->temperature),
3498
KELVIN_TO_CELSIUS(temp));
3500
IWL_DEBUG_TEMP("Temperature "
3501
"initialized to %dC\n",
3502
KELVIN_TO_CELSIUS(temp));
3505
priv->temperature = temp;
3506
set_bit(STATUS_TEMPERATURE, &priv->status);
3508
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3509
iwl4965_is_temp_calib_needed(priv))
3510
queue_work(priv->workqueue, &priv->txpower_work);
3513
static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3515
struct iwl4965_rx_mem_buffer *rxb,
3516
struct ieee80211_rx_status *stats)
3518
struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3519
struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3520
(struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3521
struct ieee80211_hdr *hdr;
3524
unsigned int skblen;
3527
if (!include_phy && priv->last_phy_res[0])
3528
rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3531
IWL_ERROR("MPDU frame without a PHY data\n");
3535
hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3536
rx_start->cfg_phy_cnt);
3538
len = le16_to_cpu(rx_start->byte_count);
3540
rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3541
sizeof(struct iwl4965_rx_phy_res) +
3542
rx_start->cfg_phy_cnt + len);
3545
struct iwl4965_rx_mpdu_res_start *amsdu =
3546
(struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3548
hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3549
sizeof(struct iwl4965_rx_mpdu_res_start));
3550
len = le16_to_cpu(amsdu->byte_count);
3551
rx_start->byte_count = amsdu->byte_count;
3552
rx_end = (__le32 *) (((u8 *) hdr) + len);
3554
if (len > IWL_RX_BUF_SIZE || len < 16) {
3555
IWL_WARNING("byte count out of range [16,4K]"
3560
ampdu_status = le32_to_cpu(*rx_end);
3561
skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3563
/* start from MAC */
3564
skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3565
skb_put(rxb->skb, len); /* end where data ends */
3567
/* We only process data packets if the interface is open */
3568
if (unlikely(!priv->is_open)) {
3569
IWL_DEBUG_DROP_LIMIT
3570
("Dropping packet while interface is not open.\n");
3574
if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
3575
if (iwl4965_param_hwcrypto)
3576
iwl4965_set_decrypted_flag(priv, rxb->skb,
3577
ampdu_status, stats);
3578
iwl4965_handle_data_packet_monitor(priv, rxb, hdr, len, stats, 0);
3583
hdr = (struct ieee80211_hdr *)rxb->skb->data;
3585
if (iwl4965_param_hwcrypto)
3586
iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3588
iwlwifi_ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3589
priv->alloc_rxb_skb--;
3591
#ifdef CONFIG_IWL4965_LEDS
3593
priv->rxtxpackets += len;
3597
/* Calc max signal level (dBm) among 3 possible receivers */
3598
static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3600
/* data from PHY/DSP regarding signal strength, etc.,
3601
* contents are always there, not configurable by host. */
3602
struct iwl4965_rx_non_cfg_phy *ncphy =
3603
(struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3604
u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3607
u32 valid_antennae =
3608
(le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3609
>> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3613
/* Find max rssi among 3 possible receivers.
3614
* These values are measured by the digital signal processor (DSP).
3615
* They should stay fairly constant even as the signal strength varies,
3616
* if the radio's automatic gain control (AGC) is working right.
3617
* AGC value (see below) will provide the "interesting" info. */
3618
for (i = 0; i < 3; i++)
3619
if (valid_antennae & (1 << i))
3620
max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3622
IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3623
ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3626
/* dBm = max_rssi dB - agc dB - constant.
3627
* Higher AGC (higher radio gain) means lower signal. */
3628
return (max_rssi - agc - IWL_RSSI_OFFSET);
3631
#ifdef CONFIG_IWL4965_HT
3633
/* Parsed Information Elements */
3634
struct ieee802_11_elems {
3644
u8 ht_cap_param_len;
3646
u8 ht_extra_param_len;
3649
static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems)
3655
memset(elems, 0, sizeof(*elems));
3668
case WLAN_EID_DS_PARAMS:
3669
elems->ds_params = pos;
3670
elems->ds_params_len = elen;
3674
elems->tim_len = elen;
3676
case WLAN_EID_IBSS_PARAMS:
3677
elems->ibss_params = pos;
3678
elems->ibss_params_len = elen;
3680
case WLAN_EID_ERP_INFO:
3681
elems->erp_info = pos;
3682
elems->erp_info_len = elen;
3684
case WLAN_EID_HT_CAPABILITY:
3685
elems->ht_cap_param = pos;
3686
elems->ht_cap_param_len = elen;
3688
case WLAN_EID_HT_EXTRA_INFO:
3689
elems->ht_extra_param = pos;
3690
elems->ht_extra_param_len = elen;
3703
#endif /* CONFIG_IWL4965_HT */
3705
static void iwl4965_sta_modify_ps_wake(struct iwl4965_priv *priv, int sta_id)
3707
unsigned long flags;
3709
spin_lock_irqsave(&priv->sta_lock, flags);
3710
priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3711
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3712
priv->stations[sta_id].sta.sta.modify_mask = 0;
3713
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3714
spin_unlock_irqrestore(&priv->sta_lock, flags);
3716
iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3719
/**
 * iwl4965_update_ps_mode - Track a station's power-save state transitions
 * @priv: driver private data
 * @ps_bit: PM bit from the received frame's frame-control field (non-zero
 *          means the station is entering/staying in power save)
 * @addr: MAC address of the transmitting station
 *
 * Looks up the station by address and updates the driver's per-station
 * ps_status.  When an asleep station signals it has woken (ps_bit clear),
 * the uCode is told via iwl4965_sta_modify_ps_wake().
 */
static void iwl4965_update_ps_mode(struct iwl4965_priv *priv, u16 ps_bit, u8 *addr)
{
	/* FIXME: need locking over ps_status ??? */
	u8 sta_id = iwl4965_hw_find_station(priv, addr);

	if (sta_id != IWL_INVALID_STATION) {
		u8 sta_awake = priv->stations[sta_id].
				ps_status == STA_PS_STATUS_WAKE;

		if (sta_awake && ps_bit)
			priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
		else if (!sta_awake && !ps_bit) {
			iwl4965_sta_modify_ps_wake(priv, sta_id);
			priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
		}
	}
}
3737
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
3739
/* NOTE(review): this block is corrupted in this copy -- bare integer lines
 * (original kernel line numbers) are interleaved with the code, and several
 * statements (local declarations such as fc/network_packet/rx_end/timestamp/
 * pos, braces, break/return lines) were dropped by the extraction. Code is
 * left byte-identical below; comments only. Restore from a pristine tree
 * before building. */
/* Called for REPLY_4965_RX (legacy ABG frames), or
3740
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3741
static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3742
struct iwl4965_rx_mem_buffer *rxb)
3744
struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3745
/* Use phy data (Rx signal strength, etc.) contained within
3746
* this rx packet for legacy frames,
3747
* or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3748
int include_phy = (pkt->hdr.cmd == REPLY_4965_RX);
3749
struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3750
(struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3751
(struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3753
unsigned int len = 0;
3754
struct ieee80211_hdr *header;
3756
/* Fill the mac80211 Rx status from the device phy result */
struct ieee80211_rx_status stats = {
3757
.mactime = le64_to_cpu(rx_start->timestamp),
3758
.channel = le16_to_cpu(rx_start->channel),
3760
(rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3761
MODE_IEEE80211G : MODE_IEEE80211A,
3763
.rate = iwl4965_hw_get_rate(rx_start->rate_n_flags),
3765
#ifdef CONFIG_IWL4965_HT_AGG
3767
#endif /* CONFIG_IWL4965_HT_AGG */
3771
/* Sanity-check the number of phy config words in the packet */
if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3773
("dsp size out of range [0,20]: "
3774
"%d/n", rx_start->cfg_phy_cnt);
3778
if (priv->last_phy_res[0])
3779
rx_start = (struct iwl4965_rx_phy_res *)
3780
&priv->last_phy_res[1];
3786
IWL_ERROR("MPDU frame without a PHY data\n");
3791
header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3792
+ rx_start->cfg_phy_cnt);
3794
len = le16_to_cpu(rx_start->byte_count);
3795
rx_end = (__le32 *) (pkt->u.raw + rx_start->cfg_phy_cnt +
3796
sizeof(struct iwl4965_rx_phy_res) + len);
3798
struct iwl4965_rx_mpdu_res_start *amsdu =
3799
(struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3801
header = (void *)(pkt->u.raw +
3802
sizeof(struct iwl4965_rx_mpdu_res_start));
3803
len = le16_to_cpu(amsdu->byte_count);
3804
rx_end = (__le32 *) (pkt->u.raw +
3805
sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3808
/* Drop frames the hardware flagged as CRC error or FIFO overflow */
if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3809
!(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3810
IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3811
le32_to_cpu(*rx_end));
3815
priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3817
stats.freq = ieee80211chan2mhz(stats.channel);
3819
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
3820
stats.ssi = iwl4965_calc_rssi(rx_start);
3822
/* Meaningful noise values are available only from beacon statistics,
3823
* which are gathered only when associated, and indicate noise
3824
* only for the associated network channel ...
3825
* Ignore these noise values while scanning (other channels) */
3826
if (iwl4965_is_associated(priv) &&
3827
!test_bit(STATUS_SCANNING, &priv->status)) {
3828
stats.noise = priv->last_rx_noise;
3829
stats.signal = iwl4965_calc_sig_qual(stats.ssi, stats.noise);
3831
stats.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3832
stats.signal = iwl4965_calc_sig_qual(stats.ssi, 0);
3835
/* Reset beacon noise level if not associated. */
3836
if (!iwl4965_is_associated(priv))
3837
priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3839
#ifdef CONFIG_IWL4965_DEBUG
3840
/* TODO: Parts of iwl4965_report_frame are broken for 4965 */
3841
if (iwl4965_debug_level & (IWL_DL_RX))
3842
/* Set "1" to report good data frames in groups of 100 */
3843
iwl4965_report_frame(priv, pkt, header, 1);
3845
if (iwl4965_debug_level & (IWL_DL_RX | IWL_DL_STATS))
3846
IWL_DEBUG_RX("Rssi %d, noise %d, qual %d, TSF %lu\n",
3847
stats.ssi, stats.noise, stats.signal,
3848
(long unsigned int)le64_to_cpu(rx_start->timestamp));
3851
network_packet = iwl4965_is_network_packet(priv, header);
3852
if (network_packet) {
3853
priv->last_rx_rssi = stats.ssi;
3854
priv->last_beacon_time = priv->ucode_beacon_time;
3855
priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3858
/* Dispatch on 802.11 frame type: management / control / data */
fc = le16_to_cpu(header->frame_control);
3859
switch (fc & IEEE80211_FCTL_FTYPE) {
3860
case IEEE80211_FTYPE_MGMT:
3862
if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3863
iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3865
switch (fc & IEEE80211_FCTL_STYPE) {
3866
case IEEE80211_STYPE_PROBE_RESP:
3867
case IEEE80211_STYPE_BEACON:
3868
if ((priv->iw_mode == IEEE80211_IF_TYPE_STA &&
3869
!compare_ether_addr(header->addr2, priv->bssid)) ||
3870
(priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
3871
!compare_ether_addr(header->addr3, priv->bssid))) {
3872
struct ieee80211_mgmt *mgmt =
3873
(struct ieee80211_mgmt *)header;
3875
le64_to_cpu(mgmt->u.beacon.timestamp);
3877
priv->timestamp0 = timestamp & 0xFFFFFFFF;
3879
(timestamp >> 32) & 0xFFFFFFFF;
3880
priv->beacon_int = le16_to_cpu(
3881
mgmt->u.beacon.beacon_int);
3882
if (priv->call_post_assoc_from_beacon &&
3883
(priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
3884
priv->call_post_assoc_from_beacon = 0;
3885
queue_work(priv->workqueue,
3886
&priv->post_associate.work);
3891
case IEEE80211_STYPE_ACTION:
3895
* TODO: There is no callback function from upper
3896
* stack to inform us when associated status. this
3897
* work around to sniff assoc_resp management frame
3898
* and finish the association process.
3900
case IEEE80211_STYPE_ASSOC_RESP:
3901
case IEEE80211_STYPE_REASSOC_RESP:
3902
if (network_packet) {
3903
#ifdef CONFIG_IWL4965_HT
3905
struct ieee802_11_elems elems;
3906
#endif /*CONFIG_IWL4965_HT */
3907
struct ieee80211_mgmt *mgnt =
3908
(struct ieee80211_mgmt *)header;
3910
/* We have just associated, give some
3911
* time for the 4-way handshake if
3912
* any. Don't start scan too early. */
3913
priv->next_scan_jiffies = jiffies +
3914
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
3916
priv->assoc_id = (~((1 << 15) | (1 << 14))
3917
& le16_to_cpu(mgnt->u.assoc_resp.aid));
3918
priv->assoc_capability =
3920
mgnt->u.assoc_resp.capab_info);
3921
#ifdef CONFIG_IWL4965_HT
3922
pos = mgnt->u.assoc_resp.variable;
3923
if (!parse_elems(pos,
3924
len - (pos - (u8 *) mgnt),
3926
if (elems.ht_extra_param &&
3930
#endif /*CONFIG_IWL4965_HT */
3931
/* assoc_id is 0 no association */
3932
if (!priv->assoc_id)
3934
if (priv->beacon_int)
3935
queue_work(priv->workqueue,
3936
&priv->post_associate.work);
3938
priv->call_post_assoc_from_beacon = 1;
3943
case IEEE80211_STYPE_PROBE_REQ:
3944
if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
3945
!iwl4965_is_associated(priv)) {
3946
IWL_DEBUG_DROP("Dropping (non network): "
3947
MAC_FMT ", " MAC_FMT ", "
3949
MAC_ARG(header->addr1),
3950
MAC_ARG(header->addr2),
3951
MAC_ARG(header->addr3));
3955
iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &stats);
3958
case IEEE80211_FTYPE_CTL:
3959
#ifdef CONFIG_IWL4965_HT_AGG
3960
switch (fc & IEEE80211_FCTL_STYPE) {
3961
case IEEE80211_STYPE_BACK_REQ:
3962
IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3963
iwl4965_handle_data_packet(priv, 0, include_phy,
3973
case IEEE80211_FTYPE_DATA:
3974
if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3975
iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3978
if (unlikely(!network_packet))
3979
IWL_DEBUG_DROP("Dropping (non network): "
3980
MAC_FMT ", " MAC_FMT ", "
3982
MAC_ARG(header->addr1),
3983
MAC_ARG(header->addr2),
3984
MAC_ARG(header->addr3));
3985
else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
3986
IWL_DEBUG_DROP("Dropping (dup): " MAC_FMT ", "
3987
MAC_FMT ", " MAC_FMT "\n",
3988
MAC_ARG(header->addr1),
3989
MAC_ARG(header->addr2),
3990
MAC_ARG(header->addr3));
3992
iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
4001
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4002
* This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4003
static void iwl4965_rx_reply_rx_phy(struct iwl4965_priv *priv,
4004
struct iwl4965_rx_mem_buffer *rxb)
4006
struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4007
priv->last_phy_res[0] = 1;
4008
memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4009
sizeof(struct iwl4965_rx_phy_res));
4012
/* Handler for MISSED_BEACONS_NOTIFICATION from uCode.
 *
 * If we have missed more than 5 consecutive beacons, re-run the receiver
 * sensitivity calibration (unless a scan is in progress, in which case the
 * "missed" beacons are expected).
 *
 * NOTE(review): restored from corrupted copy (interleaved line numbers,
 * dropped braces); all statements are the ones visible in the original. */
static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
					   struct iwl4965_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWL4965_SENSITIVITY
	struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl4965_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
		IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consequtive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		/* Ask the sensitivity worker to re-initialize calibration;
		 * skip while scanning -- other channels' noise is irrelevant */
		priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
		if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
			queue_work(priv->workqueue, &priv->sensitivity_work);
	}
#endif /*CONFIG_IWL4965_SENSITIVITY*/
}
4034
#ifdef CONFIG_IWL4965_HT
4035
#ifdef CONFIG_IWL4965_HT_AGG
4038
* iwl4965_set_tx_status - Update driver's record of one Tx frame's status
4040
* This will get sent to mac80211.
4042
static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx,
4043
u32 status, u32 retry_count, u32 rate)
4045
struct iwlwifi_ieee80211_tx_status *tx_status =
4046
&(priv->txq[txq_id].txb[idx].status);
4048
tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0;
4049
tx_status->retry_count += retry_count;
4050
tx_status->control.tx_rate = rate;
4055
* iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4057
static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4058
int sta_id, int tid)
4060
unsigned long flags;
4062
/* Remove "disable" flag, to enable Tx for this TID */
4063
spin_lock_irqsave(&priv->sta_lock, flags);
4064
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4065
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4066
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4067
spin_unlock_irqrestore(&priv->sta_lock, flags);
4069
iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4074
/* NOTE(review): corrupted block -- interleaved line numbers and dropped
 * statements (declarations of i/sh/ack, the sh adjustment, early returns,
 * closing braces, and the final argument of iwl4965_set_tx_status). Left
 * byte-identical; comments only. */
* iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4076
* Go through block-ack's bitmap of ACK'd frames, update driver's record of
4077
* ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4079
static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4080
struct iwl4965_ht_agg *agg,
4081
struct iwl4965_compressed_ba_resp*
4086
u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl);
4087
u32 bitmap0, bitmap1;
4088
u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0);
4089
u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1);
4091
/* A BA with no aggregation in flight indicates a driver/uCode mismatch */
if (unlikely(!agg->wait_for_ba)) {
4092
IWL_ERROR("Received BA when not expected\n");
4096
/* Mark that the expected block-ack response arrived */
4097
agg->wait_for_ba = 0;
4098
IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl);
4100
/* Calculate shift to align block-ack bits with our Tx window bits */
4101
sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl>>4);
4102
if (sh < 0) /* tbw something is wrong with indices */
4105
/* don't use 64-bit values for now */
4106
bitmap0 = resp_bitmap0 >> sh;
4107
bitmap1 = resp_bitmap1 >> sh;
4108
bitmap0 |= (resp_bitmap1 & ((1<<sh)|((1<<sh)-1))) << (32 - sh);
4110
if (agg->frame_count > (64 - sh)) {
4111
IWL_DEBUG_TX_REPLY("more frames than bitmap size");
4115
/* check for success or failure according to the
4116
* transmitted bitmap and block-ack bitmap */
4117
bitmap0 &= agg->bitmap0;
4118
bitmap1 &= agg->bitmap1;
4120
/* For each frame attempted in aggregation,
4121
* update driver's record of tx frame's status. */
4122
for (i = 0; i < agg->frame_count ; i++) {
4123
int idx = (agg->start_idx + i) & 0xff;
4124
ack = bitmap0 & (1 << i);
4125
IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4126
ack? "ACK":"NACK", i, idx, agg->start_idx + i);
4127
iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0,
4132
IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1);
4138
/**
 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
 * @index -- current index
 * @n_bd -- total number of entries in queue (s/b power of 2)
 *
 * Returns the previous slot in a circular buffer of @n_bd entries:
 * index-1 normally, or n_bd-1 when decrementing past the front.
 */
static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
{
	return (index == 0) ? n_bd - 1 : index - 1;
}
4148
/* NOTE(review): corrupted block -- interleaved line numbers, dropped
 * declarations (e.g. index), a dropped early return after the queue-range
 * check, and a visibly truncated debug format string. Left byte-identical;
 * comments only. */
* iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4150
* Handles block-acknowledge notification from device, which reports success
4151
* of frames sent via aggregation.
4153
static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4154
struct iwl4965_rx_mem_buffer *rxb)
4156
struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4157
struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4159
struct iwl4965_tx_queue *txq = NULL;
4160
struct iwl4965_ht_agg *agg;
4162
/* "flow" corresponds to Tx queue */
4163
u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow);
4165
/* "ssn" is start of block-ack Tx window, corresponds to index
4166
* (in Tx queue's circular buffer) of first TFD/frame in window */
4167
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4169
/* Bounds-check the device-reported queue number before indexing */
if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) {
4170
IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4174
txq = &priv->txq[ba_resp_scd_flow];
4175
agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4177
/* Find index just before block-ack window */
4178
index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4180
/* TODO: Need to get this copy more safely - now good for debug */
4182
IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from " MAC_FMT ",
4185
MAC_ARG((u8*) &ba_resp->sta_addr_lo32),
4187
IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = "
4188
"%d, scd_ssn = %d\n",
4190
ba_resp->ba_seq_ctl,
4191
ba_resp->ba_bitmap1,
4192
ba_resp->ba_bitmap0,
4195
IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n",
4201
/* Update driver's record of ACK vs. not for each frame in window */
4202
iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4204
/* Release all TFDs before the SSN, i.e. all TFDs in front of
4205
* block-ack window (we assume that they've been successfully
4206
* transmitted ... if not, it's too late anyway). */
4207
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff))
4208
iwl4965_tx_queue_reclaim(priv, ba_resp_scd_flow, index);
4214
* iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4216
static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv, u16 txq_id)
4218
/* Simply stop the queue, but don't change any configuration;
4219
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4220
iwl4965_write_prph(priv,
4221
KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4222
(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4223
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4227
/* NOTE(review): corrupted block -- interleaved line numbers; the second
 * parameter line, local declarations (tbl_dw_addr/tbl_dw/scd_q2ratid), the
 * odd/even-queue conditional between the two tbl_dw assignments, and the
 * return were dropped. Left byte-identical; comments only. */
* iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4229
static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4236
scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4238
tbl_dw_addr = priv->scd_base_addr +
4239
SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4241
tbl_dw = iwl4965_read_targ_mem(priv, tbl_dw_addr);
4244
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4246
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4248
iwl4965_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
4254
/* NOTE(review): corrupted block -- interleaved line numbers; the final
 * parameter line, declarations (rc, ra_tid), the grab_nic_access error
 * path, and the return were dropped. Left byte-identical; comments only. */
* iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4256
* NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4257
* i.e. it must be one of the higher queues used for aggregation
4259
static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4260
int tx_fifo, int sta_id, int tid,
4263
unsigned long flags;
4267
if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4268
IWL_WARNING("queue number too small: %d, must be > %d\n",
4269
txq_id, IWL_BACK_QUEUE_FIRST_ID);
4271
ra_tid = BUILD_RAxTID(sta_id, tid);
4273
/* Modify device's station table to Tx this TID */
4274
iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
4276
spin_lock_irqsave(&priv->lock, flags);
4277
rc = iwl4965_grab_nic_access(priv);
4279
spin_unlock_irqrestore(&priv->lock, flags);
4283
/* Stop this Tx queue before configuring it */
4284
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4286
/* Map receiver-address / traffic-ID to this queue */
4287
iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4289
/* Set this queue as a chain-building queue */
4290
iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1<<txq_id));
4292
/* Place first TFD at index corresponding to start sequence number.
4293
* Assumes that ssn_idx is valid (!= 0xFFF) */
4294
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4295
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4296
iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4298
/* Set up Tx window size and frame limit for this queue */
4299
iwl4965_write_targ_mem(priv,
4300
priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4301
(SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4302
SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4304
iwl4965_write_targ_mem(priv, priv->scd_base_addr +
4305
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4306
(SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4307
& SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4309
iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4311
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
4312
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4314
iwl4965_release_nic_access(priv);
4315
spin_unlock_irqrestore(&priv->lock, flags);
4321
/* NOTE(review): corrupted block -- interleaved line numbers; declarations
 * (rc), the early-return in the txq_id range check, the grab_nic_access
 * error path, and the return were dropped. Left byte-identical; comments
 * only. */
* txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4323
static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4324
u16 ssn_idx, u8 tx_fifo)
4326
unsigned long flags;
4329
if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4330
IWL_WARNING("queue number too small: %d, must be > %d\n",
4331
txq_id, IWL_BACK_QUEUE_FIRST_ID);
4335
spin_lock_irqsave(&priv->lock, flags);
4336
rc = iwl4965_grab_nic_access(priv);
4338
spin_unlock_irqrestore(&priv->lock, flags);
4342
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4344
iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4346
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4347
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4348
/* supposes that ssn_idx is valid (!= 0xFFF) */
4349
iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4351
iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4352
iwl4965_txq_ctx_deactivate(priv, txq_id);
4353
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4355
iwl4965_release_nic_access(priv);
4356
spin_unlock_irqrestore(&priv->lock, flags);
4361
#endif/* CONFIG_IWL4965_HT_AGG */
4362
#endif /* CONFIG_IWL4965_HT */
4365
/* NOTE(review): corrupted block -- interleaved line numbers; declarations
 * (loop index, rate index r, rate_flags), the leading is_ap conditional,
 * and closing braces were dropped. Left byte-identical; comments only. */
* iwl4965_add_station - Initialize a station's hardware rate table
4367
* The uCode's station table contains a table of fallback rates
4368
* for automatic fallback during transmission.
4370
* NOTE: This sets up a default set of values. These will be replaced later
4371
* if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4374
* NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4375
* calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4376
* which requires station table entry to exist).
4378
void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4381
struct iwl4965_link_quality_cmd link_cmd = {
4386
/* Set up the rate scaling to start at selected rate, fall back
4387
* all the way down to 1M in IEEE order, and then spin on 1M */
4389
r = IWL_RATE_54M_INDEX;
4390
else if (priv->phymode == MODE_IEEE80211A)
4391
r = IWL_RATE_6M_INDEX;
4393
r = IWL_RATE_1M_INDEX;
4395
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4397
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4398
rate_flags |= RATE_MCS_CCK_MSK;
4400
/* Use Tx antenna B only */
4401
rate_flags |= RATE_MCS_ANT_B_MSK;
4402
rate_flags &= ~RATE_MCS_ANT_A_MSK;
4404
link_cmd.rs_table[i].rate_n_flags =
4405
iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4406
r = iwl4965_get_prev_ieee_rate(r);
4409
link_cmd.general_params.single_stream_ant_msk = 2;
4410
link_cmd.general_params.dual_stream_ant_msk = 3;
4411
link_cmd.agg_params.agg_dis_start_th = 3;
4412
link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4414
/* Update the rate scaling for control frame Tx to AP */
4415
link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID;
4417
iwl4965_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd),
4421
#ifdef CONFIG_IWL4965_HT
4423
/* NOTE(review): corrupted block -- interleaved line numbers; the return
 * statements after each check were dropped. Checks whether the given
 * channel supports a FAT (40 MHz) extension channel at the requested
 * offset. Left byte-identical; comments only. */
static u8 iwl4965_is_channel_extension(struct iwl4965_priv *priv, int phymode,
4424
u16 channel, u8 extension_chan_offset)
4426
const struct iwl4965_channel_info *ch_info;
4428
ch_info = iwl4965_get_channel_info(priv, phymode, channel);
4429
if (!is_channel_valid(ch_info))
4432
if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO)
4435
if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4436
(ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4442
/* NOTE(review): corrupted block -- interleaved line numbers; the early
 * return lines after each guard were dropped. Decides whether 40 MHz
 * ("fat") Tx may be used for this association. Left byte-identical;
 * comments only. */
static u8 iwl4965_is_fat_tx_allowed(struct iwl4965_priv *priv,
4443
const struct sta_ht_info *ht_info)
4446
if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
4449
if (ht_info->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ)
4452
if (ht_info->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO)
4455
/* no fat tx allowed on 2.4GHZ */
4456
if (priv->phymode != MODE_IEEE80211A)
4458
return (iwl4965_is_channel_extension(priv, priv->phymode,
4459
ht_info->control_channel,
4460
ht_info->extension_chan_offset));
4463
/* NOTE(review): corrupted block -- interleaved line numbers; a local
 * declaration (val), an early return for the !is_ht case, and the break
 * lines inside the switch were dropped. Configures the staging RXON
 * command's HT/FAT-related flags from the association's HT info. Left
 * byte-identical; comments only. */
void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct sta_ht_info *ht_info)
4465
struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
4468
if (!ht_info->is_ht)
4471
/* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
4472
if (iwl4965_is_fat_tx_allowed(priv, ht_info))
4473
rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4475
rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4476
RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4478
if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4479
IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4480
le16_to_cpu(rxon->channel),
4481
ht_info->control_channel);
4482
rxon->channel = cpu_to_le16(ht_info->control_channel);
4486
/* Note: control channel is opposite of extension channel */
4487
switch (ht_info->extension_chan_offset) {
4488
case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4489
rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4491
case IWL_EXT_CHANNEL_OFFSET_BELOW:
4492
rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4494
case IWL_EXT_CHANNEL_OFFSET_AUTO:
4495
rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4498
rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4502
val = ht_info->operating_mode;
4504
rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4506
priv->active_rate_ht[0] = ht_info->supp_rates[0];
4507
priv->active_rate_ht[1] = ht_info->supp_rates[1];
4508
iwl4965_set_rxon_chain(priv);
4510
IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4511
"rxon flags 0x%X operation mode :0x%X "
4512
"extension channel offset 0x%x "
4513
"control chan %d\n",
4514
priv->active_rate_ht[0], priv->active_rate_ht[1],
4515
le32_to_cpu(rxon->flags), ht_info->operating_mode,
4516
ht_info->extension_chan_offset,
4517
ht_info->control_channel);
4521
/* NOTE(review): corrupted block -- interleaved line numbers; the sta_flags
 * declaration, an early return for the !is_ht case, and closing braces
 * were dropped. Updates the station-table flags (MIMO protection, AMPDU
 * limits, FAT enable) for the station at @index. Left byte-identical;
 * comments only. */
void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index)
4524
struct sta_ht_info *ht_info = &priv->current_assoc_ht;
4526
priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ;
4527
if (!ht_info->is_ht)
4530
sta_flags = priv->stations[index].sta.station_flags;
4532
if (ht_info->tx_mimo_ps_mode == IWL_MIMO_PS_DYNAMIC)
4533
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4535
sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK;
4537
sta_flags |= cpu_to_le32(
4538
(u32)ht_info->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4540
sta_flags |= cpu_to_le32(
4541
(u32)ht_info->mpdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4543
sta_flags &= (~STA_FLG_FAT_EN_MSK);
4544
ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_20MHZ;
4545
ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_20MHZ;
4547
if (iwl4965_is_fat_tx_allowed(priv, ht_info)) {
4548
sta_flags |= STA_FLG_FAT_EN_MSK;
4549
ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_40MHZ;
4550
if (ht_info->supported_chan_width == IWL_CHANNEL_WIDTH_40MHZ)
4551
ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_40MHZ;
4553
priv->current_channel_width = ht_info->tx_chan_width;
4554
priv->stations[index].sta.station_flags = sta_flags;
4559
#ifdef CONFIG_IWL4965_HT_AGG
4561
/* Tell uCode's station table to open an Rx block-ack session for this
 * station/TID, starting at sequence number @ssn.
 *
 * NOTE(review): restored from corrupted copy (interleaved line numbers,
 * dropped braces); all statements are the ones visible in the original. */
static void iwl4965_sta_modify_add_ba_tid(struct iwl4965_priv *priv,
					  int sta_id, int tid, u16 ssn)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
4577
static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
4578
int sta_id, int tid)
4580
unsigned long flags;
4582
spin_lock_irqsave(&priv->sta_lock, flags);
4583
priv->stations[sta_id].sta.station_flags_msk = 0;
4584
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4585
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4586
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4587
spin_unlock_irqrestore(&priv->sta_lock, flags);
4589
iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4592
static const u16 default_tid_to_tx_fifo[] = {
4613
/* NOTE(review): corrupted block -- interleaved line numbers; the txq_id
 * declaration and both return statements (found index / not-found) were
 * dropped. Left byte-identical; comments only. */
* Find first available (lowest unused) Tx Queue, mark it "active".
4614
* Called only when finding queue for aggregation.
4615
* Should never return anything < 7, because they should already
4616
* be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4618
static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
4622
for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4623
if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4628
/* NOTE(review): corrupted block -- interleaved line numbers; the final
 * parameter line (start_seq_num), several declarations (sta_id, tx_fifo,
 * txq_id, ssn), error returns, and the trailing arguments of the final
 * call were dropped. mac80211 callback to start a Tx aggregation (A-MPDU)
 * session. Left byte-identical; comments only. */
int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid,
4632
struct iwl4965_priv *priv = hw->priv;
4637
unsigned long flags;
4638
struct iwl4965_tid_data *tid_data;
4640
/* Determine Tx DMA/FIFO channel for this Traffic ID */
4641
if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4642
tx_fifo = default_tid_to_tx_fifo[tid];
4646
IWL_WARNING("iwl-AGG iwl4965_mac_ht_tx_agg_start on da=" MAC_FMT
4647
" tid=%d\n", MAC_ARG(da), tid);
4649
/* Get index into station table */
4650
sta_id = iwl4965_hw_find_station(priv, da);
4651
if (sta_id == IWL_INVALID_STATION)
4654
/* Find available Tx queue for aggregation */
4655
txq_id = iwl4965_txq_ctx_activate_free(priv);
4659
spin_lock_irqsave(&priv->sta_lock, flags);
4660
tid_data = &priv->stations[sta_id].tid[tid];
4662
/* Get starting sequence number for 1st frame in block ack window.
4663
* We'll use least signif byte as 1st frame's index into Tx queue. */
4664
ssn = SEQ_TO_SN(tid_data->seq_number);
4665
tid_data->agg.txq_id = txq_id;
4666
spin_unlock_irqrestore(&priv->sta_lock, flags);
4668
*start_seq_num = ssn;
4670
/* Update driver's link quality manager */
4671
iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE);
4673
/* Set up and enable aggregation for selected Tx queue and FIFO */
4674
return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4679
/* NOTE(review): corrupted block -- interleaved line numbers; the final
 * parameter line, the rc declaration, the NULL check guarding the da
 * error message, error returns, and the trailing statements/return were
 * dropped. mac80211 callback to stop a Tx aggregation session. Left
 * byte-identical; comments only. */
int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid,
4683
struct iwl4965_priv *priv = hw->priv;
4684
int tx_fifo_id, txq_id, sta_id, ssn = -1;
4685
struct iwl4965_tid_data *tid_data;
4688
IWL_ERROR("%s: da = NULL\n", __func__);
4692
if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4693
tx_fifo_id = default_tid_to_tx_fifo[tid];
4697
sta_id = iwl4965_hw_find_station(priv, da);
4699
if (sta_id == IWL_INVALID_STATION)
4702
tid_data = &priv->stations[sta_id].tid[tid];
4703
ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4704
txq_id = tid_data->agg.txq_id;
4706
rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4707
/* FIXME: need more safe way to handle error condition */
4711
iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA);
4712
IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=" MAC_FMT " tid=%d\n",
4718
/* NOTE(review): corrupted block -- interleaved line numbers; the sta_id
 * declaration and the return were dropped. mac80211 callback to start an
 * Rx aggregation session (delegates to the ADDBA station modify). Left
 * byte-identical; comments only. */
int iwl4965_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da,
4719
u16 tid, u16 start_seq_num)
4721
struct iwl4965_priv *priv = hw->priv;
4724
IWL_WARNING("iwl-AGG iwl4965_mac_ht_rx_agg_start on da=" MAC_FMT
4725
" tid=%d\n", MAC_ARG(da), tid);
4726
sta_id = iwl4965_hw_find_station(priv, da);
4727
iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, start_seq_num);
4731
/* NOTE(review): corrupted block -- interleaved line numbers; the sta_id
 * declaration and the return were dropped. mac80211 callback to stop an
 * Rx aggregation session (delegates to the DELBA station modify). Left
 * byte-identical; comments only. */
int iwl4965_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da,
4732
u16 tid, int generator)
4734
struct iwl4965_priv *priv = hw->priv;
4737
IWL_WARNING("iwl-AGG iwl4965_mac_ht_rx_agg_stop on da=" MAC_FMT " tid=%d\n",
4739
sta_id = iwl4965_hw_find_station(priv, da);
4740
iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4744
#endif /* CONFIG_IWL4965_HT_AGG */
4745
#endif /* CONFIG_IWL4965_HT */
4747
/* Set up 4965-specific Rx frame reply handlers */
4748
void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
4750
/* Legacy Rx frames */
4751
priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx;
4753
/* High-throughput (HT) Rx frames */
4754
priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4755
priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4757
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4758
iwl4965_rx_missed_beacon_notif;
4760
#ifdef CONFIG_IWL4965_HT
4761
#ifdef CONFIG_IWL4965_HT_AGG
4762
priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4763
#endif /* CONFIG_IWL4965_HT_AGG */
4764
#endif /* CONFIG_IWL4965_HT */
4767
void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv)
4769
INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4770
INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
4771
#ifdef CONFIG_IWL4965_SENSITIVITY
4772
INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4774
#ifdef CONFIG_IWL4965_HT
4775
#ifdef CONFIG_IWL4965_HT_AGG
4776
INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work);
4777
#endif /* CONFIG_IWL4965_HT_AGG */
4778
#endif /* CONFIG_IWL4965_HT */
4779
init_timer(&priv->statistics_periodic);
4780
priv->statistics_periodic.data = (unsigned long)priv;
4781
priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4784
void iwl4965_hw_cancel_deferred_work(struct iwl4965_priv *priv)
4786
del_timer_sync(&priv->statistics_periodic);
4788
cancel_delayed_work(&priv->init_alive_start);
4791
struct pci_device_id iwl4965_hw_card_ids[] = {
4792
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4229)},
4793
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4230)},
4798
/* NOTE(review): corrupted block -- interleaved line numbers; declarations
 * (retry counter, rc), the success-check/break inside the retry loop, and
 * the final return were dropped. Left byte-identical; comments only. */
* The device's EEPROM semaphore prevents conflicts between driver and uCode
4799
* when accessing the EEPROM; each access is a series of pulses to/from the
4800
* EEPROM chip, not a single event, so even reads could conflict if they
4801
* weren't arbitrated by the semaphore.
4803
int iwl4965_eeprom_acquire_semaphore(struct iwl4965_priv *priv)
4808
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
4809
/* Request semaphore */
4810
iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
4811
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
4813
/* See if we got it */
4814
rc = iwl4965_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
4815
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4816
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4817
EEPROM_SEM_TIMEOUT);
4819
IWL_DEBUG_IO("Acquired semaphore after %d tries.\n",
4828
inline void iwl4965_eeprom_release_semaphore(struct iwl4965_priv *priv)
4830
iwl4965_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
4831
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
4835
/* Export the PCI ID table so module tools (depmod/modprobe) can bind this
 * driver to matching devices at hotplug time. */
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);