/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>

#include "ath9k.h"
struct ath9k_vif_iter_data {
26
static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
28
struct ath9k_vif_iter_data *iter_data = data;
31
for (i = 0; i < ETH_ALEN; i++)
32
iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
35
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
37
struct ath_wiphy *aphy = hw->priv;
38
struct ath_softc *sc = aphy->sc;
39
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
40
struct ath9k_vif_iter_data iter_data;
44
* Use the hardware MAC address as reference, the hardware uses it
45
* together with the BSSID mask when matching addresses.
47
iter_data.hw_macaddr = common->macaddr;
48
memset(&iter_data.mask, 0xff, ETH_ALEN);
51
ath9k_vif_iter(&iter_data, vif->addr, vif);
53
/* Get list of all active MAC addresses */
54
spin_lock_bh(&sc->wiphy_lock);
55
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
57
for (i = 0; i < sc->num_sec_wiphy; i++) {
58
if (sc->sec_wiphy[i] == NULL)
60
ieee80211_iterate_active_interfaces_atomic(
61
sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
63
spin_unlock_bh(&sc->wiphy_lock);
65
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
66
ath_hw_setbssidmask(common);
69
int ath9k_wiphy_add(struct ath_softc *sc)
72
struct ath_wiphy *aphy;
73
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
74
struct ieee80211_hw *hw;
77
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
81
spin_lock_bh(&sc->wiphy_lock);
82
for (i = 0; i < sc->num_sec_wiphy; i++) {
83
if (sc->sec_wiphy[i] == NULL)
87
if (i == sc->num_sec_wiphy) {
88
/* No empty slot available; increase array length */
90
n = krealloc(sc->sec_wiphy,
91
(sc->num_sec_wiphy + 1) *
92
sizeof(struct ath_wiphy *),
95
spin_unlock_bh(&sc->wiphy_lock);
96
ieee80211_free_hw(hw);
104
SET_IEEE80211_DEV(hw, sc->dev);
109
sc->sec_wiphy[i] = aphy;
110
aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
111
spin_unlock_bh(&sc->wiphy_lock);
113
memcpy(addr, common->macaddr, ETH_ALEN);
114
addr[0] |= 0x02; /* Locally managed address */
116
* XOR virtual wiphy index into the least significant bits to generate
117
* a different MAC address for each virtual wiphy.
120
addr[4] ^= (i & 0xff00) >> 8;
121
addr[3] ^= (i & 0xff0000) >> 16;
123
SET_IEEE80211_PERM_ADDR(hw, addr);
125
ath9k_set_hw_capab(sc, hw);
127
error = ieee80211_register_hw(hw);
130
/* Make sure wiphy scheduler is started (if enabled) */
131
ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
137
int ath9k_wiphy_del(struct ath_wiphy *aphy)
139
struct ath_softc *sc = aphy->sc;
142
spin_lock_bh(&sc->wiphy_lock);
143
for (i = 0; i < sc->num_sec_wiphy; i++) {
144
if (aphy == sc->sec_wiphy[i]) {
145
sc->sec_wiphy[i] = NULL;
146
spin_unlock_bh(&sc->wiphy_lock);
147
ieee80211_unregister_hw(aphy->hw);
148
ieee80211_free_hw(aphy->hw);
152
spin_unlock_bh(&sc->wiphy_lock);
156
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
157
struct ieee80211_vif *vif, const u8 *bssid,
160
struct ath_softc *sc = aphy->sc;
161
struct ath_tx_control txctl;
163
struct ieee80211_hdr *hdr;
165
struct ieee80211_tx_info *info;
167
skb = dev_alloc_skb(24);
170
hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
172
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
173
IEEE80211_FCTL_TODS);
175
fc |= cpu_to_le16(IEEE80211_FCTL_PM);
176
hdr->frame_control = fc;
177
memcpy(hdr->addr1, bssid, ETH_ALEN);
178
memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
179
memcpy(hdr->addr3, bssid, ETH_ALEN);
181
info = IEEE80211_SKB_CB(skb);
182
memset(info, 0, sizeof(*info));
183
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
184
info->control.vif = vif;
185
info->control.rates[0].idx = 0;
186
info->control.rates[0].count = 4;
187
info->control.rates[1].idx = -1;
189
memset(&txctl, 0, sizeof(struct ath_tx_control));
190
txctl.txq = sc->tx.txq_map[WME_AC_VO];
191
txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
193
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
198
dev_kfree_skb_any(skb);
202
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
205
if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
207
for (i = 0; i < sc->num_sec_wiphy; i++) {
208
if (sc->sec_wiphy[i] &&
209
sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
215
static bool ath9k_wiphy_pausing(struct ath_softc *sc)
218
spin_lock_bh(&sc->wiphy_lock);
219
ret = __ath9k_wiphy_pausing(sc);
220
spin_unlock_bh(&sc->wiphy_lock);
224
static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
227
if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
229
for (i = 0; i < sc->num_sec_wiphy; i++) {
230
if (sc->sec_wiphy[i] &&
231
sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
237
bool ath9k_wiphy_scanning(struct ath_softc *sc)
240
spin_lock_bh(&sc->wiphy_lock);
241
ret = __ath9k_wiphy_scanning(sc);
242
spin_unlock_bh(&sc->wiphy_lock);
246
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
248
/* caller must hold wiphy_lock */
249
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
253
if (aphy->chan_idx != aphy->sc->chan_idx)
254
return; /* wiphy not on the selected channel */
255
__ath9k_wiphy_unpause(aphy);
258
static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
261
spin_lock_bh(&sc->wiphy_lock);
262
__ath9k_wiphy_unpause_ch(sc->pri_wiphy);
263
for (i = 0; i < sc->num_sec_wiphy; i++)
264
__ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
265
spin_unlock_bh(&sc->wiphy_lock);
268
void ath9k_wiphy_chan_work(struct work_struct *work)
270
struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
271
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
272
struct ath_wiphy *aphy = sc->next_wiphy;
278
* All pending interfaces paused; ready to change
282
/* Change channels */
283
mutex_lock(&sc->mutex);
284
/* XXX: remove me eventually */
285
ath9k_update_ichannel(sc, aphy->hw,
286
&sc->sc_ah->channels[sc->chan_idx]);
288
/* sync hw configuration for hw code */
289
common->hw = aphy->hw;
291
if (ath_set_channel(sc, aphy->hw,
292
&sc->sc_ah->channels[sc->chan_idx]) < 0) {
293
printk(KERN_DEBUG "ath9k: Failed to set channel for new "
295
mutex_unlock(&sc->mutex);
298
mutex_unlock(&sc->mutex);
300
ath9k_wiphy_unpause_channel(sc);
304
* ath9k version of ieee80211_tx_status() for TX frames that are generated
305
* internally in the driver.
307
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
309
struct ath_wiphy *aphy = hw->priv;
310
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
312
if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
313
if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
314
printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
315
"frame\n", wiphy_name(hw->wiphy));
317
* The AP did not reply; ignore this to allow us to
321
aphy->state = ATH_WIPHY_PAUSED;
322
if (!ath9k_wiphy_pausing(aphy->sc)) {
324
* Drop from tasklet to work to allow mutex for channel
327
ieee80211_queue_work(aphy->sc->hw,
328
&aphy->sc->chan_work);
335
static void ath9k_mark_paused(struct ath_wiphy *aphy)
337
struct ath_softc *sc = aphy->sc;
338
aphy->state = ATH_WIPHY_PAUSED;
339
if (!__ath9k_wiphy_pausing(sc))
340
ieee80211_queue_work(sc->hw, &sc->chan_work);
343
/*
 * Per-interface pause callback: for an associated station, send a PS
 * nullfunc frame to the AP (completion moves the wiphy to PAUSED); for a
 * non-associated station or an AP interface, mark the wiphy paused at once.
 */
static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc) {
			ath9k_mark_paused(aphy);
			break;
		}
		/* TODO: could avoid this if already in PS mode */
		if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
			printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
			       __func__);
			ath9k_mark_paused(aphy);
		}
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is paused by aphy->state change */
		ath9k_mark_paused(aphy);
		break;
	default:
		break;
	}
}
/* caller must hold wiphy_lock */
371
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
373
ieee80211_stop_queues(aphy->hw);
374
aphy->state = ATH_WIPHY_PAUSING;
376
* TODO: handle PAUSING->PAUSED for the case where there are multiple
377
* active vifs (now we do it on the first vif getting ready; should be
380
ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
385
int ath9k_wiphy_pause(struct ath_wiphy *aphy)
388
spin_lock_bh(&aphy->sc->wiphy_lock);
389
ret = __ath9k_wiphy_pause(aphy);
390
spin_unlock_bh(&aphy->sc->wiphy_lock);
394
/*
 * Per-interface unpause callback: for an associated station, send a
 * nullfunc frame with the PS bit cleared; AP beaconing resumes implicitly
 * via the wiphy state change.
 */
static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc)
			break;
		ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is re-enabled by aphy->state change */
		break;
	default:
		break;
	}
}
/* caller must hold wiphy_lock */
414
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
416
ieee80211_iterate_active_interfaces_atomic(aphy->hw,
417
ath9k_unpause_iter, aphy);
418
aphy->state = ATH_WIPHY_ACTIVE;
419
ieee80211_wake_queues(aphy->hw);
423
int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
426
spin_lock_bh(&aphy->sc->wiphy_lock);
427
ret = __ath9k_wiphy_unpause(aphy);
428
spin_unlock_bh(&aphy->sc->wiphy_lock);
432
static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
435
if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
436
sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
437
for (i = 0; i < sc->num_sec_wiphy; i++) {
438
if (sc->sec_wiphy[i] &&
439
sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
440
sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
444
/* caller must hold wiphy_lock */
445
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
448
if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
449
__ath9k_wiphy_pause(sc->pri_wiphy);
450
for (i = 0; i < sc->num_sec_wiphy; i++) {
451
if (sc->sec_wiphy[i] &&
452
sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
453
__ath9k_wiphy_pause(sc->sec_wiphy[i]);
457
int ath9k_wiphy_select(struct ath_wiphy *aphy)
459
struct ath_softc *sc = aphy->sc;
462
spin_lock_bh(&sc->wiphy_lock);
463
if (__ath9k_wiphy_scanning(sc)) {
465
* For now, we are using mac80211 sw scan and it expects to
466
* have full control over channel changes, so avoid wiphy
467
* scheduling during a scan. This could be optimized if the
468
* scanning control were moved into the driver.
470
spin_unlock_bh(&sc->wiphy_lock);
473
if (__ath9k_wiphy_pausing(sc)) {
474
if (sc->wiphy_select_failures == 0)
475
sc->wiphy_select_first_fail = jiffies;
476
sc->wiphy_select_failures++;
477
if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
479
printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
480
"out; disable/enable hw to recover\n");
481
__ath9k_wiphy_mark_all_paused(sc);
483
* TODO: this workaround to fix hardware is unlikely to
484
* be specific to virtual wiphy changes. It can happen
485
* on normal channel change, too, and as such, this
486
* should really be made more generic. For example,
487
* tricker radio disable/enable on GTT interrupt burst
488
* (say, 10 GTT interrupts received without any TX
489
* frame being completed)
491
spin_unlock_bh(&sc->wiphy_lock);
492
ath_radio_disable(sc, aphy->hw);
493
ath_radio_enable(sc, aphy->hw);
494
/* Only the primary wiphy hw is used for queuing work */
495
ieee80211_queue_work(aphy->sc->hw,
496
&aphy->sc->chan_work);
497
return -EBUSY; /* previous select still in progress */
499
spin_unlock_bh(&sc->wiphy_lock);
500
return -EBUSY; /* previous select still in progress */
502
sc->wiphy_select_failures = 0;
504
/* Store the new channel */
505
sc->chan_idx = aphy->chan_idx;
506
sc->chan_is_ht = aphy->chan_is_ht;
507
sc->next_wiphy = aphy;
509
__ath9k_wiphy_pause_all(sc);
510
now = !__ath9k_wiphy_pausing(aphy->sc);
511
spin_unlock_bh(&sc->wiphy_lock);
514
/* Ready to request channel change immediately */
515
ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
519
* wiphys will be unpaused in ath9k_tx_status() once channel has been
520
* changed if any wiphy needs time to become paused.
526
bool ath9k_wiphy_started(struct ath_softc *sc)
529
spin_lock_bh(&sc->wiphy_lock);
530
if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
531
spin_unlock_bh(&sc->wiphy_lock);
534
for (i = 0; i < sc->num_sec_wiphy; i++) {
535
if (sc->sec_wiphy[i] &&
536
sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
537
spin_unlock_bh(&sc->wiphy_lock);
541
spin_unlock_bh(&sc->wiphy_lock);
545
static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
546
struct ath_wiphy *selected)
548
if (selected->state == ATH_WIPHY_SCAN) {
549
if (aphy == selected)
552
* Pause all other wiphys for the duration of the scan even if
553
* they are on the current channel now.
555
} else if (aphy->chan_idx == selected->chan_idx)
557
aphy->state = ATH_WIPHY_PAUSED;
558
ieee80211_stop_queues(aphy->hw);
561
void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
562
struct ath_wiphy *selected)
565
spin_lock_bh(&sc->wiphy_lock);
566
if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
567
ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
568
for (i = 0; i < sc->num_sec_wiphy; i++) {
569
if (sc->sec_wiphy[i] &&
570
sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
571
ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
573
spin_unlock_bh(&sc->wiphy_lock);
576
void ath9k_wiphy_work(struct work_struct *work)
578
struct ath_softc *sc = container_of(work, struct ath_softc,
580
struct ath_wiphy *aphy = NULL;
583
spin_lock_bh(&sc->wiphy_lock);
585
if (sc->wiphy_scheduler_int == 0) {
586
/* wiphy scheduler is disabled */
587
spin_unlock_bh(&sc->wiphy_lock);
592
sc->wiphy_scheduler_index++;
593
while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
594
aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
595
if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
598
sc->wiphy_scheduler_index++;
602
sc->wiphy_scheduler_index = 0;
603
if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
608
/* No wiphy is ready to be scheduled */
610
aphy = sc->pri_wiphy;
613
spin_unlock_bh(&sc->wiphy_lock);
616
aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
617
ath9k_wiphy_select(aphy)) {
618
printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
622
ieee80211_queue_delayed_work(sc->hw,
624
sc->wiphy_scheduler_int);
627
void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
629
cancel_delayed_work_sync(&sc->wiphy_work);
630
sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
631
if (sc->wiphy_scheduler_int)
632
ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
633
sc->wiphy_scheduler_int);
636
/* caller must hold wiphy_lock */
637
bool ath9k_all_wiphys_idle(struct ath_softc *sc)
640
if (!sc->pri_wiphy->idle)
642
for (i = 0; i < sc->num_sec_wiphy; i++) {
643
struct ath_wiphy *aphy = sc->sec_wiphy[i];
652
/* caller must hold wiphy_lock */
653
void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
655
struct ath_softc *sc = aphy->sc;
658
ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
659
"Marking %s as %sidle\n",
660
wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
662
/* Only bother starting a queue on an active virtual wiphy */
663
bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
665
struct ieee80211_hw *hw = sc->pri_wiphy->hw;
667
bool txq_started = false;
669
spin_lock_bh(&sc->wiphy_lock);
671
/* Start the primary wiphy */
672
if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
673
ieee80211_wake_queue(hw, skb_queue);
678
/* Now start the secondary wiphy queues */
679
for (i = 0; i < sc->num_sec_wiphy; i++) {
680
struct ath_wiphy *aphy = sc->sec_wiphy[i];
683
if (aphy->state != ATH_WIPHY_ACTIVE)
687
ieee80211_wake_queue(hw, skb_queue);
693
spin_unlock_bh(&sc->wiphy_lock);
697
/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
698
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
700
struct ieee80211_hw *hw = sc->pri_wiphy->hw;
703
spin_lock_bh(&sc->wiphy_lock);
705
/* Stop the primary wiphy */
706
ieee80211_stop_queue(hw, skb_queue);
708
/* Now stop the secondary wiphy queues */
709
for (i = 0; i < sc->num_sec_wiphy; i++) {
710
struct ath_wiphy *aphy = sc->sec_wiphy[i];
714
ieee80211_stop_queue(hw, skb_queue);
716
spin_unlock_bh(&sc->wiphy_lock);