/*
 * Atheros CARL9170 driver
 *
 * 802.11 xmit & status routines
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 *    Permission to use, copy, modify, and/or distribute this software for any
 *    purpose with or without fee is hereby granted, provided that the above
 *    copyright notice and this permission notice appear in all copies.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
/* driver-local declarations (struct ar9170, register map, firmware commands) */
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"
static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
	if (unlikely(modparam_noht)) {
		/*
		 * This is just another workaround, until
		 * someone figures out how to get QoS and
		 * AMPDU to play nicely together.
		 */

static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
	return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
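
/*
 * Queue accounting: is_mem_full() reports whether the remaining firmware
 * memory blocks can still hold one worst-case (IEEE80211_MAX_FRAME_LEN)
 * frame. carl9170_tx_accounting() uses it, together with the per-queue
 * software limits, to decide when the mac80211 queues have to be stopped.
 */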
static bool is_mem_full(struct ar9170 *ar)
	return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
		atomic_read(&ar->mem_free_blocks));

static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
	atomic_inc(&ar->tx_total_queued);

	queue = skb_get_queue_mapping(skb);
	spin_lock_bh(&ar->tx_stats_lock);

	/*
	 * The driver has to accept the frame, regardless of whether the
	 * queue is full to the brim or not. We have to do the queuing
	 * internally, since mac80211 assumes that a driver which can
	 * operate with aggregated frames does not reject frames for this
	 * reason.
	 */
	ar->tx_stats[queue].len++;
	ar->tx_stats[queue].count++;

	mem_full = is_mem_full(ar);
	for (i = 0; i < ar->hw->queues; i++) {
		if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
			ieee80211_stop_queue(ar->hw, i);
			ar->queue_stop_timeout[i] = jiffies;

	spin_unlock_bh(&ar->tx_stats_lock);
/* needs rcu_read_lock */
static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_vif *vif;

	vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
		 CARL9170_TX_SUPER_MISC_VIF_ID_S;

	if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))

	vif = rcu_dereference(ar->vif_priv[vif_id].vif);

	/*
	 * Normally we should use wrappers like ieee80211_get_DA to get
	 * the correct peer ieee80211_sta.
	 *
	 * But there is a problem with indirect traffic (broadcasts, or
	 * data which is designated for other stations) in station mode.
	 * The frame will be directed to the AP for distribution and not
	 * to the actual destination.
	 */
	return ieee80211_find_sta(vif, hdr->addr1);
static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;

	sta = __carl9170_get_tx_sta(ar, skb);

	sta_info = (struct carl9170_sta_info *) sta->drv_priv;
	if (atomic_dec_return(&sta_info->pending_frames) == 0)
		ieee80211_sta_block_awake(ar->hw, sta, false);

static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
	queue = skb_get_queue_mapping(skb);

	spin_lock_bh(&ar->tx_stats_lock);

	ar->tx_stats[queue].len--;

	if (!is_mem_full(ar)) {
		for (i = 0; i < ar->hw->queues; i++) {
			if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)

			if (ieee80211_queue_stopped(ar->hw, i)) {
				tmp = jiffies - ar->queue_stop_timeout[i];
				if (tmp > ar->max_queue_stop_timeout[i])
					ar->max_queue_stop_timeout[i] = tmp;

			ieee80211_wake_queue(ar->hw, i);

	spin_unlock_bh(&ar->tx_stats_lock);

	if (atomic_dec_and_test(&ar->tx_total_queued))
		complete(&ar->tx_flush);
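
/*
 * Device memory allocation: each frame reserves enough firmware memory
 * blocks for its length plus one region from the block bitmap. The bitmap
 * index (+1, since cookie 0 is reserved) is stored as the frame's cookie,
 * so the matching tx status report can be found later and the blocks
 * returned in carl9170_release_dev_space().
 */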
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
	struct _carl9170_tx_superframe *super = (void *) skb->data;

	atomic_inc(&ar->mem_allocs);

	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
	if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);

	spin_lock_bh(&ar->mem_lock);
	cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
	spin_unlock_bh(&ar->mem_lock);

	if (unlikely(cookie < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);

	super = (void *) skb->data;

	/*
	 * Cookie #0 serves two special purposes:
	 *  1. The firmware might use it to generate BlockACK frames
	 *     in response to incoming BlockAckReqs.
	 *  2. Prevent double-free bugs.
	 */
	super->s.cookie = (u8) cookie + 1;
static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
	struct _carl9170_tx_superframe *super = (void *) skb->data;

	/* make a local copy of the cookie */
	cookie = super->s.cookie;
	/* invalidate cookie */

	/*
	 * Do an out-of-bounds check on the cookie:
	 *
	 *  * cookie "0" is reserved and won't be assigned to any
	 *    out-going frame. Internally however, it is used to
	 *    mark no longer/un-accounted frames and serves as a
	 *    cheap way of preventing frames from being freed
	 *    twice by _accident_. NB: There is a tiny race...
	 *
	 *  * obviously, the cookie number is limited by the amount
	 *    of available memory blocks, so the number can
	 *    never exceed the mem_blocks count.
	 */
	if (unlikely(WARN_ON_ONCE(cookie == 0) ||
		     WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))

	atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
		   &ar->mem_free_blocks);

	spin_lock_bh(&ar->mem_lock);
	bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
	spin_unlock_bh(&ar->mem_lock);
/* Called from any context */
static void carl9170_tx_release(struct kref *ref)
	struct carl9170_tx_info *arinfo;
	struct ieee80211_tx_info *txinfo;

	arinfo = container_of(ref, struct carl9170_tx_info, ref);
	txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
	skb = container_of((void *) txinfo, struct sk_buff, cb);

	if (WARN_ON_ONCE(!ar))

		     offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);

	memset(&txinfo->status.ampdu_ack_len, 0,
	       sizeof(struct ieee80211_tx_info) -
	       offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));

	if (atomic_read(&ar->tx_total_queued))
		ar->tx_schedule = true;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
		if (!atomic_read(&ar->tx_ampdu_upload))
			ar->tx_ampdu_schedule = true;

		if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
			struct _carl9170_tx_superframe *super;

			super = (void *)skb->data;
			txinfo->status.ampdu_len = super->s.rix;
			txinfo->status.ampdu_ack_len = super->s.cnt;
		} else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
			   !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
			/*
			 * drop redundant tx_status reports:
			 *
			 * 1. ampdu_ack_len of the final tx_status does
			 *    include the feedback of this particular frame.
			 *
			 * 2. tx_status_irqsafe only queues up to 128
			 *    tx feedback reports and discards the rest.
			 *
			 * 3. minstrel_ht is picky, it only accepts
			 *    reports of frames with the TX_STATUS_AMPDU flag.
			 *
			 * 4. mac80211 is not particularly interested in
			 *    feedback either [CTL_REQ_TX_STATUS not set]
			 */
			ieee80211_free_txskb(ar->hw, skb);

	/*
	 * Either the frame transmission has failed or
	 * mac80211 requested tx status.
	 */
	skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
void carl9170_tx_get_skb(struct sk_buff *skb)
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;

	kref_get(&arinfo->ref);

int carl9170_tx_put_skb(struct sk_buff *skb)
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;

	return kref_put(&arinfo->ref, carl9170_tx_release);
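
/*
 * A-MPDU block-ack window handling: every queued MPDU sets the bit at
 * (seq - bsn) in the TID bitmap. When its tx status arrives, the bit is
 * cleared again and bsn is advanced across the run of already completed
 * sequence numbers, shifting the bitmap along with it.
 */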
/* Caller must hold the tid_info->lock & rcu_read_lock */
static void carl9170_tx_shift_bm(struct ar9170 *ar,
				 struct carl9170_sta_tid *tid_info, u16 seq)
	off = SEQ_DIFF(seq, tid_info->bsn);

	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))

	/*
	 * Sanity check. For each MPDU we set the bit in bitmap and
	 * clear it once we received the tx_status.
	 * But if the bit is already cleared then we've been bitten
	 * by a double free.
	 */
	WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));

	off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))

	if (!bitmap_empty(tid_info->bitmap, off))
		off = find_first_bit(tid_info->bitmap, off);

	tid_info->bsn += off;
	tid_info->bsn &= 0x0fff;

	bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
			   off, CARL9170_BAW_BITS);
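
/*
 * Per-TID status bookkeeping: ampdu_len/ampdu_ack_len are accumulated for
 * every MPDU of an aggregate. The totals are attached (via super->s.rix
 * and super->s.cnt) to the frame that carried the IMM_BA bit, which is
 * the one report mac80211's rate control will actually evaluate.
 */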
static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
	struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *tid_info;

	if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
	    txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
	    (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))

	sta = __carl9170_get_tx_sta(ar, skb);

	tid = get_tid_h(hdr);

	sta_info = (void *) sta->drv_priv;
	tid_info = rcu_dereference(sta_info->agg[tid]);

	spin_lock_bh(&tid_info->lock);
	if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
		carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));

	if (sta_info->stats[tid].clear) {
		sta_info->stats[tid].clear = false;
		sta_info->stats[tid].req = false;
		sta_info->stats[tid].ampdu_len = 0;
		sta_info->stats[tid].ampdu_ack_len = 0;

	sta_info->stats[tid].ampdu_len++;
	if (txinfo->status.rates[0].count == 1)
		sta_info->stats[tid].ampdu_ack_len++;

	if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
		sta_info->stats[tid].req = true;

	if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
		super->s.rix = sta_info->stats[tid].ampdu_len;
		super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
		txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
		if (sta_info->stats[tid].req)
			txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		sta_info->stats[tid].clear = true;

	spin_unlock_bh(&tid_info->lock);
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
	struct ieee80211_tx_info *txinfo;

	carl9170_tx_accounting_free(ar, skb);

	txinfo = IEEE80211_SKB_CB(skb);

	if (success)
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
	else
		ar->tx_ack_failures++;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		carl9170_tx_status_process_ampdu(ar, skb, txinfo);

	carl9170_tx_ps_unblock(ar, skb);
	carl9170_tx_put_skb(skb);
/* This function may be called from any context */
void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);

	atomic_dec(&ar->tx_total_pending);

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		atomic_dec(&ar->tx_ampdu_upload);

	if (carl9170_tx_put_skb(skb))
		tasklet_hi_schedule(&ar->usb_tasklet);
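
/*
 * Status lookup: tx status reports from the firmware only carry the
 * cookie, so the matching skb has to be found by walking the per-queue
 * tx_status list. On a hit the skb is unlinked and its device memory
 * is released.
 */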
static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
					       struct sk_buff_head *queue)
	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		struct _carl9170_tx_superframe *txc = (void *) skb->data;

		if (txc->s.cookie != cookie)

		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);

		carl9170_release_dev_space(ar, skb);

	spin_unlock_bh(&queue->lock);

static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
				      unsigned int tries, struct ieee80211_tx_info *txinfo)
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (txinfo->status.rates[i].idx < 0)

		txinfo->status.rates[i].count = tries;

	for (; i < IEEE80211_TX_MAX_RATES; i++) {
		txinfo->status.rates[i].idx = -1;
		txinfo->status.rates[i].count = 0;
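
/*
 * The next two checks are run from the tx janitor worker: one watches for
 * status-queue entries whose tx report never arrived and restarts the
 * device, the other tears down block-ack sessions whose TID queue has not
 * moved for too long.
 */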
static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	bool restart = false;

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);

		skb = skb_peek(&ar->tx_status[i]);

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *) txinfo->rate_driver_data;

		if (time_is_before_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true)

		spin_unlock_bh(&ar->tx_status[i].lock);

	/*
	 * At least one queue has been stuck for long enough.
	 * Give the device a kick and hope it gets back to work.
	 *
	 * possible reasons may include:
	 *  - frames got lost/corrupted (bad connection to the device)
	 *  - stalled rx processing/usb controller hiccups
	 *  - firmware errors/bugs
	 *  - every bug you can think of.
	 *  - all bugs you can't...
	 */
	carl9170_restart(ar, CARL9170_RR_STUCK_TX);
static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
	struct carl9170_sta_tid *iter;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_sta *sta;

	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
		if (iter->state < CARL9170_TID_STATE_IDLE)

		spin_lock_bh(&iter->lock);
		skb = skb_peek(&iter->queue);

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *)txinfo->rate_driver_data;
		if (time_is_after_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))

		sta = __carl9170_get_tx_sta(ar, skb);

		ieee80211_stop_tx_ba_session(sta, iter->tid);

		spin_unlock_bh(&iter->lock);
void carl9170_tx_janitor(struct work_struct *work)
	struct ar9170 *ar = container_of(work, struct ar9170,

	ar->tx_janitor_last_run = jiffies;

	carl9170_check_queue_stop_timeout(ar);
	carl9170_tx_ampdu_timeout(ar);

	if (!atomic_read(&ar->tx_total_queued))

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
static void __carl9170_tx_process_status(struct ar9170 *ar,
	const uint8_t cookie, const uint8_t info)
	struct ieee80211_tx_info *txinfo;
	unsigned int r, t, q;

	q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];

	skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
		/* We have lost the race to another thread. */

	txinfo = IEEE80211_SKB_CB(skb);

	if (!(info & CARL9170_TX_STATUS_SUCCESS))

	r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
	t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;

	carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
	carl9170_tx_status(ar, skb, success);
void carl9170_tx_process_status(struct ar9170 *ar,
				const struct carl9170_rsp *cmd)
	for (i = 0; i < cmd->hdr.ext; i++) {
		if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
			print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
					     (void *) cmd, cmd->hdr.len + 4);

		__carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
					     cmd->_tx_status[i].info);
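
/*
 * carl9170_tx_rate_tpc_chains() translates a mac80211 tx rate into the raw
 * PHY rate index, the tx power value taken from the driver's per-band
 * power tables (CCK/OFDM/HT20/HT40) and the chain mask to use for it.
 */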
static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
	unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
	struct ieee80211_rate *rate = NULL;

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			/* +1 dBm for HT40 */

			if (info->band == IEEE80211_BAND_2GHZ)
				txpower = ar->power_2G_ht40;
				txpower = ar->power_5G_ht40;

			if (info->band == IEEE80211_BAND_2GHZ)
				txpower = ar->power_2G_ht20;
				txpower = ar->power_5G_ht20;

		*phyrate = txrate->idx;
		*tpc += txpower[idx & 7];

		if (info->band == IEEE80211_BAND_2GHZ) {
				txpower = ar->power_2G_cck;
				txpower = ar->power_2G_ofdm;
			txpower = ar->power_5G_leg;

		rate = &__carl9170_ratetable[idx];
		*tpc += txpower[(rate->hw_value & 0x30) >> 4];
		*phyrate = rate->hw_value & 0xf;

	if (ar->eeprom.tx_mask == 1) {
		*chains = AR9170_TX_PHY_TXCHAIN_1;

		if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
		    rate && rate->bitrate >= 360)
			*chains = AR9170_TX_PHY_TXCHAIN_1;
			*chains = AR9170_TX_PHY_TXCHAIN_2;
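
/*
 * carl9170_tx_physet() assembles the 32-bit phy_control word of the
 * hardware descriptor: bandwidth, guard interval, modulation, MCS/PLCP
 * rate, tx power and chain mask.
 */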
static __le32 carl9170_tx_physet(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
	unsigned int power = 0, chains = 0, phyrate = 0;

	tmp = cpu_to_le32(0);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<

	/* this works because 40 MHz is 2 and dup is 3 */
	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);

		/* heavy clip control */
		tmp |= cpu_to_le32((txrate->idx & 0x7) <<
				   AR9170_TX_PHY_TX_HEAVY_CLIP_S);

		tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/*
		 * green field preamble does not work.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
		 */

		if (info->band == IEEE80211_BAND_2GHZ) {
			if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
			tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);

		/*
		 * short preamble seems to be broken too.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
		 */

	carl9170_tx_rate_tpc_chains(ar, info, txrate,
				    &phyrate, &power, &chains);

	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
static bool carl9170_tx_rts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate,
				  bool ampdu, bool multi)
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:

	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))

	case CARL9170_ERP_RTS:

static bool carl9170_tx_cts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate)
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))

	case CARL9170_ERP_CTS:
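
/*
 * carl9170_tx_prepare() prepends the firmware's tx superframe (super
 * descriptor + hardware descriptor) to the 802.11 frame and fills in
 * queue, vif, key, rate, protection and A-MPDU parameters for it.
 */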
static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
	struct ieee80211_hdr *hdr;
	struct _carl9170_tx_superframe *txc;
	struct carl9170_vif_info *cvif;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_rate *txrate;
	struct ieee80211_sta *sta;
	struct carl9170_tx_info *arinfo;
	unsigned int hw_queue;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
		     CARL9170_TX_SUPERDESC_LEN);

	BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
		     AR9170_TX_HWDESC_LEN);

	BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);

	BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
		     ((CARL9170_TX_SUPER_MISC_VIF_ID >>
		       CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));

	hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];

	hdr = (void *)skb->data;
	info = IEEE80211_SKB_CB(skb);

	/*
	 * Note: If the frame was sent through a monitor interface,
	 * the ieee80211_vif pointer can be NULL.
	 */
	if (likely(info->control.vif))
		cvif = (void *) info->control.vif->drv_priv;

	sta = info->control.sta;

	txc = (void *)skb_push(skb, sizeof(*txc));
	memset(txc, 0, sizeof(*txc));

	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);

	SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);

	if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;

	if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;

	mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
			      AR9170_TX_MAC_BACKOFF);
	mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &

	no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
	if (unlikely(no_ack))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

	if (info->control.hw_key) {
		len += info->control.hw_key->icv_len;

		switch (info->control.hw_key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);

		case WLAN_CIPHER_SUITE_CCMP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);

	ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
		unsigned int density, factor;

		if (unlikely(!sta || !cvif))

		factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
		density = sta->ht_cap.ampdu_density;

			/*
			 * Otus uses slightly different density values than
			 * those from the 802.11n spec.
			 */
			density = max_t(unsigned int, density + 1, 7u);

		SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
			txc->s.ampdu_settings, density);

		SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
			txc->s.ampdu_settings, factor);

		for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
			txrate = &info->control.rates[i];
			if (txrate->idx >= 0) {
					CARL9170_TX_SUPER_RI_AMPDU;

				if (WARN_ON(!(txrate->flags &
					      IEEE80211_TX_RC_MCS))) {
					/*
					 * Not sure if it's even possible
					 * to aggregate non-ht rates with
					 * this HW.
					 */

			txrate->count = ar->hw->max_rate_tries;

		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);

	/*
	 * NOTE: For the first rate, the ERP & AMPDU flags are directly
	 * taken from mac_control. For all fallback rates, the firmware
	 * updates the mac_control flags from the rate info field.
	 */
	for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
		txrate = &info->control.rates[i];

		SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],

		if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
					 CARL9170_TX_SUPER_RI_ERP_PROT_S);
		else if (carl9170_tx_cts_check(ar, txrate))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
					 CARL9170_TX_SUPER_RI_ERP_PROT_S);

		txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);

	txrate = &info->control.rates[0];
	SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);

	if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
	else if (carl9170_tx_cts_check(ar, txrate))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);

	txc->s.len = cpu_to_le16(skb->len);
	txc->f.length = cpu_to_le16(len + FCS_LEN);
	txc->f.mac_control = mac_tmp;
	txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);

	arinfo = (void *)info->rate_driver_data;
	arinfo->timeout = jiffies;

	kref_init(&arinfo->ref);

	skb_pull(skb, sizeof(*txc));
static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
	struct _carl9170_tx_superframe *super;

	super = (void *) skb->data;
	super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
	struct _carl9170_tx_superframe *super;

	super = (void *) skb->data;

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
	      CARL9170_TX_SUPER_AMPDU_DENSITY_S;

	/*
	 * If you haven't noticed, carl9170_tx_prepare has already filled
	 * in all ampdu spacing & factor parameters.
	 * Now it's the time to check whether the settings have to be
	 * updated by the firmware, or if everything is still the same.
	 *
	 * There's no sane way to handle different density values with
	 * this hardware, so we may as well just do the compare in the
	 * driver.
	 */
	if (tmp != ar->current_density) {
		ar->current_density = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
	      CARL9170_TX_SUPER_AMPDU_FACTOR_S;

	if (tmp != ar->current_factor) {
		ar->current_factor = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
				   struct sk_buff *_src)
	struct _carl9170_tx_superframe *dest, *src;

	dest = (void *) _dest->data;
	src = (void *) _src->data;

	/*
	 * The mac80211 rate control algorithm expects that all MPDUs in
	 * an AMPDU share the same tx vectors.
	 * This is not really obvious right now, because the hardware
	 * does the AMPDU setup according to its own rulebook.
	 * Our nicely assembled, strictly monotonic increasing mpdu
	 * chains will be broken up, mashed back together...
	 */
	return (dest->f.phy_control == src->f.phy_control);
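
/*
 * A-MPDU scheduler: carl9170_tx_ampdu() walks the registered TIDs in a
 * round-robin fashion (starting at tx_ampdu_iter), pulls strictly
 * consecutive sequence numbers that fit into the block-ack window off
 * each TID queue and splices them, as one aggregate, onto the matching
 * tx_pending queue for upload.
 */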
static void carl9170_tx_ampdu(struct ar9170 *ar)
	struct sk_buff_head agg;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb, *first;
	unsigned int i = 0, done_ampdus = 0;
	u16 seq, queue, tmpssn;

	atomic_inc(&ar->tx_ampdu_scheduler);
	ar->tx_ampdu_schedule = false;

	if (atomic_read(&ar->tx_ampdu_upload))

	if (!ar->tx_ampdu_list_len)

	__skb_queue_head_init(&agg);

	tid_info = rcu_dereference(ar->tx_ampdu_iter);
	if (WARN_ON_ONCE(!tid_info)) {

	list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state < CARL9170_TID_STATE_PROGRESS)

		queue = TID_TO_WME_AC(tid_info->tid);

		spin_lock_bh(&tid_info->lock);
		if (tid_info->state != CARL9170_TID_STATE_XMIT)

		tid_info->counter++;
		first = skb_peek(&tid_info->queue);
		tmpssn = carl9170_get_seq(first);
		seq = tid_info->snx;

		if (unlikely(tmpssn != seq)) {
			tid_info->state = CARL9170_TID_STATE_IDLE;

		while ((skb = skb_peek(&tid_info->queue))) {
			/* strict 0, 1, ..., n - 1, n frame sequence order */
			if (unlikely(carl9170_get_seq(skb) != seq))

			/* don't upload more than AMPDU FACTOR allows. */
			if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
			    (tid_info->max - 1)))

			if (!carl9170_tx_rate_check(ar, skb, first))

			atomic_inc(&ar->tx_ampdu_upload);
			tid_info->snx = seq = SEQ_NEXT(seq);
			__skb_unlink(skb, &tid_info->queue);

			__skb_queue_tail(&agg, skb);

			if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)

		if (skb_queue_empty(&tid_info->queue) ||
		    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
			/*
			 * stop TID, if A-MPDU frames are still missing,
			 * or whenever the queue is empty.
			 */
			tid_info->state = CARL9170_TID_STATE_IDLE;

		spin_unlock_bh(&tid_info->lock);

		if (skb_queue_empty(&agg))

		/* apply ampdu spacing & factor settings */
		carl9170_set_ampdu_params(ar, skb_peek(&agg));

		/* set aggregation push bit */
		carl9170_set_immba(ar, skb_peek_tail(&agg));

		spin_lock_bh(&ar->tx_pending[queue].lock);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_bh(&ar->tx_pending[queue].lock);
		ar->tx_schedule = true;

	if ((done_ampdus++ == 0) && (i++ == 0))

	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
					    struct sk_buff_head *queue)
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	spin_lock_bh(&queue->lock);
	skb = skb_peek(queue);

	if (carl9170_alloc_dev_space(ar, skb))

	__skb_unlink(skb, queue);
	spin_unlock_bh(&queue->lock);

	info = IEEE80211_SKB_CB(skb);
	arinfo = (void *) info->rate_driver_data;

	arinfo->timeout = jiffies;

	spin_unlock_bh(&queue->lock);
void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
	struct _carl9170_tx_superframe *super;

	super = (void *)skb->data;
	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
		ar9170_qmap[carl9170_get_queue(ar, skb)]);
	__carl9170_tx_process_status(ar, super->s.cookie, q);
static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;

	sta = __carl9170_get_tx_sta(ar, skb);

	sta_info = (void *) sta->drv_priv;
	if (unlikely(sta_info->sleeping)) {
		struct ieee80211_tx_info *tx_info;

		tx_info = IEEE80211_SKB_CB(skb);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			atomic_dec(&ar->tx_ampdu_upload);

		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		carl9170_tx_status(ar, skb, false);
static void carl9170_tx(struct ar9170 *ar)
	struct sk_buff *skb;
	bool schedule_garbagecollector = false;

	ar->tx_schedule = false;

	if (unlikely(!IS_STARTED(ar)))

	carl9170_usb_handle_tx_err(ar);

	for (i = 0; i < ar->hw->queues; i++) {
		while (!skb_queue_empty(&ar->tx_pending[i])) {
			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);

			if (unlikely(carl9170_tx_ps_drop(ar, skb)))

			atomic_inc(&ar->tx_total_pending);

			q = __carl9170_get_queue(ar, i);
			/*
			 * NB: tx_status[i] vs. tx_status[q],
			 * TODO: Move into pick_skb or alloc_dev_space.
			 */
			skb_queue_tail(&ar->tx_status[q], skb);

			/*
			 * increase ref count to "2".
			 * Ref counting is the easiest way to solve the
			 * race between the urb's completion routine:
			 *	carl9170_tx_callback
			 * and wlan tx status functions:
			 *	carl9170_tx_status/janitor.
			 */
			carl9170_tx_get_skb(skb);

			carl9170_usb_tx(ar, skb);
			schedule_garbagecollector = true;

	if (!schedule_garbagecollector)

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
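
/*
 * carl9170_tx_ampdu_queue() sorts an outgoing MPDU into its TID queue,
 * keeping the queue in sequence order so the scheduler can later build
 * gap-free aggregates. Frames that fall outside the block-ack window
 * lose their AGGR bit and are completed with a failed status instead.
 */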
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
				    struct ieee80211_sta *sta, struct sk_buff *skb)
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *agg;
	struct sk_buff *iter;
	u16 tid, seq, qseq, off;

	tid = carl9170_get_tid(skb);
	seq = carl9170_get_seq(skb);
	sta_info = (void *) sta->drv_priv;

	agg = rcu_dereference(sta_info->agg[tid]);

		goto err_unlock_rcu;

	spin_lock_bh(&agg->lock);
	if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))

	/* check if sequence is within the BA window */
	if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))

	if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))

	off = SEQ_DIFF(seq, agg->bsn);
	if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))

	if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
		__skb_queue_tail(&agg->queue, skb);

	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = carl9170_get_seq(iter);

		if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
			__skb_queue_after(&agg->queue, iter, skb);

	__skb_queue_head(&agg->queue, skb);

	if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
		if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
			agg->state = CARL9170_TID_STATE_XMIT;

	spin_unlock_bh(&agg->lock);

	spin_unlock_bh(&agg->lock);

	super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
	carl9170_tx_status(ar, skb, false);
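
/*
 * carl9170_op_tx() is the mac80211 tx entry point: the frame is prepared,
 * accounted, and then either handed to the A-MPDU machinery or appended
 * to the per-AC tx_pending queue for transmission.
 */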
void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
	struct ar9170 *ar = hw->priv;
	struct ieee80211_tx_info *info;
	struct ieee80211_sta *sta;

	if (unlikely(!IS_STARTED(ar)))

	info = IEEE80211_SKB_CB(skb);
	sta = info->control.sta;

	if (unlikely(carl9170_tx_prepare(ar, skb)))

	carl9170_tx_accounting(ar, skb);
	/*
	 * from now on, one has to use carl9170_tx_status to free
	 * all resources which are associated with the frame.
	 */

		struct carl9170_sta_info *stai = (void *) sta->drv_priv;
		atomic_inc(&stai->pending_frames);

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		run = carl9170_tx_ampdu_queue(ar, sta, skb);
			carl9170_tx_ampdu(ar);

		unsigned int queue = skb_get_queue_mapping(skb);

		skb_queue_tail(&ar->tx_pending[queue], skb);

	ieee80211_free_txskb(ar->hw, skb);
void carl9170_tx_scheduler(struct ar9170 *ar)
	if (ar->tx_ampdu_schedule)
		carl9170_tx_ampdu(ar);

	if (ar->tx_schedule)
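
/*
 * carl9170_update_beacon() fetches a fresh beacon from mac80211, derives
 * the PLCP/HT parameters for it and uploads the frame word by word into
 * the per-vif beacon area of the device memory using async register
 * writes; only words that differ from the previous beacon are rewritten.
 */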
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
	struct sk_buff *skb = NULL;
	struct carl9170_vif_info *cvif;
	struct ieee80211_tx_info *txinfo;
	struct ieee80211_tx_rate *rate;
	__le32 *data, *old = NULL;
	unsigned int plcp, power, chains;
	u32 word, ht1, off, addr, len;

	cvif = rcu_dereference(ar->beacon_iter);
	if (ar->vifs == 0 || !cvif)

	list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
		if (cvif->active && cvif->enable_beacon)

	if (!ar->beacon_enabled || i++)

	rcu_assign_pointer(ar->beacon_iter, cvif);

	skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),

	txinfo = IEEE80211_SKB_CB(skb);

	spin_lock_bh(&ar->beacon_lock);
	data = (__le32 *)skb->data;
		old = (__le32 *)cvif->beacon->data;

	off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
	addr = ar->fw.beacon_addr + off;
	len = roundup(skb->len + FCS_LEN, 4);

	if ((off + len) > ar->fw.beacon_max_len) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "beacon does not "
				  "fit into device memory!\n");

	if (len > AR9170_MAC_BCN_LENGTH_MAX) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "no support for beacons "
				  "bigger than %d (yours:%d).\n",
				  AR9170_MAC_BCN_LENGTH_MAX, len);

	ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
	rate = &txinfo->control.rates[0];
	carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
	if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
		if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
			plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
			plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;

		ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			plcp |= AR9170_MAC_BCN_HT2_SGI;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
			plcp |= AR9170_MAC_BCN_HT2_BW40;

		if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
			ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
			plcp |= AR9170_MAC_BCN_HT2_BW40;

		SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);

	SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
	SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
	SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
	if (chains == AR9170_TX_PHY_TXCHAIN_2)
		ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;

	carl9170_async_regwrite_begin(ar);
	carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
	if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);

	for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
		/*
		 * XXX: This accesses beyond skb data for up
		 * to the last 3 bytes!!
		 */
		if (old && (data[i] == old[i]))

		word = le32_to_cpu(data[i]);
		carl9170_async_regwrite(addr + 4 * i, word);

	carl9170_async_regwrite_finish();

	dev_kfree_skb_any(cvif->beacon);
	cvif->beacon = NULL;

	err = carl9170_async_regwrite_result();

	spin_unlock_bh(&ar->beacon_lock);

	err = carl9170_bcn_ctrl(ar, cvif->id,
				CARL9170_BCN_CTRL_CAB_TRIGGER,
				addr, skb->len + FCS_LEN);

	spin_unlock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(skb);