/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

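/*
 * A note on ALIGN_SIZE, used by the alignment helpers here: in this
 * driver it evaluates to ((unsigned long)(skb->data + offset)) & 3,
 * i.e. the 0-3 byte misalignment of the chosen point relative to a
 * 4-byte boundary. For example, if skb->data sits at an address ending
 * in 0x02, rt2x00queue_align_frame() shifts the frame 2 bytes toward
 * the headroom so the buffer handed to the hardware starts on a
 * 4-byte boundary.
 */
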
void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

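/*
 * Resulting layout after rt2x00queue_insert_l2pad(), with l2pad being
 * the 0-3 bytes (L2PAD_SIZE(header_length)) needed to land the payload
 * on a 4-byte boundary directly behind the header:
 *
 *	| 802.11 header | l2pad | payload |
 *
 * Header and payload then both start 4-byte aligned, and
 * rt2x00queue_remove_l2pad() below undoes exactly this transformation.
 */
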
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);
}

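/*
 * The arithmetic above relies on the IEEE 802.11 Sequence Control
 * layout: the low 4 bits of seq_ctrl carry the fragment number, the
 * upper 12 bits the sequence number. Adding 0x10 to intf->seqno thus
 * increments the sequence number by one, and the mask/or pair rewrites
 * only the sequence bits of the header while IEEE80211_SCTL_FRAG
 * preserves the fragment number bits.
 */
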
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup.
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

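/*
 * Worked example for the CCK branch above, assuming the usual rt2x00
 * helpers where GET_DURATION(size, rate) is (size * 8 * 10) / rate and
 * GET_DURATION_RES() is the matching remainder, with rate in units of
 * 100 kbit/s: a 1024 byte frame at 11 Mbps (bitrate == 110) gives
 * 81920 / 110 = 744 us with remainder 80, so duration becomes 745 and,
 * since the remainder exceeds 30, no Length Extension bit is set.
 */
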
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);

	if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
		rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which in some way is related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

	return 0;
}

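/*
 * rt2x00queue_write_tx_frame() is the single entry point for queueing
 * frames for transmission. Frames handed down by mac80211 are passed
 * with local == false; frames generated inside the driver stack (such
 * as driver-built RTS/CTS frames) pass local == true, so the
 * SKBDESC_NOT_MAC80211 flag can keep them out of the TX status
 * reporting towards mac80211.
 */
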
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++)
			fn(&queue->entries[i]);
	} else {
		for (i = index_start; i < queue->limit; i++)
			fn(&queue->entries[i]);

		for (i = 0; i < index_end; i++)
			fn(&queue->entries[i]);
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

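/*
 * Wrap-around example for the loop above: with queue->limit == 8,
 * index[Q_INDEX_DONE] == 6 and index[Q_INDEX] == 2, the callback runs
 * for entries 6, 7, 0 and 1 - exactly the frames between the two
 * pointers, visited in the order they were submitted.
 */
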
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);
	entry = &queue->entries[queue->index[index]];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	queue->last_action[index] = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);
	unsigned int i;

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, we can only guarantee
	 * full support for flushing if the driver is able
	 * to cancel all pending frames (drop = true).
	 */
	if (drop && queue->rt2x00dev->ops->lib->flush_queue)
		queue->rt2x00dev->ops->lib->flush_queue(queue);

	/*
	 * When we don't want to drop any frames, or when
	 * the driver doesn't fully flush the queue correctly,
	 * we must wait for the queue to become empty.
	 */
	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
		msleep(10);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++) {
		queue->index[i] = 0;
		queue->last_action[i] = jiffies;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

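/*
 * The single kcalloc() above packs the entry array and all per-entry
 * driver data into one allocation; QUEUE_ENTRY_PRIV_OFFSET points each
 * entry at its slice of the trailing private region:
 *
 *	| entry[0] ... entry[limit - 1] | priv[0] ... priv[limit - 1] |
 *
 * which is why a single kfree(queue->entries) in
 * rt2x00queue_uninitialize() releases both regions at once.
 */
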
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

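/*
 * Resulting queue array layout, for a chipset with ops->tx_queues == 4
 * and an ATIM queue (qid values as initialized above):
 *
 *	queue[0]	rx
 *	queue[1..4]	tx (QID_AC_VO + index)
 *	queue[5]	bcn
 *	queue[6]	atim (only when DRIVER_REQUIRE_ATIM_QUEUE is set)
 */
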
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}